/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include <drm/drm_print.h>

#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_crt.h"
#include "intel_csr.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_hotplug.h"
#include "intel_sideband.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
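
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a typical consumer grabs a reference to the innermost power domain it
 * needs around the hardware access and releases it again afterwards. The
 * chosen domain and the function name here are arbitrary; the get/put API
 * is the one implemented further down in this file.
 */
static void __maybe_unused example_power_domain_usage(struct drm_i915_private *i915)
{
        intel_wakeref_t wakeref;

        wakeref = intel_display_power_get(i915, POWER_DOMAIN_PIPE_A);

        /* ... registers backed by the PIPE_A domain may be accessed here ... */

        intel_display_power_put(i915, POWER_DOMAIN_PIPE_A, wakeref);
}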

static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915);
static void
__intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref,
                       bool wakelock);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
static void
intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref);
#else
static inline void intel_runtime_pm_put_raw(struct drm_i915_private *i915,
                                            intel_wakeref_t wref)
{
        __intel_runtime_pm_put(i915, -1, false);
}
#endif

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
        unsigned long entries[STACKDEPTH];
        unsigned int n;

        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
        return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void __print_depot_stack(depot_stack_handle_t stack,
                                char *buf, int sz, int indent)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(stack, &entries);
        stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}
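
/*
 * Editor's sketch (illustrative only, not part of the original file):
 * round-tripping a stack trace through the stack depot using the two
 * helpers above. The function name is hypothetical.
 */
static void __maybe_unused example_stack_roundtrip(void)
{
        depot_stack_handle_t stack = __save_depot_stack();
        char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

        if (stack && buf) {
                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                DRM_DEBUG_DRIVER("captured at:\n%s", buf);
        }
        kfree(buf);
}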

static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
        struct i915_runtime_pm *rpm = &i915->runtime_pm;

        spin_lock_init(&rpm->debug.lock);
}

static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
        struct i915_runtime_pm *rpm = &i915->runtime_pm;
        depot_stack_handle_t stack, *stacks;
        unsigned long flags;

        if (!HAS_RUNTIME_PM(i915))
                return -1;

        stack = __save_depot_stack();
        if (!stack)
                return -1;

        spin_lock_irqsave(&rpm->debug.lock, flags);

        if (!rpm->debug.count)
                rpm->debug.last_acquire = stack;

        stacks = krealloc(rpm->debug.owners,
                          (rpm->debug.count + 1) * sizeof(*stacks),
                          GFP_NOWAIT | __GFP_NOWARN);
        if (stacks) {
                stacks[rpm->debug.count++] = stack;
                rpm->debug.owners = stacks;
        } else {
                stack = -1;
        }

        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
                                             depot_stack_handle_t stack)
{
        struct i915_runtime_pm *rpm = &i915->runtime_pm;
        unsigned long flags, n;
        bool found = false;

        if (unlikely(stack == -1))
                return;

        spin_lock_irqsave(&rpm->debug.lock, flags);
        for (n = rpm->debug.count; n--; ) {
                if (rpm->debug.owners[n] == stack) {
                        memmove(rpm->debug.owners + n,
                                rpm->debug.owners + n + 1,
                                (--rpm->debug.count - n) * sizeof(stack));
                        found = true;
                        break;
                }
        }
        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        if (WARN(!found,
                 "Unmatched wakeref (tracking %lu), count %u\n",
                 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
                char *buf;

                buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
                if (!buf)
                        return;

                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

                stack = READ_ONCE(rpm->debug.last_release);
                if (stack) {
                        __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                        DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
                }

                kfree(buf);
        }
}

static int cmphandle(const void *_a, const void *_b)
{
        const depot_stack_handle_t * const a = _a, * const b = _b;

        if (*a < *b)
                return -1;
        else if (*a > *b)
                return 1;
        else
                return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
                                 const struct intel_runtime_pm_debug *dbg)
{
        unsigned long i;
        char *buf;

        buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
        if (!buf)
                return;

        if (dbg->last_acquire) {
                __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
                drm_printf(p, "Wakeref last acquired:\n%s", buf);
        }

        if (dbg->last_release) {
                __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
                drm_printf(p, "Wakeref last released:\n%s", buf);
        }

        drm_printf(p, "Wakeref count: %lu\n", dbg->count);

        sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

        for (i = 0; i < dbg->count; i++) {
                depot_stack_handle_t stack = dbg->owners[i];
                unsigned long rep;

                rep = 1;
                while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
                        rep++, i++;
                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
        }

        kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
                       struct intel_runtime_pm_debug *saved)
{
        *saved = *debug;

        debug->owners = NULL;
        debug->count = 0;
        debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
        struct drm_printer p;

        if (!debug->count)
                return;

        p = drm_debug_printer("i915");
        __print_intel_runtime_pm_wakeref(&p, debug);

        kfree(debug->owners);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
        struct i915_runtime_pm *rpm = &i915->runtime_pm;
        struct intel_runtime_pm_debug dbg = {};
        unsigned long flags;

        if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
                                         &rpm->debug.lock,
                                         flags))
                return;

        __untrack_all_wakerefs(&rpm->debug, &dbg);
        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
{
        struct i915_runtime_pm *rpm = &i915->runtime_pm;
        struct intel_runtime_pm_debug dbg = {};
        unsigned long flags;

        spin_lock_irqsave(&rpm->debug.lock, flags);
        __untrack_all_wakerefs(&rpm->debug, &dbg);
        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        dump_and_free_wakeref_tracking(&dbg);
}

void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
                                    struct drm_printer *p)
{
        struct intel_runtime_pm_debug dbg = {};

        do {
                struct i915_runtime_pm *rpm = &i915->runtime_pm;
                unsigned long alloc = dbg.count;
                depot_stack_handle_t *s;

                spin_lock_irq(&rpm->debug.lock);
                dbg.count = rpm->debug.count;
                if (dbg.count <= alloc) {
                        memcpy(dbg.owners,
                               rpm->debug.owners,
                               dbg.count * sizeof(*s));
                }
                dbg.last_acquire = rpm->debug.last_acquire;
                dbg.last_release = rpm->debug.last_release;
                spin_unlock_irq(&rpm->debug.lock);
                if (dbg.count <= alloc)
                        break;

                s = krealloc(dbg.owners,
                             dbg.count * sizeof(*s),
                             GFP_NOWAIT | __GFP_NOWARN);
                if (!s)
                        goto out;

                dbg.owners = s;
        } while (1);

        __print_intel_runtime_pm_wakeref(p, &dbg);

out:
        kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
        return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
                                             intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
        atomic_dec(&i915->runtime_pm.wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
{
}

#endif

static void
intel_runtime_pm_acquire(struct drm_i915_private *i915, bool wakelock)
{
        struct i915_runtime_pm *rpm = &i915->runtime_pm;

        if (wakelock) {
                atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
                assert_rpm_wakelock_held(i915);
        } else {
                atomic_inc(&rpm->wakeref_count);
                assert_rpm_raw_wakeref_held(i915);
        }
}

static void
intel_runtime_pm_release(struct drm_i915_private *i915, int wakelock)
{
        struct i915_runtime_pm *rpm = &i915->runtime_pm;

        if (wakelock) {
                assert_rpm_wakelock_held(i915);
                atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
        } else {
                assert_rpm_raw_wakeref_held(i915);
        }

        __intel_wakeref_dec_and_check_tracking(i915);
}

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
                                         enum i915_power_well_id power_well_id);

static const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
        switch (domain) {
        case POWER_DOMAIN_DISPLAY_CORE:
                return "DISPLAY_CORE";
        case POWER_DOMAIN_PIPE_A:
                return "PIPE_A";
        case POWER_DOMAIN_PIPE_B:
                return "PIPE_B";
        case POWER_DOMAIN_PIPE_C:
                return "PIPE_C";
        case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
                return "PIPE_A_PANEL_FITTER";
        case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
                return "PIPE_B_PANEL_FITTER";
        case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
                return "PIPE_C_PANEL_FITTER";
        case POWER_DOMAIN_TRANSCODER_A:
                return "TRANSCODER_A";
        case POWER_DOMAIN_TRANSCODER_B:
                return "TRANSCODER_B";
        case POWER_DOMAIN_TRANSCODER_C:
                return "TRANSCODER_C";
        case POWER_DOMAIN_TRANSCODER_EDP:
                return "TRANSCODER_EDP";
        case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
                return "TRANSCODER_EDP_VDSC";
        case POWER_DOMAIN_TRANSCODER_DSI_A:
                return "TRANSCODER_DSI_A";
        case POWER_DOMAIN_TRANSCODER_DSI_C:
                return "TRANSCODER_DSI_C";
        case POWER_DOMAIN_PORT_DDI_A_LANES:
                return "PORT_DDI_A_LANES";
        case POWER_DOMAIN_PORT_DDI_B_LANES:
                return "PORT_DDI_B_LANES";
        case POWER_DOMAIN_PORT_DDI_C_LANES:
                return "PORT_DDI_C_LANES";
        case POWER_DOMAIN_PORT_DDI_D_LANES:
                return "PORT_DDI_D_LANES";
        case POWER_DOMAIN_PORT_DDI_E_LANES:
                return "PORT_DDI_E_LANES";
        case POWER_DOMAIN_PORT_DDI_F_LANES:
                return "PORT_DDI_F_LANES";
        case POWER_DOMAIN_PORT_DDI_A_IO:
                return "PORT_DDI_A_IO";
        case POWER_DOMAIN_PORT_DDI_B_IO:
                return "PORT_DDI_B_IO";
        case POWER_DOMAIN_PORT_DDI_C_IO:
                return "PORT_DDI_C_IO";
        case POWER_DOMAIN_PORT_DDI_D_IO:
                return "PORT_DDI_D_IO";
        case POWER_DOMAIN_PORT_DDI_E_IO:
                return "PORT_DDI_E_IO";
        case POWER_DOMAIN_PORT_DDI_F_IO:
                return "PORT_DDI_F_IO";
        case POWER_DOMAIN_PORT_DSI:
                return "PORT_DSI";
        case POWER_DOMAIN_PORT_CRT:
                return "PORT_CRT";
        case POWER_DOMAIN_PORT_OTHER:
                return "PORT_OTHER";
        case POWER_DOMAIN_VGA:
                return "VGA";
        case POWER_DOMAIN_AUDIO:
                return "AUDIO";
        case POWER_DOMAIN_AUX_A:
                return "AUX_A";
        case POWER_DOMAIN_AUX_B:
                return "AUX_B";
        case POWER_DOMAIN_AUX_C:
                return "AUX_C";
        case POWER_DOMAIN_AUX_D:
                return "AUX_D";
        case POWER_DOMAIN_AUX_E:
                return "AUX_E";
        case POWER_DOMAIN_AUX_F:
                return "AUX_F";
        case POWER_DOMAIN_AUX_IO_A:
                return "AUX_IO_A";
        case POWER_DOMAIN_AUX_TBT1:
                return "AUX_TBT1";
        case POWER_DOMAIN_AUX_TBT2:
                return "AUX_TBT2";
        case POWER_DOMAIN_AUX_TBT3:
                return "AUX_TBT3";
        case POWER_DOMAIN_AUX_TBT4:
                return "AUX_TBT4";
        case POWER_DOMAIN_GMBUS:
                return "GMBUS";
        case POWER_DOMAIN_INIT:
                return "INIT";
        case POWER_DOMAIN_MODESET:
                return "MODESET";
        case POWER_DOMAIN_GT_IRQ:
                return "GT_IRQ";
        default:
                MISSING_CASE(domain);
                return "?";
        }
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well)
{
        DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
        power_well->desc->ops->enable(dev_priv, power_well);
        power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
                                     struct i915_power_well *power_well)
{
        DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
        power_well->hw_enabled = false;
        power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
{
        if (!power_well->count++)
                intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
{
        WARN(!power_well->count, "Use count on power well %s is already zero",
             power_well->desc->name);

        if (!--power_well->count)
                intel_power_well_disable(dev_priv, power_well);
}
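
/*
 * Editor's sketch (illustrative, not part of the original file): only the
 * first get and the last put touch the hardware; intermediate references
 * merely adjust the use count.
 */
static void __maybe_unused example_power_well_refcount(struct drm_i915_private *dev_priv,
                                                       struct i915_power_well *power_well)
{
        intel_power_well_get(dev_priv, power_well); /* count 0 -> 1: enables */
        intel_power_well_get(dev_priv, power_well); /* count 1 -> 2: no HW access */
        intel_power_well_put(dev_priv, power_well); /* count 2 -> 1: no HW access */
        intel_power_well_put(dev_priv, power_well); /* count 1 -> 0: disables */
}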

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                      enum intel_display_power_domain domain)
{
        struct i915_power_well *power_well;
        bool is_enabled;

        if (dev_priv->runtime_pm.suspended)
                return false;

        is_enabled = true;

        for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
                if (power_well->desc->always_on)
                        continue;

                if (!power_well->hw_enabled) {
                        is_enabled = false;
                        break;
                }
        }

        return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                    enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains;
        bool ret;

        power_domains = &dev_priv->power_domains;

        mutex_lock(&power_domains->lock);
        ret = __intel_display_power_is_enabled(dev_priv, domain);
        mutex_unlock(&power_domains->lock);

        return ret;
}
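
/*
 * Editor's sketch (illustrative, not part of the original file): a typical
 * HW state readout guard using the check above. Domain and function name
 * are arbitrary.
 */
static bool __maybe_unused example_readout_check(struct drm_i915_private *dev_priv)
{
        if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
                return false;

        /* ... safe to read out pipe A hardware state here ... */

        return true;
}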

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
                                       u8 irq_pipe_mask, bool has_vga)
{
        struct pci_dev *pdev = dev_priv->drm.pdev;

        /*
         * After we re-enable the power well, if we touch VGA register 0x3d5
         * we'll get unclaimed register interrupts. This stops after we write
         * anything to the VGA MSR register. The vgacon module uses this
         * register all the time, so if we unbind our driver and, as a
         * consequence, bind vgacon, we'll get stuck in an infinite loop at
         * console_unlock(). So here we make sure to touch the VGA MSR
         * register, so vgacon can keep working normally without triggering
         * interrupts and error messages.
         */
        if (has_vga) {
                vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
                outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
                vga_put(pdev, VGA_RSRC_LEGACY_IO);
        }

        if (irq_pipe_mask)
                gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
                                       u8 irq_pipe_mask)
{
        if (irq_pipe_mask)
                gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;

        /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
        WARN_ON(intel_wait_for_register(&dev_priv->uncore,
                                        regs->driver,
                                        HSW_PWR_WELL_CTL_STATE(pw_idx),
                                        HSW_PWR_WELL_CTL_STATE(pw_idx),
                                        1));
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
                                     const struct i915_power_well_regs *regs,
                                     int pw_idx)
{
        u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
        u32 ret;

        ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
        ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
        if (regs->kvmr.reg)
                ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
        ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;

        return ret;
}

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        bool disabled;
        u32 reqs;

        /*
         * Bspec doesn't require waiting for PWs to get disabled, but still do
         * this for paranoia. The known cases where a PW will be forced on:
         * - a KVMR request on any power well via the KVMR request register
         * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
         *   DEBUG request registers
         * Skip the wait in case any of the request bits are set and print a
         * diagnostic message.
         */
        wait_for((disabled = !(I915_READ(regs->driver) &
                               HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
                 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
        if (disabled)
                return;

        DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
                      power_well->desc->name,
                      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
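
/*
 * Editor's note (illustrative): the requesters value decodes as a bitmask;
 * e.g. reqs == 0x5 means the BIOS (bit 0) and KVMR (bit 2) request registers
 * are still holding the well on.
 */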

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
                                           enum skl_power_gate pg)
{
        /* Timeout 5us for PG#0, for other PGs 1us */
        WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
                                        SKL_FUSE_PG_DIST_STATUS(pg),
                                        SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        bool wait_fuses = power_well->desc->hsw.has_fuses;
        enum skl_power_gate uninitialized_var(pg);
        u32 val;

        if (wait_fuses) {
                pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
                                                 SKL_PW_CTL_IDX_TO_PG(pw_idx);
                /*
                 * For PW1 we have to wait both for the PW0/PG0 fuse state
                 * before enabling the power well and PW1/PG1's own fuse
                 * state after the enabling. For all other power wells with
                 * fuses we only have to wait for that PW/PG's fuse state
                 * after the enabling.
                 */
                if (pg == SKL_PG1)
                        gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
        }

        val = I915_READ(regs->driver);
        I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
        hsw_wait_for_power_well_enable(dev_priv, power_well);

        /* Display WA #1178: cnl */
        if (IS_CANNONLAKE(dev_priv) &&
            pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
            pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
                val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
                val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
                I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
        }

        if (wait_fuses)
                gen9_wait_for_power_well_fuses(dev_priv, pg);

        hsw_power_well_post_enable(dev_priv,
                                   power_well->desc->hsw.irq_pipe_mask,
                                   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        u32 val;

        hsw_power_well_pre_disable(dev_priv,
                                   power_well->desc->hsw.irq_pipe_mask);

        val = I915_READ(regs->driver);
        I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
        hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_PORT(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
        u32 val;

        val = I915_READ(regs->driver);
        I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

        val = I915_READ(ICL_PORT_CL_DW12(port));
        I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);

        hsw_wait_for_power_well_enable(dev_priv, power_well);

        /* Display WA #1178: icl */
        if (IS_ICELAKE(dev_priv) &&
            pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
            !intel_bios_is_port_edp(dev_priv, port)) {
                val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
                val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
                I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
        }
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
                                     struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
        u32 val;

        val = I915_READ(ICL_PORT_CL_DW12(port));
        I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);

        val = I915_READ(regs->driver);
        I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

        hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_CH(pw_idx) \
        ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
{
        enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
        u32 val;

        val = I915_READ(DP_AUX_CH_CTL(aux_ch));
        val &= ~DP_AUX_CH_CTL_TBT_IO;
        if (power_well->desc->hsw.is_tc_tbt)
                val |= DP_AUX_CH_CTL_TBT_IO;
        I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);

        hsw_power_well_enable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        enum i915_power_well_id id = power_well->desc->id;
        int pw_idx = power_well->desc->hsw.idx;
        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
                   HSW_PWR_WELL_CTL_STATE(pw_idx);
        u32 val;

        val = I915_READ(regs->driver);

        /*
         * On GEN9 big core due to a DMC bug the driver's request bits for PW1
         * and the MISC_IO PW will not be restored, so check instead for the
         * BIOS's own request bits, which are forced-on for these power wells
         * when exiting DC5/6.
         */
        if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
            (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
                val |= I915_READ(regs->bios);

        return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
                  "DC9 already programmed to be enabled.\n");
        WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
                  "DC5 still not disabled to enable DC9.\n");
        WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
                  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
                  "Power well 2 on.\n");
        WARN_ONCE(intel_irqs_enabled(dev_priv),
                  "Interrupts not disabled yet.\n");

        /*
         * TODO: check for the following to verify the conditions to enter DC9
         * state are satisfied:
         * 1] Check relevant display engine registers to verify if mode set
         * disable sequence was followed.
         * 2] Check if display uninitialize sequence is initialized.
         */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
        WARN_ONCE(intel_irqs_enabled(dev_priv),
                  "Interrupts not disabled yet.\n");
        WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
                  "DC5 still not disabled.\n");

        /*
         * TODO: check for the following to verify DC9 state was indeed
         * entered before programming to disable it:
         * 1] Check relevant display engine registers to verify if mode
         * set disable sequence was followed.
         * 2] Check if display uninitialize sequence is initialized.
         */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
                                u32 state)
{
        int rewrites = 0;
        int rereads = 0;
        u32 v;

        I915_WRITE(DC_STATE_EN, state);

        /* It has been observed that disabling the dc6 state sometimes
         * doesn't stick and dmc keeps returning old value. Make sure
         * the write really sticks enough times and also force rewrite until
         * we are confident that state is exactly what we want.
         */
        do {
                v = I915_READ(DC_STATE_EN);

                if (v != state) {
                        I915_WRITE(DC_STATE_EN, state);
                        rewrites++;
                        rereads = 0;
                } else if (rereads++ > 5) {
                        break;
                }

        } while (rewrites < 100);

        if (v != state)
                DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
                          state, v);

        /* Most of the times we need one retry, avoid spam */
        if (rewrites > 1)
                DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
                              state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
        u32 mask;

        mask = DC_STATE_EN_UPTO_DC5;
        if (INTEL_GEN(dev_priv) >= 11)
                mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
        else if (IS_GEN9_LP(dev_priv))
                mask |= DC_STATE_EN_DC9;
        else
                mask |= DC_STATE_EN_UPTO_DC6;

        return mask;
}
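
/*
 * Editor's note (illustrative): per the logic above, GEN11+ allows
 * DC5|DC6|DC9, BXT/GLK (GEN9_LP) allows DC5|DC9, and all other gen9+
 * platforms allow DC5|DC6.
 */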

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
        u32 val;

        val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

        DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
                      dev_priv->csr.dc_state, val);
        dev_priv->csr.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW will
 * decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of display
 * registers backed by affected power rails is saved/restored as needed.
 *
 * Based on the above enabling a deeper DC power state is asynchronous wrt.
 * enabling it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
        u32 val;
        u32 mask;

        if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
                state &= dev_priv->csr.allowed_dc_mask;

        val = I915_READ(DC_STATE_EN);
        mask = gen9_dc_mask(dev_priv);
        DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
                      val & mask, state);

        /* Check if DMC is ignoring our DC state requests */
        if ((val & mask) != dev_priv->csr.dc_state)
                DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
                          dev_priv->csr.dc_state, val & mask);

        val &= ~mask;
        val |= state;

        gen9_write_dc_state(dev_priv, val);

        dev_priv->csr.dc_state = val & mask;
}
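
/*
 * Editor's note (illustrative): the helpers below pair these requests up,
 * e.g. gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6) to allow DC6 and
 * gen9_set_dc_state(dev_priv, DC_STATE_DISABLE) to force all DC states off.
 */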

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc9(dev_priv);

        DRM_DEBUG_KMS("Enabling DC9\n");
        /*
         * Power sequencer reset is not needed on
         * platforms with South Display Engine on PCH,
         * because PPS registers are always on.
         */
        if (!HAS_PCH_SPLIT(dev_priv))
                intel_power_sequencer_reset(dev_priv);
        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
        assert_can_disable_dc9(dev_priv);

        DRM_DEBUG_KMS("Disabling DC9\n");

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
        WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
                  "CSR program storage start is NULL\n");
        WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
        WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
                  enum i915_power_well_id power_well_id)
{
        struct i915_power_well *power_well;

        for_each_power_well(dev_priv, power_well)
                if (power_well->desc->id == power_well_id)
                        return power_well;

        /*
         * It's not feasible to add error checking code to the callers since
         * this condition really shouldn't happen and it doesn't even make sense
         * to abort things like display initialization sequences. Just return
         * the first power well and hope the WARN gets reported so we can fix
         * our driver.
         */
        WARN(1, "Power well %d not defined for this platform\n", power_well_id);
        return &dev_priv->power_domains.power_wells[0];
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
        bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
                                                               SKL_DISP_PW_2);

        WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
                  "DC5 already programmed to be enabled.\n");
        assert_rpm_wakelock_held(dev_priv);

        assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc5(dev_priv);

        DRM_DEBUG_KMS("Enabling DC5\n");

        /* Wa Display #1183: skl,kbl,cfl */
        if (IS_GEN9_BC(dev_priv))
                I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
                           SKL_SELECT_ALTERNATE_DC_EXIT);

        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
        WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
                  "Backlight is not disabled.\n");
        WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
                  "DC6 already programmed to be enabled.\n");

        assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc6(dev_priv);

        DRM_DEBUG_KMS("Enabling DC6\n");

        /* Wa Display #1183: skl,kbl,cfl */
        if (IS_GEN9_BC(dev_priv))
                I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
                           SKL_SELECT_ALTERNATE_DC_EXIT);

        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
        u32 bios_req = I915_READ(regs->bios);

        /* Take over the request bit if set by BIOS. */
        if (bios_req & mask) {
                u32 drv_req = I915_READ(regs->driver);

                if (!(drv_req & mask))
                        I915_WRITE(regs->driver, drv_req | mask);
                I915_WRITE(regs->bios, bios_req & ~mask);
        }
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *power_well;

        power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
        if (power_well->count > 0)
                bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

        power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        if (power_well->count > 0)
                bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

        if (IS_GEMINILAKE(dev_priv)) {
                power_well = lookup_power_well(dev_priv,
                                               GLK_DISP_PW_DPIO_CMN_C);
                if (power_well->count > 0)
                        bxt_ddi_phy_verify_state(dev_priv,
                                                 power_well->desc->bxt.phy);
        }
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
        u32 tmp = I915_READ(DBUF_CTL);

        WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
             (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
             "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        struct intel_cdclk_state cdclk_state = {};

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
        /* Can't read out voltage_level so can't use intel_cdclk_changed() */
        WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

        gen9_assert_dbuf_enabled(dev_priv);

        if (IS_GEN9_LP(dev_priv))
                bxt_verify_ddi_phy_power_wells(dev_priv);

        if (INTEL_GEN(dev_priv) >= 11)
                /*
                 * DMC retains HW context only for port A, the other combo
                 * PHY's HW context for port B is lost after DC transitions,
                 * so we need to restore it manually.
                 */
                intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        if (!dev_priv->csr.dmc_payload)
                return;

        if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
                skl_enable_dc6(dev_priv);
        else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
                gen9_enable_dc5(dev_priv);
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
                                         struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
                                              struct i915_power_well *power_well)
{
        return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
                                         struct i915_power_well *power_well)
{
        if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
                i830_enable_pipe(dev_priv, PIPE_A);
        if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
                i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        i830_disable_pipe(dev_priv, PIPE_B);
        i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
                I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        if (power_well->count > 0)
                i830_pipes_power_well_enable(dev_priv, power_well);
        else
                i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
{
        int pw_idx = power_well->desc->vlv.idx;
        u32 mask;
        u32 state;
        u32 ctrl;

        mask = PUNIT_PWRGT_MASK(pw_idx);
        state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
                         PUNIT_PWRGT_PWR_GATE(pw_idx);

        vlv_punit_get(dev_priv);

#define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

        if (COND)
                goto out;

        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
        ctrl &= ~mask;
        ctrl |= state;
        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

        if (wait_for(COND, 100))
                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                          state,
                          vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
        vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        int pw_idx = power_well->desc->vlv.idx;
        bool enabled = false;
        u32 mask;
        u32 state;
        u32 ctrl;

        mask = PUNIT_PWRGT_MASK(pw_idx);
        ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

        vlv_punit_get(dev_priv);

        state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
                state != PUNIT_PWRGT_PWR_GATE(pw_idx));
        if (state == ctrl)
                enabled = true;

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
        WARN_ON(ctrl != state);

        vlv_punit_put(dev_priv);

        return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
        u32 val;

        /*
         * On driver load, a pipe may be active and driving a DSI display.
         * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
         * (and never recovering) in this case. intel_dsi_post_disable() will
         * clear it when we turn off the display.
         */
        val = I915_READ(DSPCLK_GATE_D);
        val &= DPOUNIT_CLOCK_GATE_DISABLE;
        val |= VRHUNIT_CLOCK_GATE_DISABLE;
        I915_WRITE(DSPCLK_GATE_D, val);

        /*
         * Disable trickle feed and enable pnd deadline calculation
         */
        I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
        I915_WRITE(CBR1_VLV, 0);

        WARN_ON(dev_priv->rawclk_freq == 0);

        I915_WRITE(RAWCLK_FREQ_VLV,
                   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        enum pipe pipe;

        /*
         * Enable the CRI clock source so we can get at the
         * display and the reference clock for VGA
         * hotplug / manual detection. Supposedly DSI also
         * needs the ref clock up and running.
         *
         * CHV DPLL B/C have some issues if VGA mode is enabled.
         */
        for_each_pipe(dev_priv, pipe) {
                u32 val = I915_READ(DPLL(pipe));

                val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
                if (pipe != PIPE_A)
                        val |= DPLL_INTEGRATED_CRI_CLK_VLV;

                I915_WRITE(DPLL(pipe), val);
        }

        vlv_init_display_clock_gating(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_enable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        /*
         * During driver initialization/resume we can avoid restoring the
         * part of the HW/SW state that will be inited anyway explicitly.
         */
        if (dev_priv->power_domains.initializing)
                return;

        intel_hpd_init(dev_priv);

        /* Re-enable the ADPA, if we have one */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                if (encoder->type == INTEL_OUTPUT_ANALOG)
                        intel_crt_reset(&encoder->base);
        }

        i915_redisable_vga_power_on(dev_priv);

        intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_disable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        /* make sure we're done processing display irqs */
        synchronize_irq(dev_priv->drm.irq);

        intel_power_sequencer_reset(dev_priv);

        /* Prevent us from re-enabling polling by accident in late suspend */
        if (!dev_priv->drm.dev->power.is_suspended)
                intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, true);

        vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        vlv_display_power_well_deinit(dev_priv);

        vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

        vlv_set_power_well(dev_priv, power_well, true);

        /*
         * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
         * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
         *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
         *    b. The other bits such as sfr settings / modesel may all
         *       be set to 0.
         *
         * This should only be done on init and resume from S3 with
         * both PLLs disabled, or we risk losing DPIO and PLL
         * synchronization.
         */
        I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe)
                assert_pll_disabled(dev_priv, pipe);

        /* Assert common reset */
        I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

        vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
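
/*
 * Editor's note (illustrative): BITS_SET() checks that *all* of the given
 * bits are set, e.g. BITS_SET(0b1010, 0b0010) is true while
 * BITS_SET(0b1010, 0b0110) is false.
 */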

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *cmn_bc =
                lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        struct i915_power_well *cmn_d =
                lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
        u32 phy_control = dev_priv->chv_phy_control;
        u32 phy_status = 0;
        u32 phy_status_mask = 0xffffffff;

        /*
         * The BIOS can leave the PHY in some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[DPIO_PHY0])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
                                     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

        if (!dev_priv->chv_phy_assert[DPIO_PHY1])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

        if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY0);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

                /* CL1 is on whenever anything is on in either channel */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

                /*
                 * The DPLLB check accounts for the pipe B + port A usage
                 * with CL2 powered up but all the lanes in the second channel
                 * powered down.
                 */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
                    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
        }

        if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY1);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
        }

        phy_status &= phy_status_mask;

        /*
         * The PHY may be busy with some initial calibration and whatnot,
         * so the power state can take a while to actually change.
         */
        if (intel_wait_for_register(&dev_priv->uncore,
                                    DISPLAY_PHY_STATUS,
                                    phy_status_mask,
                                    phy_status,
                                    10))
                DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
                          I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
                          phy_status, dev_priv->chv_phy_control);
}

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        enum dpio_phy phy;
        enum pipe pipe;
        u32 tmp;

        WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
                     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                pipe = PIPE_A;
                phy = DPIO_PHY0;
        } else {
                pipe = PIPE_C;
                phy = DPIO_PHY1;
        }

        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
        vlv_set_power_well(dev_priv, power_well, true);

        /* Poll for phypwrgood signal */
        if (intel_wait_for_register(&dev_priv->uncore,
                                    DISPLAY_PHY_STATUS,
                                    PHY_POWERGOOD(phy),
                                    PHY_POWERGOOD(phy),
                                    1))
                DRM_ERROR("Display PHY %d did not power up\n", phy);

        vlv_dpio_get(dev_priv);

        /* Enable dynamic power down */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
        tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
                DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
                tmp |= DPIO_DYNPWRDOWNEN_CH1;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
        } else {
                /*
                 * Force the non-existing CL2 off. BXT does this
                 * too, so maybe it saves some power even though
                 * CL2 doesn't exist?
                 */
                tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
                tmp |= DPIO_CL2_LDOFUSE_PWRENB;
                vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
        }

        vlv_dpio_put(dev_priv);

        dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                      phy, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum dpio_phy phy;

        WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
                     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                phy = DPIO_PHY0;
                assert_pll_disabled(dev_priv, PIPE_A);
                assert_pll_disabled(dev_priv, PIPE_B);
        } else {
                phy = DPIO_PHY1;
                assert_pll_disabled(dev_priv, PIPE_C);
        }

        dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        vlv_set_power_well(dev_priv, power_well, false);

        DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                      phy, dev_priv->chv_phy_control);

        /* PHY is fully reset now, so we can enable the PHY state asserts */
        dev_priv->chv_phy_assert[phy] = true;

        assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
                                     enum dpio_channel ch, bool override, unsigned int mask)
{
        enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
        u32 reg, val, expected, actual;

        /*
         * The BIOS can leave the PHY in some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[phy])
                return;

        if (ch == DPIO_CH0)
                reg = _CHV_CMN_DW0_CH0;
        else
                reg = _CHV_CMN_DW6_CH1;

        vlv_dpio_get(dev_priv);
        val = vlv_dpio_read(dev_priv, pipe, reg);
        vlv_dpio_put(dev_priv);

        /*
         * This assumes !override is only used when the port is disabled.
         * All lanes should power down even without the override when
         * the port is disabled.
         */
        if (!override || mask == 0xf) {
                expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
                /*
                 * If CH1 common lane is not active anymore
                 * (eg. for pipe B DPLL) the entire channel will
                 * shut down, which causes the common lane registers
                 * to read as 0. That means we can't actually check
                 * the lane power down status bits, but as the entire
                 * register reads as 0 it's a good indication that the
                 * channel is indeed entirely powered down.
                 */
                if (ch == DPIO_CH1 && val == 0)
                        expected = 0;
        } else if (mask != 0x0) {
                expected = DPIO_ANYDL_POWERDOWN;
        } else {
                expected = 0;
        }

        if (ch == DPIO_CH0)
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
        else
                actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
        actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

        WARN(actual != expected,
             "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
             !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
             !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
             reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
                          enum dpio_channel ch, bool override)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        bool was_override;

        mutex_lock(&power_domains->lock);

        was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

        if (override == was_override)
                goto out;

        if (override)
                dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
        else
                dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
                      phy, ch, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);

out:
        mutex_unlock(&power_domains->lock);

        return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
                             bool override, unsigned int mask)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

        mutex_lock(&power_domains->lock);

        dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
        dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

        if (override)
                dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
        else
                dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

        DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
                      phy, ch, mask, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);

        assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

        mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        enum pipe pipe = PIPE_A;
        bool enabled;
        u32 state, ctrl;

        vlv_punit_get(dev_priv);

        state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
        enabled = state == DP_SSS_PWR_ON(pipe);

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
        WARN_ON(ctrl << 16 != state);

        vlv_punit_put(dev_priv);

        return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well,
                                    bool enable)
{
        enum pipe pipe = PIPE_A;
        u32 state;
        u32 ctrl;

        state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

        vlv_punit_get(dev_priv);

#define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

        if (COND)
                goto out;

        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
        ctrl &= ~DP_SSC_MASK(pipe);
        ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
        vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

        if (wait_for(COND, 100))
                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                          state,
                          vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
        vlv_punit_put(dev_priv);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
{
        chv_set_pipe_power_well(dev_priv, power_well, true);

        vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well)
{
        vlv_display_power_well_deinit(dev_priv);

        chv_set_pipe_power_well(dev_priv, power_well, false);
}

static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
{
        return power_domains->async_put_domains[0] |
               power_domains->async_put_domains[1];
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
        return !WARN_ON(power_domains->async_put_domains[0] &
                        power_domains->async_put_domains[1]);
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
        enum intel_display_power_domain domain;
        bool err = false;

        err |= !assert_async_put_domain_masks_disjoint(power_domains);
        err |= WARN_ON(!!power_domains->async_put_wakeref !=
                       !!__async_put_domains_mask(power_domains));

        for_each_power_domain(domain, __async_put_domains_mask(power_domains))
                err |= WARN_ON(power_domains->domain_use_count[domain] != 1);

        return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
                                const char *prefix, u64 mask)
{
        enum intel_display_power_domain domain;

        DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
        for_each_power_domain(domain, mask)
                DRM_DEBUG_DRIVER("%s use_count %d\n",
                                 intel_display_power_domain_str(domain),
                                 power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
        DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
                         power_domains->async_put_wakeref);

        print_power_domains(power_domains, "async_put_domains[0]",
                            power_domains->async_put_domains[0]);
        print_power_domains(power_domains, "async_put_domains[1]",
                            power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
        if (!__async_put_domains_state_ok(power_domains))
                print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
{
        assert_async_put_domain_masks_disjoint(power_domains);

        return __async_put_domains_mask(power_domains);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
                               enum intel_display_power_domain domain)
{
        assert_async_put_domain_masks_disjoint(power_domains);

        power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
        power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
}

static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
                                       enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        bool ret = false;

        if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
                goto out_verify;

        async_put_domains_clear_domain(power_domains, domain);

        ret = true;

        if (async_put_domains_mask(power_domains))
                goto out_verify;

        cancel_delayed_work(&power_domains->async_put_work);
        intel_runtime_pm_put_raw(dev_priv,
                                 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
        verify_async_put_domains_state(power_domains);

        return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;

        if (intel_display_power_grab_async_put_ref(dev_priv, domain))
                return;

        for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
                intel_power_well_get(dev_priv, power_well);

        power_domains->domain_use_count[domain]++;
}
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}
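/*
 * Illustrative usage sketch, not part of the driver: a synchronous get/put
 * pair bracketing a stretch of register access. The domain chosen here is
 * an assumption made only for the example.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
 *	... access registers backed by the AUX A domain ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
 */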
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the power
 * domain is already enabled; it returns 0 otherwise, without grabbing any
 * reference.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
	if (!wakeref)
		return 0;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(dev_priv, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
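/*
 * Illustrative usage sketch, not part of the driver: reading HW state only
 * when the domain is already up, e.g. from a status query path, without
 * forcing a power-up. The domain is an assumption made for the example.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv,
 *						     POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return;
 *	... read pipe A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */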
static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);

	power_domains = &dev_priv->power_domains;

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     name);
	WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
	     "Async disabling of domain %s is pending\n",
	     name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_display_power_put() instead.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(dev_priv);
}
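/*
 * Illustrative sketch, not part of the driver: legacy callers that never
 * stored the wakeref pair their get with the unchecked put. The wakeref
 * tracking cannot verify such references, which is why new code should keep
 * the wakeref returned by the get and release it with
 * intel_display_power_put().
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *	...
 *	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_VGA);
 */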
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	WARN_ON(power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	WARN_ON(!queue_delayed_work(system_unbound_wq,
				    &power_domains->async_put_work,
				    msecs_to_jiffies(100)));
}
static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade that to a
	 * proper wakeref to keep the state checker happy about the HW access
	 * during power well disabling.
	 */
	assert_rpm_raw_wakeref_held(dev_priv);
	wakeref = intel_runtime_pm_get(dev_priv);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(dev_priv, wakeref);
}
static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(dev_priv, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(dev_priv, new_work_wakeref);
}
/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	WARN_ON(power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		power_domains->async_put_domains[1] |= BIT_ULL(domain);
	} else {
		power_domains->async_put_domains[0] |= BIT_ULL(domain);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(i915, work_wakeref);

	intel_runtime_pm_put(i915, wakeref);
}
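/*
 * Illustrative usage sketch, not part of the driver: callers go through the
 * intel_display_power_put_async() wrapper, which resolves to this function
 * on builds with wakeref tracking. The actual power-down is deferred by
 * roughly 100 ms, so a quickly following get on the same domain can cancel
 * it and avoid a needless off/on cycle.
 *
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 */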
/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  async_put_domains_mask(power_domains));
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(i915, work_wakeref);
}
/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	WARN_ON(power_domains->async_put_wakeref);
}
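/*
 * Illustrative context, not part of the driver: the sync variant is meant
 * for teardown-style paths, where no power work may remain running once the
 * call returns.
 *
 *	intel_display_power_flush_work_sync(i915);
 *	... safe to release structures the worker might have used ...
 */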
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(dev_priv, wakeref);
}
#endif
#define I830_PIPES_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CHV_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define HSW_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BDW_DISPLAY_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_GMBUS) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PIPE_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_PW_3_POWER_DOMAINS ( \
	ICL_PW_4_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_PIPE_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
	BIT_ULL(POWER_DOMAIN_AUX_B) | \
	BIT_ULL(POWER_DOMAIN_AUX_C) | \
	BIT_ULL(POWER_DOMAIN_AUX_D) | \
	BIT_ULL(POWER_DOMAIN_AUX_E) | \
	BIT_ULL(POWER_DOMAIN_AUX_F) | \
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
	BIT_ULL(POWER_DOMAIN_VGA) | \
	BIT_ULL(POWER_DOMAIN_AUDIO) | \
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS ( \
	ICL_PW_3_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
	ICL_PW_2_POWER_DOMAINS | \
	BIT_ULL(POWER_DOMAIN_MODESET) | \
	BIT_ULL(POWER_DOMAIN_AUX_A) | \
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_DDI_IO_A_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
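/*
 * Each mask above lists the domains a given power well serves: a well backs
 * a domain iff the domain's bit is set in its mask. Illustrative check, not
 * part of the driver:
 *
 *	if (ICL_PW_3_POWER_DOMAINS & BIT_ULL(POWER_DOMAIN_AUX_B))
 *		... AUX B traffic needs power well 3 on ICL ...
 */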
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios = HSW_PWR_WELL_CTL1,
	.driver = HSW_PWR_WELL_CTL2,
	.kvmr = HSW_PWR_WELL_CTL3,
	.debug = HSW_PWR_WELL_CTL4,
};

static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};

static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};
static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);

	return ret;
}
static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};
static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};
static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_tc_phy_aux_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_AUX1,
	.driver = ICL_PWR_WELL_CTL_AUX2,
	.debug = ICL_PWR_WELL_CTL_AUX4,
};

static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_DDI1,
	.driver = ICL_PWR_WELL_CTL_DDI2,
	.debug = ICL_PWR_WELL_CTL_DDI4,
};
static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D",
		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E",
		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F",
		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX TBT1",
		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT2",
		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT3",
		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT4",
		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (INTEL_GEN(dev_priv) >= 11) {
		max_dc = 2;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_GEN9_LP(dev_priv)) {
		max_dc = 1;
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	if (!i915_modparams.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}
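/*
 * Worked examples for the mapping above, derived from the code (module
 * parameter input on the left): on a platform with max_dc=2, enable_dc=0
 * keeps only the base mask, enable_dc=1 adds DC_STATE_EN_UPTO_DC5,
 * enable_dc=2 additionally adds DC_STATE_EN_UPTO_DC6, and enable_dc=-1
 * selects the platform maximum.
 */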
static int
__set_power_wells(struct i915_power_domains *power_domains,
		  const struct i915_power_well_desc *power_well_descs,
		  int power_well_count)
{
	u64 power_well_ids = 0;
	int i;

	power_domains->power_well_count = power_well_count;
	power_domains->power_wells =
				kcalloc(power_well_count,
					sizeof(*power_domains->power_wells),
					GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < power_well_count; i++) {
		enum i915_power_well_id id = power_well_descs[i].id;

		power_domains->power_wells[i].desc = &power_well_descs[i];

		if (id == DISP_PW_ID_NONE)
			continue;

		WARN_ON(id >= sizeof(power_well_ids) * 8);
		WARN_ON(power_well_ids & BIT_ULL(id));
		power_well_ids |= BIT_ULL(id);
	}

	return 0;
}

#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	i915_modparams.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   i915_modparams.disable_power_well);
	dev_priv->csr.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_GEN(dev_priv, 11)) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		err = set_power_wells(power_domains, cnl_power_wells);

		/*
		 * DDI and AUX IO are getting enabled for all ports
		 * regardless of their presence or use. So, in order to avoid
		 * timeouts, let's remove them from the list for the SKUs
		 * without port F.
		 */
		if (!IS_CNL_WITH_PORT_F(dev_priv))
			power_domains->power_well_count -= 2;
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}
/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	kfree(dev_priv->power_domains.power_wells);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}
static inline
bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
			  i915_reg_t reg, bool enable)
{
	u32 val, status;

	val = I915_READ(reg);
	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(10);

	status = I915_READ(reg) & DBUF_POWER_STATE;
	if ((enable && !status) || (!enable && status)) {
		DRM_ERROR("DBus power %s timeout!\n",
			  enable ? "enable" : "disable");
		return false;
	}
	return true;
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 11)
		return 1;
	return 2;
}

void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
			    u8 req_slices)
{
	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	bool ret;

	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
		DRM_ERROR("Invalid number of dbuf slices requested\n");
		return;
	}

	if (req_slices == hw_enabled_slices || req_slices == 0)
		return;

	if (req_slices > hw_enabled_slices)
		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
	else
		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);

	if (ret)
		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
}
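/*
 * Illustrative usage sketch, not part of the driver: requesting both DBuf
 * slices ahead of a high-bandwidth configuration. Slice 1 is assumed to be
 * enabled already, so only DBUF_CTL_S2 gets toggled.
 *
 *	icl_dbuf_slices_update(dev_priv, 2);
 */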
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);

	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
	else
		/*
		 * FIXME: for now pretend that we only have 1 slice, see
		 * intel_enabled_dbuf_slices_num().
		 */
		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}

static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);

	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power disable timeout!\n");
	else
		/*
		 * FIXME: for now pretend that the first slice is always
		 * enabled, see intel_enabled_dbuf_slices_num().
		 */
		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
	      MBUS_ABOX_B_CREDIT(1) |
	      MBUS_ABOX_BW_CREDIT(1);

	I915_WRITE(MBUS_ABOX_CTL, val);
}

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = I915_READ(LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		DRM_ERROR("CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		DRM_ERROR("LCPLL is disabled\n");
}
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
4085 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4087 if (IS_HASWELL(dev_priv))
4088 return I915_READ(D_COMP_HSW);
4090 return I915_READ(D_COMP_BDW);
4093 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4095 if (IS_HASWELL(dev_priv)) {
4096 if (sandybridge_pcode_write(dev_priv,
4097 GEN6_PCODE_WRITE_D_COMP, val))
			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
	} else {
		I915_WRITE(D_COMP_BDW, val);
4101 POSTING_READ(D_COMP_BDW);
4106 * This function implements pieces of two sequences from BSpec:
4107 * - Sequence for display software to disable LCPLL
4108 * - Sequence for display software to allow package C8+
4109 * The steps implemented here are just the steps that actually touch the LCPLL
4110 * register. Callers should take care of disabling all the display engine
4111 * functions, doing the mode unset, fixing interrupts, etc.
4113 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4114 bool switch_to_fclk, bool allow_power_down)
4118 assert_can_disable_lcpll(dev_priv);
4120 val = I915_READ(LCPLL_CTL);
4122 if (switch_to_fclk) {
4123 val |= LCPLL_CD_SOURCE_FCLK;
4124 I915_WRITE(LCPLL_CTL, val);
4126 if (wait_for_us(I915_READ(LCPLL_CTL) &
4127 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4128 DRM_ERROR("Switching to FCLK failed\n");
4130 val = I915_READ(LCPLL_CTL);
4133 val |= LCPLL_PLL_DISABLE;
4134 I915_WRITE(LCPLL_CTL, val);
4135 POSTING_READ(LCPLL_CTL);
4137 if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
4138 LCPLL_PLL_LOCK, 0, 1))
4139 DRM_ERROR("LCPLL still locked\n");
4141 val = hsw_read_dcomp(dev_priv);
4142 val |= D_COMP_COMP_DISABLE;
4143 hsw_write_dcomp(dev_priv, val);
4146 if (wait_for((hsw_read_dcomp(dev_priv) &
4147 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4148 DRM_ERROR("D_COMP RCOMP still in progress\n");
4150 if (allow_power_down) {
4151 val = I915_READ(LCPLL_CTL);
4152 val |= LCPLL_POWER_DOWN_ALLOW;
4153 I915_WRITE(LCPLL_CTL, val);
4154 POSTING_READ(LCPLL_CTL);
 * Fully restores LCPLL, disallowing power down and switching back to the
 * LCPLL clock source.
4162 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4166 val = I915_READ(LCPLL_CTL);
4168 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;
	 * Make sure we're not in PC8 state before disabling PC8, otherwise
4174 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4176 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4178 if (val & LCPLL_POWER_DOWN_ALLOW) {
4179 val &= ~LCPLL_POWER_DOWN_ALLOW;
4180 I915_WRITE(LCPLL_CTL, val);
4181 POSTING_READ(LCPLL_CTL);
4184 val = hsw_read_dcomp(dev_priv);
4185 val |= D_COMP_COMP_FORCE;
4186 val &= ~D_COMP_COMP_DISABLE;
4187 hsw_write_dcomp(dev_priv, val);
4189 val = I915_READ(LCPLL_CTL);
4190 val &= ~LCPLL_PLL_DISABLE;
4191 I915_WRITE(LCPLL_CTL, val);
4193 if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
4194 LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
4195 DRM_ERROR("LCPLL not locked yet\n");
4197 if (val & LCPLL_CD_SOURCE_FCLK) {
4198 val = I915_READ(LCPLL_CTL);
4199 val &= ~LCPLL_CD_SOURCE_FCLK;
4200 I915_WRITE(LCPLL_CTL, val);
4202 if (wait_for_us((I915_READ(LCPLL_CTL) &
4203 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4204 DRM_ERROR("Switching back to LCPLL failed\n");
4207 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4209 intel_update_cdclk(dev_priv);
4210 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
4214 * Package states C8 and deeper are really deep PC states that can only be
4215 * reached when all the devices on the system allow it, so even if the graphics
4216 * device allows PC8+, it doesn't mean the system will actually get to these
4217 * states. Our driver only allows PC8+ when going into runtime PM.
4219 * The requirements for PC8+ are that all the outputs are disabled, the power
4220 * well is disabled and most interrupts are disabled, and these are also
4221 * requirements for runtime PM. When these conditions are met, we manually do
4222 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can
 * hard-hang the machine.
4226 * When we really reach PC8 or deeper states (not just when we allow it) we lose
4227 * the state of some registers, so when we come back from PC8+ we need to
4228 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4229 * need to take care of the registers kept by RC6. Notice that this happens even
4230 * if we don't put the device in PCI D3 state (which is what currently happens
4231 * because of the runtime PM support).
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
4236 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4240 DRM_DEBUG_KMS("Enabling package C8+\n");
4242 if (HAS_PCH_LPT_LP(dev_priv)) {
4243 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4244 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4245 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4248 lpt_disable_clkout_dp(dev_priv);
4249 hsw_disable_lcpll(dev_priv, true, true);
4252 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4256 DRM_DEBUG_KMS("Disabling package C8+\n");
4258 hsw_restore_lcpll(dev_priv);
4259 intel_init_pch_refclk(dev_priv);
4261 if (HAS_PCH_LPT_LP(dev_priv)) {
4262 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4263 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4264 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
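/*
 * A minimal sketch of how the two PC8 helpers above pair up with the
 * runtime PM callbacks. The callback names below are illustrative
 * assumptions; the real hooks live in the driver's runtime
 * suspend/resume paths.
 */
#if 0
static int example_runtime_suspend(struct drm_i915_private *dev_priv)
{
	/* switch CDCLK to Fclk, disable LCPLL, allow power down */
	hsw_enable_pc8(dev_priv);
	return 0;
}

static int example_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* restore LCPLL and the PCH reference clock */
	hsw_disable_pc8(dev_priv);
	return 0;
}
#endif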
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;
	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}
	val = I915_READ(reg);
	val = enable ? (val | reset_bits) : (val & ~reset_bits);
	I915_WRITE(reg, val);
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
4295 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4296 struct i915_power_well *well;
4298 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4300 /* enable PCH reset handshake */
4301 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4303 /* enable PG1 and Misc I/O */
4304 mutex_lock(&power_domains->lock);
4306 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4307 intel_power_well_enable(dev_priv, well);
4309 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4310 intel_power_well_enable(dev_priv, well);
4312 mutex_unlock(&power_domains->lock);
4314 intel_cdclk_init(dev_priv);
4316 gen9_dbuf_enable(dev_priv);
4318 if (resume && dev_priv->csr.dmc_payload)
4319 intel_csr_load_program(dev_priv);
4322 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4324 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4325 struct i915_power_well *well;
4327 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4329 gen9_dbuf_disable(dev_priv);
4331 intel_cdclk_uninit(dev_priv);
4333 /* The spec doesn't call for removing the reset handshake flag */
4334 /* disable PG1 and Misc I/O */
4336 mutex_lock(&power_domains->lock);
4339 * BSpec says to keep the MISC IO power well enabled here, only
4340 * remove our request for power well 1.
	 * Note that even though the driver's request is removed, power well 1
	 * may stay enabled after this due to DMC's own request on it.
4344 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4345 intel_power_well_disable(dev_priv, well);
4347 mutex_unlock(&power_domains->lock);
4349 usleep_range(10, 30); /* 10 us delay per Bspec */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
4355 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4356 struct i915_power_well *well;
4358 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT,
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to the initialization sequence;
	 * previously it was left up to the BIOS.
4366 intel_pch_reset_handshake(dev_priv, false);
4369 mutex_lock(&power_domains->lock);
4371 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4372 intel_power_well_enable(dev_priv, well);
4374 mutex_unlock(&power_domains->lock);
4376 intel_cdclk_init(dev_priv);
4378 gen9_dbuf_enable(dev_priv);
4380 if (resume && dev_priv->csr.dmc_payload)
4381 intel_csr_load_program(dev_priv);
4384 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4386 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4387 struct i915_power_well *well;
4389 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4391 gen9_dbuf_disable(dev_priv);
4393 intel_cdclk_uninit(dev_priv);
4395 /* The spec doesn't call for removing the reset handshake flag */
4398 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed, power well 1
4400 * may stay enabled after this due to DMC's own request on it.
4402 mutex_lock(&power_domains->lock);
4404 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4405 intel_power_well_disable(dev_priv, well);
4407 mutex_unlock(&power_domains->lock);
4409 usleep_range(10, 30); /* 10 us delay per Bspec */
4412 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4414 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4415 struct i915_power_well *well;
4417 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4419 /* 1. Enable PCH Reset Handshake */
4420 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4423 intel_combo_phy_init(dev_priv);
4426 * 4. Enable Power Well 1 (PG1).
4427 * The AUX IO power wells will be enabled on demand.
4429 mutex_lock(&power_domains->lock);
4430 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4431 intel_power_well_enable(dev_priv, well);
4432 mutex_unlock(&power_domains->lock);
4434 /* 5. Enable CD clock */
4435 intel_cdclk_init(dev_priv);
4437 /* 6. Enable DBUF */
4438 gen9_dbuf_enable(dev_priv);
4440 if (resume && dev_priv->csr.dmc_payload)
4441 intel_csr_load_program(dev_priv);
4444 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4446 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4447 struct i915_power_well *well;
4449 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/* 1. Disable all display engine functions -> already done */
4453 /* 2. Disable DBUF */
4454 gen9_dbuf_disable(dev_priv);
4456 /* 3. Disable CD clock */
4457 intel_cdclk_uninit(dev_priv);
4460 * 4. Disable Power Well 1 (PG1).
4461 * The AUX IO power wells are toggled on demand, so they are already
4462 * disabled at this point.
4464 mutex_lock(&power_domains->lock);
4465 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4466 intel_power_well_disable(dev_priv, well);
4467 mutex_unlock(&power_domains->lock);
4469 usleep_range(10, 30); /* 10 us delay per Bspec */
4472 intel_combo_phy_uninit(dev_priv);
void icl_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
4478 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4479 struct i915_power_well *well;
4481 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4483 /* 1. Enable PCH reset handshake. */
4484 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4486 /* 2. Initialize all combo phys */
4487 intel_combo_phy_init(dev_priv);
4490 * 3. Enable Power Well 1 (PG1).
4491 * The AUX IO power wells will be enabled on demand.
4493 mutex_lock(&power_domains->lock);
4494 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4495 intel_power_well_enable(dev_priv, well);
4496 mutex_unlock(&power_domains->lock);
4498 /* 4. Enable CDCLK. */
4499 intel_cdclk_init(dev_priv);
4501 /* 5. Enable DBUF. */
4502 icl_dbuf_enable(dev_priv);
4504 /* 6. Setup MBUS. */
4505 icl_mbus_init(dev_priv);
4507 if (resume && dev_priv->csr.dmc_payload)
4508 intel_csr_load_program(dev_priv);
4511 void icl_display_core_uninit(struct drm_i915_private *dev_priv)
4513 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4514 struct i915_power_well *well;
4516 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/* 1. Disable all display engine functions -> already done */
4520 /* 2. Disable DBUF */
4521 icl_dbuf_disable(dev_priv);
4523 /* 3. Disable CD clock */
4524 intel_cdclk_uninit(dev_priv);
4527 * 4. Disable Power Well 1 (PG1).
4528 * The AUX IO power wells are toggled on demand, so they are already
4529 * disabled at this point.
4531 mutex_lock(&power_domains->lock);
4532 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4533 intel_power_well_disable(dev_priv, well);
4534 mutex_unlock(&power_domains->lock);
4537 intel_combo_phy_uninit(dev_priv);
4540 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
4542 struct i915_power_well *cmn_bc =
4543 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
4544 struct i915_power_well *cmn_d =
4545 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
4548 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
4549 * workaround never ever read DISPLAY_PHY_CONTROL, and
4550 * instead maintain a shadow copy ourselves. Use the actual
4551 * power well state and lane status to reconstruct the
4552 * expected initial value.
4554 dev_priv->chv_phy_control =
4555 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
4556 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
4557 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
4558 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
4559 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
4562 * If all lanes are disabled we leave the override disabled
4563 * with all power down bits cleared to match the state we
4564 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
4566 * current lane status.
4568 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
4569 u32 status = I915_READ(DPLL(PIPE_A));
4572 mask = status & DPLL_PORTB_READY_MASK;
4576 dev_priv->chv_phy_control |=
4577 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
4579 dev_priv->chv_phy_control |=
4580 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
4582 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
4586 dev_priv->chv_phy_control |=
4587 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
4589 dev_priv->chv_phy_control |=
4590 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
4592 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
4594 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
4596 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
4599 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
4600 u32 status = I915_READ(DPIO_PHY_STATUS);
4603 mask = status & DPLL_PORTD_READY_MASK;
4608 dev_priv->chv_phy_control |=
4609 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
4611 dev_priv->chv_phy_control |=
4612 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
4614 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
4616 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
4618 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
4621 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
4623 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
4624 dev_priv->chv_phy_control);
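/*
 * Because DISPLAY_PHY_CONTROL must never be read back, every later
 * update follows the same pattern: modify the cached value and write the
 * register out in full. A hedged sketch of that pattern (the helper name
 * is an assumption for illustration only):
 */
#if 0
static void example_chv_phy_update(struct drm_i915_private *dev_priv)
{
	/* modify only the shadow copy... */
	dev_priv->chv_phy_control |=
		PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

	/* ...then write the whole register from the shadow */
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
}
#endif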
4627 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
4629 struct i915_power_well *cmn =
4630 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
4631 struct i915_power_well *disp2d =
4632 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
	/* If the display might already be active, skip this */
4635 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
4636 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;
4640 DRM_DEBUG_KMS("toggling display PHY side reset\n");
4642 /* cmnlane needs DPLL registers */
4643 disp2d->desc->ops->enable(dev_priv, disp2d);
4646 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
4647 * Need to assert and de-assert PHY SB reset by gating the
4648 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY sufficiently to get
4650 * ports and lanes running.
4652 cmn->desc->ops->disable(dev_priv, cmn);
4655 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}
4666 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
4668 WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
4669 "VED not power gated\n");
4672 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
4674 static const struct pci_device_id isp_ids[] = {
4675 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
4676 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
4680 WARN(!pci_dev_present(isp_ids) &&
4681 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
4682 "ISP not power gated\n");
4685 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
4688 * intel_power_domains_init_hw - initialize hardware power domain state
4689 * @i915: i915 device instance
4690 * @resume: Called from resume code paths or not
4692 * This function initializes the hardware power domain state and enables all
4693 * power wells belonging to the INIT power domain. Power wells in other
4694 * domains (and not in the INIT domain) are referenced or disabled by
4695 * intel_modeset_readout_hw_state(). After that the reference count of each
4696 * power well must match its HW enabled state, see
4697 * intel_power_domains_verify_state().
4699 * It will return with power domains disabled (to be enabled later by
4700 * intel_power_domains_enable()) and must be paired with
4701 * intel_power_domains_fini_hw().
4703 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
4705 struct i915_power_domains *power_domains = &i915->power_domains;
4707 power_domains->initializing = true;
4709 if (INTEL_GEN(i915) >= 11) {
4710 icl_display_core_init(i915, resume);
4711 } else if (IS_CANNONLAKE(i915)) {
4712 cnl_display_core_init(i915, resume);
4713 } else if (IS_GEN9_BC(i915)) {
4714 skl_display_core_init(i915, resume);
4715 } else if (IS_GEN9_LP(i915)) {
4716 bxt_display_core_init(i915, resume);
4717 } else if (IS_CHERRYVIEW(i915)) {
4718 mutex_lock(&power_domains->lock);
4719 chv_phy_control_init(i915);
4720 mutex_unlock(&power_domains->lock);
4721 assert_isp_power_gated(i915);
4722 } else if (IS_VALLEYVIEW(i915)) {
4723 mutex_lock(&power_domains->lock);
4724 vlv_cmnlane_wa(i915);
4725 mutex_unlock(&power_domains->lock);
4726 assert_ved_power_gated(i915);
4727 assert_isp_power_gated(i915);
4728 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
4729 hsw_assert_cdclk(i915);
4730 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4731 } else if (IS_IVYBRIDGE(i915)) {
4732 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4736 * Keep all power wells enabled for any dependent HW access during
4737 * initialization and to make sure we keep BIOS enabled display HW
4738 * resources powered until display HW readout is complete. We drop
4739 * this reference in intel_power_domains_enable().
4741 power_domains->wakeref =
4742 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4744 /* Disable power support if the user asked so. */
4745 if (!i915_modparams.disable_power_well)
4746 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4747 intel_power_domains_sync_hw(i915);
4749 power_domains->initializing = false;
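/*
 * For orientation, a simplified sketch of how the power domain entry
 * points are ordered over the driver's lifetime. This is an illustration
 * distilled from the kernel-doc comments, not a verbatim copy of the
 * driver load path:
 */
#if 0
	intel_power_domains_init_hw(i915, false);
	/* ... display HW state readout takes its own references ... */
	intel_power_domains_enable(i915);
	/* ... normal operation ... */
	intel_power_domains_disable(i915);
	intel_power_domains_fini_hw(i915);
#endif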
4753 * intel_power_domains_fini_hw - deinitialize hw power domain state
4754 * @i915: i915 device instance
4756 * De-initializes the display power domain HW state. It also ensures that the
4757 * device stays powered up so that the driver can be reloaded.
4759 * It must be called with power domains already disabled (after a call to
4760 * intel_power_domains_disable()) and must be paired with
4761 * intel_power_domains_init_hw().
4763 void intel_power_domains_fini_hw(struct drm_i915_private *i915)
4765 intel_wakeref_t wakeref __maybe_unused =
4766 fetch_and_zero(&i915->power_domains.wakeref);
4768 /* Remove the refcount we took to keep power well support disabled. */
4769 if (!i915_modparams.disable_power_well)
4770 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4772 intel_display_power_flush_work_sync(i915);
4774 intel_power_domains_verify_state(i915);
4776 /* Keep the power well enabled, but cancel its rpm wakeref. */
4777 intel_runtime_pm_put(i915, wakeref);
4781 * intel_power_domains_enable - enable toggling of display power wells
4782 * @i915: i915 device instance
 * Enable the on-demand enabling/disabling of the display power wells. Note that
4785 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4786 * only at specific points of the display modeset sequence, thus they are not
4787 * affected by the intel_power_domains_enable()/disable() calls. The purpose
 * of these functions is to keep the rest of the power wells enabled until the end
4789 * of display HW readout (which will acquire the power references reflecting
4790 * the current HW state).
4792 void intel_power_domains_enable(struct drm_i915_private *i915)
4794 intel_wakeref_t wakeref __maybe_unused =
4795 fetch_and_zero(&i915->power_domains.wakeref);
4797 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4798 intel_power_domains_verify_state(i915);
4802 * intel_power_domains_disable - disable toggling of display power wells
4803 * @i915: i915 device instance
 * Disable the on-demand enabling/disabling of the display power wells. See
4806 * intel_power_domains_enable() for which power wells this call controls.
4808 void intel_power_domains_disable(struct drm_i915_private *i915)
4810 struct i915_power_domains *power_domains = &i915->power_domains;
4812 WARN_ON(power_domains->wakeref);
4813 power_domains->wakeref =
4814 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4816 intel_power_domains_verify_state(i915);
4820 * intel_power_domains_suspend - suspend power domain state
4821 * @i915: i915 device instance
4822 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 * This function prepares the hardware power domain state before entering
 * system suspend.
4827 * It must be called with power domains already disabled (after a call to
4828 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
4830 void intel_power_domains_suspend(struct drm_i915_private *i915,
4831 enum i915_drm_suspend_mode suspend_mode)
4833 struct i915_power_domains *power_domains = &i915->power_domains;
4834 intel_wakeref_t wakeref __maybe_unused =
4835 fetch_and_zero(&power_domains->wakeref);
4837 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4840 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
4841 * support don't manually deinit the power domains. This also means the
4842 * CSR/DMC firmware will stay active, it will power down any HW
4843 * resources as required and also enable deeper system power states
4844 * that would be blocked if the firmware was inactive.
4846 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
4847 suspend_mode == I915_DRM_SUSPEND_IDLE &&
4848 i915->csr.dmc_payload) {
4849 intel_display_power_flush_work(i915);
4850 intel_power_domains_verify_state(i915);
4855 * Even if power well support was disabled we still want to disable
4856 * power wells if power domains must be deinitialized for suspend.
4858 if (!i915_modparams.disable_power_well)
4859 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4861 intel_display_power_flush_work(i915);
4862 intel_power_domains_verify_state(i915);
4864 if (INTEL_GEN(i915) >= 11)
4865 icl_display_core_uninit(i915);
4866 else if (IS_CANNONLAKE(i915))
4867 cnl_display_core_uninit(i915);
4868 else if (IS_GEN9_BC(i915))
4869 skl_display_core_uninit(i915);
4870 else if (IS_GEN9_LP(i915))
4871 bxt_display_core_uninit(i915);
4873 power_domains->display_core_suspended = true;
4877 * intel_power_domains_resume - resume power domain state
4878 * @i915: i915 device instance
 * This function resumes the hardware power domain state during system resume.
4882 * It will return with power domain support disabled (to be enabled later by
4883 * intel_power_domains_enable()) and must be paired with
4884 * intel_power_domains_suspend().
4886 void intel_power_domains_resume(struct drm_i915_private *i915)
4888 struct i915_power_domains *power_domains = &i915->power_domains;
4890 if (power_domains->display_core_suspended) {
4891 intel_power_domains_init_hw(i915, true);
4892 power_domains->display_core_suspended = false;
4894 WARN_ON(power_domains->wakeref);
4895 power_domains->wakeref =
4896 intel_display_power_get(i915, POWER_DOMAIN_INIT);
4899 intel_power_domains_verify_state(i915);
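/*
 * Likewise, a simplified sketch of the system suspend/resume ordering
 * implied by the kernel-doc above (illustrative only, not the literal
 * suspend path):
 */
#if 0
	intel_power_domains_disable(i915);
	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
	/* ... system sleeps ... */
	intel_power_domains_resume(i915);
	intel_power_domains_enable(i915);
#endif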
4902 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4904 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
4906 struct i915_power_domains *power_domains = &i915->power_domains;
4907 struct i915_power_well *power_well;
4909 for_each_power_well(i915, power_well) {
4910 enum intel_display_power_domain domain;
4912 DRM_DEBUG_DRIVER("%-25s %d\n",
4913 power_well->desc->name, power_well->count);
4915 for_each_power_domain(domain, power_well->desc->domains)
4916 DRM_DEBUG_DRIVER(" %-23s %d\n",
4917 intel_display_power_domain_str(domain),
4918 power_domains->domain_use_count[domain]);
4923 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
4924 * @i915: i915 device instance
4926 * Verify if the reference count of each power well matches its HW enabled
4927 * state and the total refcount of the domains it belongs to. This must be
4928 * called after modeset HW state sanitization, which is responsible for
4929 * acquiring reference counts for any power wells in use and disabling the
4930 * ones left on by BIOS but not required by any active output.
4932 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
4934 struct i915_power_domains *power_domains = &i915->power_domains;
4935 struct i915_power_well *power_well;
4936 bool dump_domain_info;
4938 mutex_lock(&power_domains->lock);
4940 verify_async_put_domains_state(power_domains);
4942 dump_domain_info = false;
4943 for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
				  power_well->desc->name,
				  power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];
4959 if (power_well->count != domains_count) {
4960 DRM_ERROR("power well %s refcount/domain refcount mismatch "
4961 "(refcount %d/domains refcount %d)\n",
				  power_well->desc->name, power_well->count,
				  domains_count);
			dump_domain_info = true;
4968 if (dump_domain_info) {
4972 intel_power_domains_dump_info(i915);
4977 mutex_unlock(&power_domains->lock);
4982 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915,
					      bool wakelock)
{
4991 struct pci_dev *pdev = i915->drm.pdev;
4992 struct device *kdev = &pdev->dev;
4995 ret = pm_runtime_get_sync(kdev);
4996 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4998 intel_runtime_pm_acquire(i915, wakelock);
5000 return track_intel_runtime_pm_wakeref(i915);
5003 static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
5005 return __intel_runtime_pm_get(i915, false);
5009 * intel_runtime_pm_get - grab a runtime pm reference
5010 * @i915: i915 device instance
5012 * This function grabs a device-level runtime pm reference (mostly used for GEM
5013 * code to ensure the GTT or GT is on) and ensures that it is powered up.
5015 * Any runtime pm reference obtained by this function must have a symmetric
5016 * call to intel_runtime_pm_put() to release the reference again.
5018 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
5020 intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
5022 return __intel_runtime_pm_get(i915, true);
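/*
 * A minimal usage sketch for the get/put pairing described above; the
 * surrounding context (where the i915 pointer comes from) is assumed:
 */
#if 0
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);
	/* ... the device is now awake, access hardware ... */
	intel_runtime_pm_put(i915, wakeref);
#endif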
5026 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
5027 * @i915: i915 device instance
5029 * This function grabs a device-level runtime pm reference if the device is
5030 * already in use and ensures that it is powered up. It is illegal to try
5031 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
5033 * Any runtime pm reference obtained by this function must have a symmetric
5034 * call to intel_runtime_pm_put() to release the reference again.
5036 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
5037 * as True if the wakeref was acquired, or False otherwise.
5039 intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
5041 if (IS_ENABLED(CONFIG_PM)) {
5042 struct pci_dev *pdev = i915->drm.pdev;
5043 struct device *kdev = &pdev->dev;
		 * In cases where runtime PM is disabled by the RPM core and
		 * we get an -EINVAL return value we are not supposed to call
		 * this function, since the power state is undefined. This
		 * currently applies to the late/early system suspend/resume
		 * handlers.
		if (pm_runtime_get_if_in_use(kdev) <= 0)
			return 0;
	}
5055 intel_runtime_pm_acquire(i915, true);
5057 return track_intel_runtime_pm_wakeref(i915);
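/*
 * A sketch of the conditional pattern this enables: touch the hardware
 * only if it is already awake, and skip the work otherwise (context
 * assumed, as above):
 */
#if 0
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_if_in_use(i915);
	if (wakeref) {
		/* ... read registers, device guaranteed powered ... */
		intel_runtime_pm_put(i915, wakeref);
	}
#endif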
5061 * intel_runtime_pm_get_noresume - grab a runtime pm reference
5062 * @i915: i915 device instance
5064 * This function grabs a device-level runtime pm reference (mostly used for GEM
5065 * code to ensure the GTT or GT is on).
5067 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this function from contexts where
5069 * the device is known to be powered up and where trying to power it up would
5070 * result in hilarity and deadlocks. That pretty much means only the system
5071 * suspend/resume code where this is used to grab runtime pm references for
5072 * delayed setup down in work items.
5074 * Any runtime pm reference obtained by this function must have a symmetric
5075 * call to intel_runtime_pm_put() to release the reference again.
5077 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
5079 intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
5081 struct pci_dev *pdev = i915->drm.pdev;
5082 struct device *kdev = &pdev->dev;
5084 assert_rpm_wakelock_held(i915);
5085 pm_runtime_get_noresume(kdev);
5087 intel_runtime_pm_acquire(i915, true);
5089 return track_intel_runtime_pm_wakeref(i915);
5092 static void __intel_runtime_pm_put(struct drm_i915_private *i915,
				   intel_wakeref_t wref,
				   bool wakelock)
{
5096 struct pci_dev *pdev = i915->drm.pdev;
5097 struct device *kdev = &pdev->dev;
5099 untrack_intel_runtime_pm_wakeref(i915, wref);
5101 intel_runtime_pm_release(i915, wakelock);
5103 pm_runtime_mark_last_busy(kdev);
5104 pm_runtime_put_autosuspend(kdev);
5107 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5109 intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
5111 __intel_runtime_pm_put(i915, wref, false);
5116 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
5117 * @i915: i915 device instance
5119 * This function drops the device-level runtime pm reference obtained by
5120 * intel_runtime_pm_get() and might power down the corresponding
5121 * hardware block right away if this is the last reference.
5123 * This function exists only for historical reasons and should be avoided in
5124 * new code, as the correctness of its use cannot be checked. Always use
5125 * intel_runtime_pm_put() instead.
5127 void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
5129 __intel_runtime_pm_put(i915, -1, true);
5132 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5134 * intel_runtime_pm_put - release a runtime pm reference
5135 * @i915: i915 device instance
5136 * @wref: wakeref acquired for the reference that is being released
5138 * This function drops the device-level runtime pm reference obtained by
5139 * intel_runtime_pm_get() and might power down the corresponding
5140 * hardware block right away if this is the last reference.
5142 void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
5144 __intel_runtime_pm_put(i915, wref, true);
5149 * intel_runtime_pm_enable - enable runtime pm
5150 * @i915: i915 device instance
5152 * This function enables runtime pm at the end of the driver load sequence.
5154 * Note that this function does currently not enable runtime pm for the
5155 * subordinate display power domains. That is done by
5156 * intel_power_domains_enable().
5158 void intel_runtime_pm_enable(struct drm_i915_private *i915)
5160 struct pci_dev *pdev = i915->drm.pdev;
5161 struct device *kdev = &pdev->dev;
5164 * Disable the system suspend direct complete optimization, which can
5165 * leave the device suspended skipping the driver's suspend handlers
5166 * if the device was already runtime suspended. This is needed due to
5167 * the difference in our runtime and system suspend sequence and
 * because the HDA driver may require us to enable the audio power
5169 * domain during system suspend.
5171 dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
5173 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
5174 pm_runtime_mark_last_busy(kdev);
5177 * Take a permanent reference to disable the RPM functionality and drop
5178 * it only when unloading the driver. Use the low level get/put helpers,
5179 * so the driver's own RPM reference tracking asserts also work on
5180 * platforms without RPM support.
	if (!HAS_RUNTIME_PM(i915)) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}
5193 * The core calls the driver load handler with an RPM reference held.
5194 * We drop that here and will reacquire it during unloading in
5195 * intel_power_domains_fini().
5197 pm_runtime_put_autosuspend(kdev);
5200 void intel_runtime_pm_disable(struct drm_i915_private *i915)
5202 struct pci_dev *pdev = i915->drm.pdev;
5203 struct device *kdev = &pdev->dev;
5205 /* Transfer rpm ownership back to core */
5206 WARN(pm_runtime_get_sync(kdev) < 0,
5207 "Failed to pass rpm ownership back to core\n");
5209 pm_runtime_dont_use_autosuspend(kdev);
5211 if (!HAS_RUNTIME_PM(i915))
5212 pm_runtime_put(kdev);
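/*
 * Sketch of how the enable/disable/cleanup trio pairs up across driver
 * load and unload (illustrative ordering only):
 */
#if 0
	intel_runtime_pm_enable(i915);	/* end of load: allow autosuspend */
	/* ... driver lifetime ... */
	intel_runtime_pm_disable(i915);	/* unload: hand RPM back to the core */
	intel_runtime_pm_cleanup(i915);	/* warn about any leaked wakerefs */
#endif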
5215 void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
5217 struct i915_runtime_pm *rpm = &i915->runtime_pm;
5218 int count = atomic_read(&rpm->wakeref_count);
5221 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
5222 intel_rpm_raw_wakeref_count(count),
5223 intel_rpm_wakelock_count(count));
5225 untrack_all_intel_runtime_pm_wakerefs(i915);
5228 void intel_runtime_pm_init_early(struct drm_i915_private *i915)
5230 init_intel_runtime_pm_wakeref(i915);