2 * Copyright © 2012-2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
33 #include "intel_drv.h"
38 * The i915 driver supports dynamic enabling and disabling of entire hardware
39 * blocks at runtime. This is especially important on the display side where
40 * software is supposed to control many power gates manually on recent hardware,
41 * since on the GT side a lot of the power management is done by the hardware.
42 * But even there some manual control at the device level is required.
44 * Since i915 supports a diverse set of platforms with a unified codebase and
45 * hardware engineers just love to shuffle functionality around between power
46 * domains there's a sizeable amount of indirection required. This file provides
47 * generic functions to the driver for grabbing and releasing references for
48 * abstract power domains. It then maps those to the actual power wells
49 * present for a given platform.
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53 enum i915_power_well_id power_well_id);
56 intel_display_power_domain_str(enum intel_display_power_domain domain)
59 case POWER_DOMAIN_PIPE_A:
61 case POWER_DOMAIN_PIPE_B:
63 case POWER_DOMAIN_PIPE_C:
65 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
66 return "PIPE_A_PANEL_FITTER";
67 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
68 return "PIPE_B_PANEL_FITTER";
69 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
70 return "PIPE_C_PANEL_FITTER";
71 case POWER_DOMAIN_TRANSCODER_A:
72 return "TRANSCODER_A";
73 case POWER_DOMAIN_TRANSCODER_B:
74 return "TRANSCODER_B";
75 case POWER_DOMAIN_TRANSCODER_C:
76 return "TRANSCODER_C";
77 case POWER_DOMAIN_TRANSCODER_EDP:
78 return "TRANSCODER_EDP";
79 case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
80 return "TRANSCODER_EDP_VDSC";
81 case POWER_DOMAIN_TRANSCODER_DSI_A:
82 return "TRANSCODER_DSI_A";
83 case POWER_DOMAIN_TRANSCODER_DSI_C:
84 return "TRANSCODER_DSI_C";
85 case POWER_DOMAIN_PORT_DDI_A_LANES:
86 return "PORT_DDI_A_LANES";
87 case POWER_DOMAIN_PORT_DDI_B_LANES:
88 return "PORT_DDI_B_LANES";
89 case POWER_DOMAIN_PORT_DDI_C_LANES:
90 return "PORT_DDI_C_LANES";
91 case POWER_DOMAIN_PORT_DDI_D_LANES:
92 return "PORT_DDI_D_LANES";
93 case POWER_DOMAIN_PORT_DDI_E_LANES:
94 return "PORT_DDI_E_LANES";
95 case POWER_DOMAIN_PORT_DDI_F_LANES:
96 return "PORT_DDI_F_LANES";
97 case POWER_DOMAIN_PORT_DDI_A_IO:
98 return "PORT_DDI_A_IO";
99 case POWER_DOMAIN_PORT_DDI_B_IO:
100 return "PORT_DDI_B_IO";
101 case POWER_DOMAIN_PORT_DDI_C_IO:
102 return "PORT_DDI_C_IO";
103 case POWER_DOMAIN_PORT_DDI_D_IO:
104 return "PORT_DDI_D_IO";
105 case POWER_DOMAIN_PORT_DDI_E_IO:
106 return "PORT_DDI_E_IO";
107 case POWER_DOMAIN_PORT_DDI_F_IO:
108 return "PORT_DDI_F_IO";
109 case POWER_DOMAIN_PORT_DSI:
111 case POWER_DOMAIN_PORT_CRT:
113 case POWER_DOMAIN_PORT_OTHER:
115 case POWER_DOMAIN_VGA:
117 case POWER_DOMAIN_AUDIO:
119 case POWER_DOMAIN_PLLS:
121 case POWER_DOMAIN_AUX_A:
123 case POWER_DOMAIN_AUX_B:
125 case POWER_DOMAIN_AUX_C:
127 case POWER_DOMAIN_AUX_D:
129 case POWER_DOMAIN_AUX_E:
131 case POWER_DOMAIN_AUX_F:
133 case POWER_DOMAIN_AUX_IO_A:
135 case POWER_DOMAIN_AUX_TBT1:
137 case POWER_DOMAIN_AUX_TBT2:
139 case POWER_DOMAIN_AUX_TBT3:
141 case POWER_DOMAIN_AUX_TBT4:
143 case POWER_DOMAIN_GMBUS:
145 case POWER_DOMAIN_INIT:
147 case POWER_DOMAIN_MODESET:
149 case POWER_DOMAIN_GT_IRQ:
152 MISSING_CASE(domain);
157 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
158 struct i915_power_well *power_well)
160 DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
161 power_well->desc->ops->enable(dev_priv, power_well);
162 power_well->hw_enabled = true;
165 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
166 struct i915_power_well *power_well)
168 DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
169 power_well->hw_enabled = false;
170 power_well->desc->ops->disable(dev_priv, power_well);
173 static void intel_power_well_get(struct drm_i915_private *dev_priv,
174 struct i915_power_well *power_well)
176 if (!power_well->count++)
177 intel_power_well_enable(dev_priv, power_well);
180 static void intel_power_well_put(struct drm_i915_private *dev_priv,
181 struct i915_power_well *power_well)
183 WARN(!power_well->count, "Use count on power well %s is already zero",
184 power_well->desc->name);
186 if (!--power_well->count)
187 intel_power_well_disable(dev_priv, power_well);
191 * __intel_display_power_is_enabled - unlocked check for a power domain
192 * @dev_priv: i915 device instance
193 * @domain: power domain to check
195 * This is the unlocked version of intel_display_power_is_enabled() and should
196 * only be used from error capture and recovery code where deadlocks are
200 * True when the power domain is enabled, false otherwise.
202 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
203 enum intel_display_power_domain domain)
205 struct i915_power_well *power_well;
208 if (dev_priv->runtime_pm.suspended)
213 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
214 if (power_well->desc->always_on)
217 if (!power_well->hw_enabled) {
227 * intel_display_power_is_enabled - check for a power domain
228 * @dev_priv: i915 device instance
229 * @domain: power domain to check
231 * This function can be used to check the hw power domain state. It is mostly
232 * used in hardware state readout functions. Everywhere else code should rely
233 * upon explicit power domain reference counting to ensure that the hardware
234 * block is powered up before accessing it.
236 * Callers must hold the relevant modesetting locks to ensure that concurrent
237 * threads can't disable the power well while the caller tries to read a few
241 * True when the power domain is enabled, false otherwise.
243 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
244 enum intel_display_power_domain domain)
246 struct i915_power_domains *power_domains;
249 power_domains = &dev_priv->power_domains;
251 mutex_lock(&power_domains->lock);
252 ret = __intel_display_power_is_enabled(dev_priv, domain);
253 mutex_unlock(&power_domains->lock);
259 * Starting with Haswell, we have a "Power Down Well" that can be turned off
260 * when not needed anymore. We have 4 registers that can request the power well
261 * to be enabled, and it will only be disabled if none of the registers is
262 * requesting it to be enabled.
264 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
265 u8 irq_pipe_mask, bool has_vga)
267 struct pci_dev *pdev = dev_priv->drm.pdev;
270 * After we re-enable the power well, if we touch VGA register 0x3d5
271 * we'll get unclaimed register interrupts. This stops after we write
272 * anything to the VGA MSR register. The vgacon module uses this
273 * register all the time, so if we unbind our driver and, as a
274 * consequence, bind vgacon, we'll get stuck in an infinite loop at
275 * console_unlock(). So make here we touch the VGA MSR register, making
276 * sure vgacon can keep working normally without triggering interrupts
277 * and error messages.
280 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
281 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
282 vga_put(pdev, VGA_RSRC_LEGACY_IO);
286 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
289 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
293 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
297 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
298 struct i915_power_well *power_well)
300 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
301 int pw_idx = power_well->desc->hsw.idx;
303 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
304 WARN_ON(intel_wait_for_register(dev_priv,
306 HSW_PWR_WELL_CTL_STATE(pw_idx),
307 HSW_PWR_WELL_CTL_STATE(pw_idx),
311 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
312 const struct i915_power_well_regs *regs,
315 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
318 ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
319 ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
321 ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
322 ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
327 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
328 struct i915_power_well *power_well)
330 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
331 int pw_idx = power_well->desc->hsw.idx;
336 * Bspec doesn't require waiting for PWs to get disabled, but still do
337 * this for paranoia. The known cases where a PW will be forced on:
338 * - a KVMR request on any power well via the KVMR request register
339 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
340 * DEBUG request registers
341 * Skip the wait in case any of the request bits are set and print a
342 * diagnostic message.
344 wait_for((disabled = !(I915_READ(regs->driver) &
345 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
346 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
350 DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
351 power_well->desc->name,
352 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
355 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
356 enum skl_power_gate pg)
358 /* Timeout 5us for PG#0, for other PGs 1us */
359 WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
360 SKL_FUSE_PG_DIST_STATUS(pg),
361 SKL_FUSE_PG_DIST_STATUS(pg), 1));
364 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
365 struct i915_power_well *power_well)
367 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
368 int pw_idx = power_well->desc->hsw.idx;
369 bool wait_fuses = power_well->desc->hsw.has_fuses;
370 enum skl_power_gate uninitialized_var(pg);
374 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
375 SKL_PW_CTL_IDX_TO_PG(pw_idx);
377 * For PW1 we have to wait both for the PW0/PG0 fuse state
378 * before enabling the power well and PW1/PG1's own fuse
379 * state after the enabling. For all other power wells with
380 * fuses we only have to wait for that PW/PG's fuse state
381 * after the enabling.
384 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
387 val = I915_READ(regs->driver);
388 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
389 hsw_wait_for_power_well_enable(dev_priv, power_well);
391 /* Display WA #1178: cnl */
392 if (IS_CANNONLAKE(dev_priv) &&
393 pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
394 pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
395 val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
396 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
397 I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
401 gen9_wait_for_power_well_fuses(dev_priv, pg);
403 hsw_power_well_post_enable(dev_priv,
404 power_well->desc->hsw.irq_pipe_mask,
405 power_well->desc->hsw.has_vga);
408 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
409 struct i915_power_well *power_well)
411 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
412 int pw_idx = power_well->desc->hsw.idx;
415 hsw_power_well_pre_disable(dev_priv,
416 power_well->desc->hsw.irq_pipe_mask);
418 val = I915_READ(regs->driver);
419 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
420 hsw_wait_for_power_well_disable(dev_priv, power_well);
423 #define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
426 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
427 struct i915_power_well *power_well)
429 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
430 int pw_idx = power_well->desc->hsw.idx;
431 enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
434 val = I915_READ(regs->driver);
435 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
437 val = I915_READ(ICL_PORT_CL_DW12(port));
438 I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
440 hsw_wait_for_power_well_enable(dev_priv, power_well);
442 /* Display WA #1178: icl */
443 if (IS_ICELAKE(dev_priv) &&
444 pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
445 !intel_bios_is_port_edp(dev_priv, port)) {
446 val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
447 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
448 I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
453 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
454 struct i915_power_well *power_well)
456 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
457 int pw_idx = power_well->desc->hsw.idx;
458 enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
461 val = I915_READ(ICL_PORT_CL_DW12(port));
462 I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
464 val = I915_READ(regs->driver);
465 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
467 hsw_wait_for_power_well_disable(dev_priv, power_well);
470 #define ICL_AUX_PW_TO_CH(pw_idx) \
471 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
474 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
475 struct i915_power_well *power_well)
477 enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
480 val = I915_READ(DP_AUX_CH_CTL(aux_ch));
481 val &= ~DP_AUX_CH_CTL_TBT_IO;
482 if (power_well->desc->hsw.is_tc_tbt)
483 val |= DP_AUX_CH_CTL_TBT_IO;
484 I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
486 hsw_power_well_enable(dev_priv, power_well);
490 * We should only use the power well if we explicitly asked the hardware to
491 * enable it, so check if it's enabled and also check if we've requested it to
494 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
495 struct i915_power_well *power_well)
497 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
498 enum i915_power_well_id id = power_well->desc->id;
499 int pw_idx = power_well->desc->hsw.idx;
500 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
501 HSW_PWR_WELL_CTL_STATE(pw_idx);
504 val = I915_READ(regs->driver);
507 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
508 * and the MISC_IO PW will be not restored, so check instead for the
509 * BIOS's own request bits, which are forced-on for these power wells
510 * when exiting DC5/6.
512 if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
513 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
514 val |= I915_READ(regs->bios);
516 return (val & mask) == mask;
519 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
521 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
522 "DC9 already programmed to be enabled.\n");
523 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
524 "DC5 still not disabled to enable DC9.\n");
525 WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
526 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
527 "Power well 2 on.\n");
528 WARN_ONCE(intel_irqs_enabled(dev_priv),
529 "Interrupts not disabled yet.\n");
532 * TODO: check for the following to verify the conditions to enter DC9
533 * state are satisfied:
534 * 1] Check relevant display engine registers to verify if mode set
535 * disable sequence was followed.
536 * 2] Check if display uninitialize sequence is initialized.
540 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
542 WARN_ONCE(intel_irqs_enabled(dev_priv),
543 "Interrupts not disabled yet.\n");
544 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
545 "DC5 still not disabled.\n");
548 * TODO: check for the following to verify DC9 state was indeed
549 * entered before programming to disable it:
550 * 1] Check relevant display engine registers to verify if mode
551 * set disable sequence was followed.
552 * 2] Check if display uninitialize sequence is initialized.
556 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
563 I915_WRITE(DC_STATE_EN, state);
565 /* It has been observed that disabling the dc6 state sometimes
566 * doesn't stick and dmc keeps returning old value. Make sure
567 * the write really sticks enough times and also force rewrite until
568 * we are confident that state is exactly what we want.
571 v = I915_READ(DC_STATE_EN);
574 I915_WRITE(DC_STATE_EN, state);
577 } else if (rereads++ > 5) {
581 } while (rewrites < 100);
584 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
587 /* Most of the times we need one retry, avoid spam */
589 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
593 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
597 mask = DC_STATE_EN_UPTO_DC5;
598 if (INTEL_GEN(dev_priv) >= 11)
599 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
600 else if (IS_GEN9_LP(dev_priv))
601 mask |= DC_STATE_EN_DC9;
603 mask |= DC_STATE_EN_UPTO_DC6;
608 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
612 val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
614 DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
615 dev_priv->csr.dc_state, val);
616 dev_priv->csr.dc_state = val;
620 * gen9_set_dc_state - set target display C power state
621 * @dev_priv: i915 device instance
622 * @state: target DC power state
624 * - DC_STATE_EN_UPTO_DC5
625 * - DC_STATE_EN_UPTO_DC6
628 * Signal to DMC firmware/HW the target DC power state passed in @state.
629 * DMC/HW can turn off individual display clocks and power rails when entering
630 * a deeper DC power state (higher in number) and turns these back when exiting
631 * that state to a shallower power state (lower in number). The HW will decide
632 * when to actually enter a given state on an on-demand basis, for instance
633 * depending on the active state of display pipes. The state of display
634 * registers backed by affected power rails are saved/restored as needed.
636 * Based on the above enabling a deeper DC power state is asynchronous wrt.
637 * enabling it. Disabling a deeper power state is synchronous: for instance
638 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
639 * back on and register state is restored. This is guaranteed by the MMIO write
640 * to DC_STATE_EN blocking until the state is restored.
642 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
647 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
648 state &= dev_priv->csr.allowed_dc_mask;
650 val = I915_READ(DC_STATE_EN);
651 mask = gen9_dc_mask(dev_priv);
652 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
655 /* Check if DMC is ignoring our DC state requests */
656 if ((val & mask) != dev_priv->csr.dc_state)
657 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
658 dev_priv->csr.dc_state, val & mask);
663 gen9_write_dc_state(dev_priv, val);
665 dev_priv->csr.dc_state = val & mask;
668 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
670 assert_can_enable_dc9(dev_priv);
672 DRM_DEBUG_KMS("Enabling DC9\n");
674 * Power sequencer reset is not needed on
675 * platforms with South Display Engine on PCH,
676 * because PPS registers are always on.
678 if (!HAS_PCH_SPLIT(dev_priv))
679 intel_power_sequencer_reset(dev_priv);
680 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
683 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
685 assert_can_disable_dc9(dev_priv);
687 DRM_DEBUG_KMS("Disabling DC9\n");
689 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
691 intel_pps_unlock_regs_wa(dev_priv);
694 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
696 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
697 "CSR program storage start is NULL\n");
698 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
699 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
702 static struct i915_power_well *
703 lookup_power_well(struct drm_i915_private *dev_priv,
704 enum i915_power_well_id power_well_id)
706 struct i915_power_well *power_well;
708 for_each_power_well(dev_priv, power_well)
709 if (power_well->desc->id == power_well_id)
713 * It's not feasible to add error checking code to the callers since
714 * this condition really shouldn't happen and it doesn't even make sense
715 * to abort things like display initialization sequences. Just return
716 * the first power well and hope the WARN gets reported so we can fix
719 WARN(1, "Power well %d not defined for this platform\n", power_well_id);
720 return &dev_priv->power_domains.power_wells[0];
723 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
725 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
728 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
730 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
731 "DC5 already programmed to be enabled.\n");
732 assert_rpm_wakelock_held(dev_priv);
734 assert_csr_loaded(dev_priv);
737 void gen9_enable_dc5(struct drm_i915_private *dev_priv)
739 assert_can_enable_dc5(dev_priv);
741 DRM_DEBUG_KMS("Enabling DC5\n");
743 /* Wa Display #1183: skl,kbl,cfl */
744 if (IS_GEN9_BC(dev_priv))
745 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
746 SKL_SELECT_ALTERNATE_DC_EXIT);
748 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
751 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
753 WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
754 "Backlight is not disabled.\n");
755 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
756 "DC6 already programmed to be enabled.\n");
758 assert_csr_loaded(dev_priv);
761 void skl_enable_dc6(struct drm_i915_private *dev_priv)
763 assert_can_enable_dc6(dev_priv);
765 DRM_DEBUG_KMS("Enabling DC6\n");
767 /* Wa Display #1183: skl,kbl,cfl */
768 if (IS_GEN9_BC(dev_priv))
769 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
770 SKL_SELECT_ALTERNATE_DC_EXIT);
772 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
775 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
776 struct i915_power_well *power_well)
778 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
779 int pw_idx = power_well->desc->hsw.idx;
780 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
781 u32 bios_req = I915_READ(regs->bios);
783 /* Take over the request bit if set by BIOS. */
784 if (bios_req & mask) {
785 u32 drv_req = I915_READ(regs->driver);
787 if (!(drv_req & mask))
788 I915_WRITE(regs->driver, drv_req | mask);
789 I915_WRITE(regs->bios, bios_req & ~mask);
793 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
794 struct i915_power_well *power_well)
796 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
799 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
800 struct i915_power_well *power_well)
802 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
805 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
806 struct i915_power_well *power_well)
808 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
811 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
813 struct i915_power_well *power_well;
815 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
816 if (power_well->count > 0)
817 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
819 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
820 if (power_well->count > 0)
821 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
823 if (IS_GEMINILAKE(dev_priv)) {
824 power_well = lookup_power_well(dev_priv,
825 GLK_DISP_PW_DPIO_CMN_C);
826 if (power_well->count > 0)
827 bxt_ddi_phy_verify_state(dev_priv,
828 power_well->desc->bxt.phy);
832 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
833 struct i915_power_well *power_well)
835 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
838 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
840 u32 tmp = I915_READ(DBUF_CTL);
842 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
843 (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
844 "Unexpected DBuf power power state (0x%08x)\n", tmp);
847 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
848 struct i915_power_well *power_well)
850 struct intel_cdclk_state cdclk_state = {};
852 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
854 dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
855 /* Can't read out voltage_level so can't use intel_cdclk_changed() */
856 WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
858 gen9_assert_dbuf_enabled(dev_priv);
860 if (IS_GEN9_LP(dev_priv))
861 bxt_verify_ddi_phy_power_wells(dev_priv);
863 if (INTEL_GEN(dev_priv) >= 11)
865 * DMC retains HW context only for port A, the other combo
866 * PHY's HW context for port B is lost after DC transitions,
867 * so we need to restore it manually.
869 icl_combo_phys_init(dev_priv);
872 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
873 struct i915_power_well *power_well)
875 if (!dev_priv->csr.dmc_payload)
878 if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
879 skl_enable_dc6(dev_priv);
880 else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
881 gen9_enable_dc5(dev_priv);
/* No HW state to sync on pre-HSW platforms. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
/* Always-on wells cannot be toggled; enable/disable are no-ops. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
894 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
895 struct i915_power_well *power_well)
900 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
901 struct i915_power_well *power_well)
903 if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
904 i830_enable_pipe(dev_priv, PIPE_A);
905 if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
906 i830_enable_pipe(dev_priv, PIPE_B);
909 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
910 struct i915_power_well *power_well)
912 i830_disable_pipe(dev_priv, PIPE_B);
913 i830_disable_pipe(dev_priv, PIPE_A);
916 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
917 struct i915_power_well *power_well)
919 return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
920 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
923 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
924 struct i915_power_well *power_well)
926 if (power_well->count > 0)
927 i830_pipes_power_well_enable(dev_priv, power_well);
929 i830_pipes_power_well_disable(dev_priv, power_well);
932 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
933 struct i915_power_well *power_well, bool enable)
935 int pw_idx = power_well->desc->vlv.idx;
940 mask = PUNIT_PWRGT_MASK(pw_idx);
941 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
942 PUNIT_PWRGT_PWR_GATE(pw_idx);
944 mutex_lock(&dev_priv->pcu_lock);
947 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
952 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
955 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
957 if (wait_for(COND, 100))
958 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
960 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
965 mutex_unlock(&dev_priv->pcu_lock);
968 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
969 struct i915_power_well *power_well)
971 vlv_set_power_well(dev_priv, power_well, true);
974 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
975 struct i915_power_well *power_well)
977 vlv_set_power_well(dev_priv, power_well, false);
980 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
981 struct i915_power_well *power_well)
983 int pw_idx = power_well->desc->vlv.idx;
984 bool enabled = false;
989 mask = PUNIT_PWRGT_MASK(pw_idx);
990 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
992 mutex_lock(&dev_priv->pcu_lock);
994 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
996 * We only ever set the power-on and power-gate states, anything
997 * else is unexpected.
999 WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1000 state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1005 * A transient state at this point would mean some unexpected party
1006 * is poking at the power controls too.
1008 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1009 WARN_ON(ctrl != state);
1011 mutex_unlock(&dev_priv->pcu_lock);
1016 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1021 * On driver load, a pipe may be active and driving a DSI display.
1022 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1023 * (and never recovering) in this case. intel_dsi_post_disable() will
1024 * clear it when we turn off the display.
1026 val = I915_READ(DSPCLK_GATE_D);
1027 val &= DPOUNIT_CLOCK_GATE_DISABLE;
1028 val |= VRHUNIT_CLOCK_GATE_DISABLE;
1029 I915_WRITE(DSPCLK_GATE_D, val);
1032 * Disable trickle feed and enable pnd deadline calculation
1034 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1035 I915_WRITE(CBR1_VLV, 0);
1037 WARN_ON(dev_priv->rawclk_freq == 0);
1039 I915_WRITE(RAWCLK_FREQ_VLV,
1040 DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1043 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1045 struct intel_encoder *encoder;
1049 * Enable the CRI clock source so we can get at the
1050 * display and the reference clock for VGA
1051 * hotplug / manual detection. Supposedly DSI also
1052 * needs the ref clock up and running.
1054 * CHV DPLL B/C have some issues if VGA mode is enabled.
1056 for_each_pipe(dev_priv, pipe) {
1057 u32 val = I915_READ(DPLL(pipe));
1059 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1061 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1063 I915_WRITE(DPLL(pipe), val);
1066 vlv_init_display_clock_gating(dev_priv);
1068 spin_lock_irq(&dev_priv->irq_lock);
1069 valleyview_enable_display_irqs(dev_priv);
1070 spin_unlock_irq(&dev_priv->irq_lock);
1073 * During driver initialization/resume we can avoid restoring the
1074 * part of the HW/SW state that will be inited anyway explicitly.
1076 if (dev_priv->power_domains.initializing)
1079 intel_hpd_init(dev_priv);
1081 /* Re-enable the ADPA, if we have one */
1082 for_each_intel_encoder(&dev_priv->drm, encoder) {
1083 if (encoder->type == INTEL_OUTPUT_ANALOG)
1084 intel_crt_reset(&encoder->base);
1087 i915_redisable_vga_power_on(dev_priv);
1089 intel_pps_unlock_regs_wa(dev_priv);
1092 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1094 spin_lock_irq(&dev_priv->irq_lock);
1095 valleyview_disable_display_irqs(dev_priv);
1096 spin_unlock_irq(&dev_priv->irq_lock);
1098 /* make sure we're done processing display irqs */
1099 synchronize_irq(dev_priv->drm.irq);
1101 intel_power_sequencer_reset(dev_priv);
1103 /* Prevent us from re-enabling polling on accident in late suspend */
1104 if (!dev_priv->drm.dev->power.is_suspended)
1105 intel_hpd_poll_init(dev_priv);
/*
 * Enable hook for the VLV display power well: power the well on via the
 * Punit, then restore the display state that lives inside it.
 */
1108 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1109 struct i915_power_well *power_well)
1111 vlv_set_power_well(dev_priv, power_well, true);
1113 vlv_display_power_well_init(dev_priv);
/*
 * Disable hook for the VLV display power well: quiesce the display state
 * first, then power the well off via the Punit (reverse of enable).
 */
1116 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1117 struct i915_power_well *power_well)
1119 vlv_display_power_well_deinit(dev_priv);
1121 vlv_set_power_well(dev_priv, power_well, false);
/*
 * Enable the VLV DPIO common lane power well and then de-assert the PHY
 * common-lane reset, per the VBIOS programming notes referenced below.
 */
1124 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1125 struct i915_power_well *power_well)
1127 /* since ref/cri clock was enabled */
1128 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1130 vlv_set_power_well(dev_priv, power_well, true);
1133 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1134 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1135 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1136 * b. The other bits such as sfr settings / modesel may all
1139 * This should only be done on init and resume from S3 with
1140 * both PLLs disabled, or we risk losing DPIO and PLL
/* De-assert common lane reset (DPIO_CMNRST set = out of reset). */
1143 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
/*
 * Disable the VLV DPIO common lane power well: sanity-check that all pipe
 * PLLs are already off, assert the PHY common-lane reset, then power the
 * well down via the Punit.
 */
1146 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1147 struct i915_power_well *power_well)
1151 for_each_pipe(dev_priv, pipe)
1152 assert_pll_disabled(dev_priv, pipe);
1154 /* Assert common reset */
1155 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST)
1157 vlv_set_power_well(dev_priv, power_well, false);
/* Bitmask covering every defined power domain (bits 0..POWER_DOMAIN_NUM-1). */
1160 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
/* True iff all bits in @bits are set in @val. */
1162 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
/*
 * Cross-check the CHV DISPLAY_PHY_STATUS register against the expected
 * value derived from the SW copy of DISPLAY_PHY_CONTROL
 * (dev_priv->chv_phy_control) and the enable state of the two common-lane
 * power wells. Complains with DRM_ERROR on mismatch; diagnostic only.
 */
1164 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1166 struct i915_power_well *cmn_bc =
1167 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1168 struct i915_power_well *cmn_d =
1169 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1170 u32 phy_control = dev_priv->chv_phy_control;
1172 u32 phy_status_mask = 0xffffffff;
1175 * The BIOS can leave the PHY in some weird state
1176 * where it doesn't fully power down some parts.
1177 * Disable the asserts until the PHY has been fully
1178 * reset (ie. the power well has been disabled at
/* Mask out PHY0 status bits while its state is still untrusted. */
1181 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1182 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1183 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1184 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1185 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1186 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1187 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1189 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1190 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1191 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1192 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
/* PHY0 (ports B/C) expectations, only when its cmn well is on. */
1194 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1195 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1197 /* this assumes override is only used to enable lanes */
1198 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1199 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1201 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1202 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1204 /* CL1 is on whenever anything is on in either channel */
1205 if (BITS_SET(phy_control,
1206 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1207 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1208 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1211 * The DPLLB check accounts for the pipe B + port A usage
1212 * with CL2 powered up but all the lanes in the second channel
1215 if (BITS_SET(phy_control,
1216 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1217 (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1218 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
/* Spline LDOs: lane pair 0/1 maps to mask 0x3, lane pair 2/3 to 0xc. */
1220 if (BITS_SET(phy_control,
1221 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1222 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1223 if (BITS_SET(phy_control,
1224 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1225 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1227 if (BITS_SET(phy_control,
1228 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1229 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1230 if (BITS_SET(phy_control,
1231 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1232 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
/* PHY1 (port D) expectations; it only has a single channel. */
1235 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1236 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1238 /* this assumes override is only used to enable lanes */
1239 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1240 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1242 if (BITS_SET(phy_control,
1243 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1244 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1246 if (BITS_SET(phy_control,
1247 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1248 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1249 if (BITS_SET(phy_control,
1250 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1251 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1254 phy_status &= phy_status_mask;
1257 * The PHY may be busy with some initial calibration and whatnot,
1258 * so the power state can take a while to actually change.
1260 if (intel_wait_for_register(dev_priv,
1265 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1266 I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1267 phy_status, dev_priv->chv_phy_control);
/*
 * Enable a CHV DPIO common-lane power well (BC or D): power it on via the
 * Punit, wait for phypwrgood, program dynamic power-down bits over the
 * sideband, then de-assert the PHY common-lane reset and update the SW
 * copy of DISPLAY_PHY_CONTROL.
 */
1272 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1273 struct i915_power_well *power_well)
1279 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1280 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1282 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1290 /* since ref/cri clock was enabled */
1291 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1292 vlv_set_power_well(dev_priv, power_well, true);
1294 /* Poll for phypwrgood signal */
1295 if (intel_wait_for_register(dev_priv,
/* NOTE(review): message grammar — should read "is not powered up". */
1300 DRM_ERROR("Display PHY %d is not power up\n", phy);
1302 mutex_lock(&dev_priv->sb_lock);
1304 /* Enable dynamic power down */
1305 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1306 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1307 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1308 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
/* Only the BC well has a second channel (CH1). */
1310 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1311 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1312 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1313 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1316 * Force the non-existing CL2 off. BXT does this
1317 * too, so maybe it saves some power even though
1318 * CL2 doesn't exist?
1320 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1321 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1322 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1325 mutex_unlock(&dev_priv->sb_lock);
/* Take the PHY out of common-lane reset and mirror it in the SW copy. */
1327 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1328 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1330 DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1331 phy, dev_priv->chv_phy_control);
1333 assert_chv_phy_status(dev_priv);
/*
 * Disable a CHV DPIO common-lane power well: verify the PLLs feeding it
 * are off, assert the PHY common-lane reset, power the well down, and
 * re-arm the PHY state asserts now that the PHY is known-reset.
 */
1336 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1337 struct i915_power_well *power_well)
1341 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1342 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1344 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1346 assert_pll_disabled(dev_priv, PIPE_A);
1347 assert_pll_disabled(dev_priv, PIPE_B);
1350 assert_pll_disabled(dev_priv, PIPE_C);
/* Assert common-lane reset in the SW copy and push it to the HW. */
1353 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1354 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1356 vlv_set_power_well(dev_priv, power_well, false);
1358 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1359 phy, dev_priv->chv_phy_control);
1361 /* PHY is fully reset now, so we can enable the PHY state asserts */
1362 dev_priv->chv_phy_assert[phy] = true;
1364 assert_chv_phy_status(dev_priv);
/*
 * Verify via the DPIO sideband that the lane power-down status of PHY
 * @phy channel @ch matches what the @override/@mask combination implies.
 * Diagnostic only; WARNs on mismatch.
 */
1367 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1368 enum dpio_channel ch, bool override, unsigned int mask)
1370 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1371 u32 reg, val, expected, actual;
1374 * The BIOS can leave the PHY in some weird state
1375 * where it doesn't fully power down some parts.
1376 * Disable the asserts until the PHY has been fully
1377 * reset (ie. the power well has been disabled at
1380 if (!dev_priv->chv_phy_assert[phy])
/* Per-channel common-lane status register. */
1384 reg = _CHV_CMN_DW0_CH0;
1386 reg = _CHV_CMN_DW6_CH1;
1388 mutex_lock(&dev_priv->sb_lock);
1389 val = vlv_dpio_read(dev_priv, pipe, reg);
1390 mutex_unlock(&dev_priv->sb_lock);
1393 * This assumes !override is only used when the port is disabled.
1394 * All lanes should power down even without the override when
1395 * the port is disabled.
1397 if (!override || mask == 0xf) {
1398 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1400 * If CH1 common lane is not active anymore
1401 * (eg. for pipe B DPLL) the entire channel will
1402 * shut down, which causes the common lane registers
1403 * to read as 0. That means we can't actually check
1404 * the lane power down status bits, but as the entire
1405 * register reads as 0 it's a good indication that the
1406 * channel is indeed entirely powered down.
1408 if (ch == DPIO_CH1 && val == 0)
1410 } else if (mask != 0x0) {
1411 expected = DPIO_ANYDL_POWERDOWN;
/* Extract the ALLDL/ANYDL power-down bits for the chosen channel. */
1417 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1419 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1420 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1422 WARN(actual != expected,
1423 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1424 !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1425 !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
/*
 * Set or clear the per-channel power-down override enable for PHY @phy
 * channel @ch in DISPLAY_PHY_CONTROL, under the power domains lock.
 * Returns the previous override-enable state (non-zero if it was set) so
 * callers can restore it later.
 */
1429 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1430 enum dpio_channel ch, bool override)
1432 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1435 mutex_lock(&power_domains->lock);
1437 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
/* Nothing to do if the override is already in the requested state. */
1439 if (override == was_override)
1443 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1445 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1447 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1449 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1450 phy, ch, dev_priv->chv_phy_control);
1452 assert_chv_phy_status(dev_priv);
1455 mutex_unlock(&power_domains->lock);
1457 return was_override;
/*
 * Program the per-lane power-down override mask for the PHY/channel that
 * drives @encoder, enabling the override when @override is true. Holds the
 * power domains lock and cross-checks the result against the HW status.
 */
1460 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1461 bool override, unsigned int mask)
1463 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1464 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1465 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1466 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1468 mutex_lock(&power_domains->lock);
/* Replace the old 4-bit lane mask with the new one. */
1470 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1471 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1474 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1476 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1478 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1480 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1481 phy, ch, mask, dev_priv->chv_phy_control);
1483 assert_chv_phy_status(dev_priv);
1485 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1487 mutex_unlock(&power_domains->lock);
/*
 * Query the CHV pipe-A power well state from the Punit (PUNIT_REG_DSPFREQ
 * status field), WARNing if the HW reports anything other than the two
 * states we ever request, or if a state transition is still in flight.
 */
1490 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1491 struct i915_power_well *power_well)
1493 enum pipe pipe = PIPE_A;
1497 mutex_lock(&dev_priv->pcu_lock);
1499 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1501 * We only ever set the power-on and power-gate states, anything
1502 * else is unexpected.
1504 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1505 enabled = state == DP_SSS_PWR_ON(pipe);
1508 * A transient state at this point would mean some unexpected party
1509 * is poking at the power controls too.
1511 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1512 WARN_ON(ctrl << 16 != state);
1514 mutex_unlock(&dev_priv->pcu_lock);
/*
 * Request the CHV pipe-A power well on or off through the Punit control
 * field of PUNIT_REG_DSPFREQ, then wait (up to 100ms) for the status
 * field to reflect the requested state.
 */
1519 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1520 struct i915_power_well *power_well,
1523 enum pipe pipe = PIPE_A;
1527 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1529 mutex_lock(&dev_priv->pcu_lock);
/* COND: Punit status field has reached the requested state. */
1532 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1537 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1538 ctrl &= ~DP_SSC_MASK(pipe);
1539 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1540 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1542 if (wait_for(COND, 100))
1543 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1545 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1550 mutex_unlock(&dev_priv->pcu_lock);
/*
 * Enable hook for the CHV pipe power well: power it on via the Punit,
 * then restore the display state that lives inside it.
 */
1553 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1554 struct i915_power_well *power_well)
1556 chv_set_pipe_power_well(dev_priv, power_well, true);
1558 vlv_display_power_well_init(dev_priv);
/*
 * Disable hook for the CHV pipe power well: quiesce the display state,
 * then power the well off via the Punit.
 */
1561 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1562 struct i915_power_well *power_well)
1564 vlv_display_power_well_deinit(dev_priv);
1566 chv_set_pipe_power_well(dev_priv, power_well, false);
/*
 * Locked helper: take a reference on every power well that backs @domain
 * and bump the domain's use count. Caller holds power_domains->lock.
 */
1570 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1571 enum intel_display_power_domain domain)
1573 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1574 struct i915_power_well *power_well;
1576 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1577 intel_power_well_get(dev_priv, power_well);
1579 power_domains->domain_use_count[domain]++;
1583 * intel_display_power_get - grab a power domain reference
1584 * @dev_priv: i915 device instance
1585 * @domain: power domain to reference
1587 * This function grabs a power domain reference for @domain and ensures that the
1588 * power domain and all its parents are powered up. Therefore users should only
1589 * grab a reference to the innermost power domain they need.
1591 * Any power domain reference obtained by this function must have a symmetric
1592 * call to intel_display_power_put() to release the reference again.
1594 void intel_display_power_get(struct drm_i915_private *dev_priv,
1595 enum intel_display_power_domain domain)
1597 struct i915_power_domains *power_domains = &dev_priv->power_domains;
/* Hold a runtime PM reference for as long as the domain reference exists. */
1599 intel_runtime_pm_get(dev_priv);
1601 mutex_lock(&power_domains->lock);
1603 __intel_display_power_get_domain(dev_priv, domain);
1605 mutex_unlock(&power_domains->lock);
1609 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1610 * @dev_priv: i915 device instance
1611 * @domain: power domain to reference
1613 * This function grabs a power domain reference for @domain only if the domain
1614 * is already enabled, without powering anything up. The device must also
1615 * already be runtime-active, otherwise no reference is taken.
1617 * Any power domain reference obtained by this function must have a symmetric
1618 * call to intel_display_power_put() to release the reference again.
* Returns: true if the reference was acquired (domain enabled and device
* runtime-active), false otherwise — in which case no reference is held
* and intel_display_power_put() must not be called.
1620 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1621 enum intel_display_power_domain domain)
1623 struct i915_power_domains *power_domains = &dev_priv->power_domains;
/* Bail out without waking the device if it is runtime suspended. */
1626 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1629 mutex_lock(&power_domains->lock);
1631 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1632 __intel_display_power_get_domain(dev_priv, domain);
1638 mutex_unlock(&power_domains->lock);
/* Drop the runtime PM reference again on the failure path. */
1641 intel_runtime_pm_put(dev_priv);
1647 * intel_display_power_put - release a power domain reference
1648 * @dev_priv: i915 device instance
1649 * @domain: power domain to drop the reference for
1651 * This function drops the power domain reference obtained by
1652 * intel_display_power_get() and might power down the corresponding hardware
1653 * block right away if this is the last reference.
1655 void intel_display_power_put(struct drm_i915_private *dev_priv,
1656 enum intel_display_power_domain domain)
1658 struct i915_power_domains *power_domains;
1659 struct i915_power_well *power_well;
1661 power_domains = &dev_priv->power_domains;
1663 mutex_lock(&power_domains->lock);
1665 WARN(!power_domains->domain_use_count[domain],
1666 "Use count on domain %s is already zero\n",
1667 intel_display_power_domain_str(domain));
1668 power_domains->domain_use_count[domain]--;
/* Release wells in reverse order of how they were acquired. */
1670 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1671 intel_power_well_put(dev_priv, power_well);
1673 mutex_unlock(&power_domains->lock);
/* Balance the runtime PM reference taken in intel_display_power_get(). */
1675 intel_runtime_pm_put(dev_priv);
/* Power domains backed by each i830 / VLV power well. */
1678 #define I830_PIPES_POWER_DOMAINS ( \
1679 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1680 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1681 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1682 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1683 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1684 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1685 BIT_ULL(POWER_DOMAIN_INIT))
/* Everything display-side on VLV lives in the single display well. */
1687 #define VLV_DISPLAY_POWER_DOMAINS ( \
1688 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1689 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1690 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1691 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1692 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1693 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1694 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1695 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1696 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1697 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1698 BIT_ULL(POWER_DOMAIN_VGA) | \
1699 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1700 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1701 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1702 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1703 BIT_ULL(POWER_DOMAIN_INIT))
1705 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
1706 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1707 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1708 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1709 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1710 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1711 BIT_ULL(POWER_DOMAIN_INIT))
/* VLV TX lane wells: lanes 0-1 and 2-3 wells carry the same domains. */
1713 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
1714 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1715 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1716 BIT_ULL(POWER_DOMAIN_INIT))
1718 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
1719 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1720 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1721 BIT_ULL(POWER_DOMAIN_INIT))
1723 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
1724 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1725 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1726 BIT_ULL(POWER_DOMAIN_INIT))
1728 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
1729 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1730 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1731 BIT_ULL(POWER_DOMAIN_INIT))
/* Power domains backed by each CHV power well (adds pipe/port D over VLV). */
1733 #define CHV_DISPLAY_POWER_DOMAINS ( \
1734 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1735 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1736 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1737 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1738 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1739 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1740 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1741 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1742 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1743 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1744 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1745 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1746 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1747 BIT_ULL(POWER_DOMAIN_VGA) | \
1748 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1749 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1750 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1751 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1752 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1753 BIT_ULL(POWER_DOMAIN_INIT))
1755 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
1756 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1757 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1758 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1759 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1760 BIT_ULL(POWER_DOMAIN_INIT))
1762 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
1763 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1764 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1765 BIT_ULL(POWER_DOMAIN_INIT))
/* Power domains backed by the single HSW / BDW display power well. */
1767 #define HSW_DISPLAY_POWER_DOMAINS ( \
1768 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1769 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1770 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1771 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1772 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1773 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1774 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1775 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1776 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1777 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1778 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1779 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
1780 BIT_ULL(POWER_DOMAIN_VGA) | \
1781 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1782 BIT_ULL(POWER_DOMAIN_INIT))
/* Same as HSW minus pipe A's panel fitter (pipe A fitter is always-on). */
1784 #define BDW_DISPLAY_POWER_DOMAINS ( \
1785 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1786 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1787 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1788 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1789 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1790 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1791 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1792 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1793 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1794 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1795 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
1796 BIT_ULL(POWER_DOMAIN_VGA) | \
1797 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1798 BIT_ULL(POWER_DOMAIN_INIT))
/* Power domains backed by the SKL power wells (PW2, DDI IO, DC-off). */
1800 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
1801 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1802 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1803 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1804 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1805 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1806 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1807 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1808 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1809 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1810 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1811 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
1812 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1813 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1814 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1815 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1816 BIT_ULL(POWER_DOMAIN_VGA) | \
1817 BIT_ULL(POWER_DOMAIN_INIT))
1818 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
1819 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
1820 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
1821 BIT_ULL(POWER_DOMAIN_INIT))
1822 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
1823 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
1824 BIT_ULL(POWER_DOMAIN_INIT))
1825 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
1826 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
1827 BIT_ULL(POWER_DOMAIN_INIT))
1828 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
1829 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
1830 BIT_ULL(POWER_DOMAIN_INIT))
/* Domains that keep DC states disabled while referenced. */
1831 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
1832 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
1833 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
1834 BIT_ULL(POWER_DOMAIN_MODESET) | \
1835 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1836 BIT_ULL(POWER_DOMAIN_INIT))
/* Power domains backed by the BXT power wells (PW2, DC-off, DPIO PHYs). */
1838 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
1839 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1840 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1841 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1842 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1843 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1844 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1845 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1846 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1847 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1848 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1849 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1850 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1851 BIT_ULL(POWER_DOMAIN_VGA) | \
1852 BIT_ULL(POWER_DOMAIN_INIT))
1853 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
1854 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
1855 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
1856 BIT_ULL(POWER_DOMAIN_MODESET) | \
1857 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1858 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1859 BIT_ULL(POWER_DOMAIN_INIT))
1860 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
1861 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
1862 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1863 BIT_ULL(POWER_DOMAIN_INIT))
1864 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
1865 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1866 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1867 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1868 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1869 BIT_ULL(POWER_DOMAIN_INIT))
/* Power domains backed by the GLK power wells (PW2, DDI IO, PHYs, AUX). */
1871 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
1872 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1873 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1874 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1875 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1876 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1877 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1878 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1879 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1880 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1881 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1882 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1883 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1884 BIT_ULL(POWER_DOMAIN_VGA) | \
1885 BIT_ULL(POWER_DOMAIN_INIT))
1886 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
1887 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
1888 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
1889 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
1890 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
1891 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
1892 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
1893 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
1894 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1895 BIT_ULL(POWER_DOMAIN_INIT))
1896 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
1897 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1898 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1899 BIT_ULL(POWER_DOMAIN_INIT))
1900 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
1901 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1902 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1903 BIT_ULL(POWER_DOMAIN_INIT))
1904 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
1905 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1906 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
1907 BIT_ULL(POWER_DOMAIN_INIT))
1908 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
1909 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1910 BIT_ULL(POWER_DOMAIN_INIT))
1911 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
1912 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1913 BIT_ULL(POWER_DOMAIN_INIT))
1914 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
1915 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
1916 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
1917 BIT_ULL(POWER_DOMAIN_MODESET) | \
1918 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1919 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1920 BIT_ULL(POWER_DOMAIN_INIT))
/* Power domains backed by the CNL power wells (PW2, DDI IO, AUX, DC-off). */
1922 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
1923 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1924 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1925 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1926 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1927 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1928 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1929 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1930 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1931 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1932 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1933 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
1934 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1935 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1936 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1937 BIT_ULL(POWER_DOMAIN_AUX_F) | \
1938 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1939 BIT_ULL(POWER_DOMAIN_VGA) | \
1940 BIT_ULL(POWER_DOMAIN_INIT))
1941 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
1942 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
1943 BIT_ULL(POWER_DOMAIN_INIT))
1944 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
1945 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
1946 BIT_ULL(POWER_DOMAIN_INIT))
1947 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
1948 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
1949 BIT_ULL(POWER_DOMAIN_INIT))
1950 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
1951 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
1952 BIT_ULL(POWER_DOMAIN_INIT))
1953 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
1954 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1955 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
1956 BIT_ULL(POWER_DOMAIN_INIT))
1957 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
1958 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1959 BIT_ULL(POWER_DOMAIN_INIT))
1960 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
1961 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1962 BIT_ULL(POWER_DOMAIN_INIT))
1963 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
1964 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1965 BIT_ULL(POWER_DOMAIN_INIT))
1966 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
1967 BIT_ULL(POWER_DOMAIN_AUX_F) | \
1968 BIT_ULL(POWER_DOMAIN_INIT))
1969 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
1970 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
1971 BIT_ULL(POWER_DOMAIN_INIT))
1972 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
1973 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
1974 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
1975 BIT_ULL(POWER_DOMAIN_MODESET) | \
1976 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1977 BIT_ULL(POWER_DOMAIN_INIT))
/* Power domains backed by the ICL power wells; nested: PW2 > PW3 > PW4. */
1980 * ICL PW_0/PG_0 domains (HW/DMC control):
1982 * - clocks except port PLL
1983 * - central power except FBC
1984 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
1985 * ICL PW_1/PG_1 domains (HW/DMC control):
1987 * - PIPE_A and its planes, except VGA
1988 * - transcoder EDP + PSR
1993 #define ICL_PW_4_POWER_DOMAINS ( \
1994 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1995 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1996 BIT_ULL(POWER_DOMAIN_INIT))
1998 #define ICL_PW_3_POWER_DOMAINS ( \
1999 ICL_PW_4_POWER_DOMAINS | \
2000 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2001 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2002 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2003 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2004 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2005 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2006 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2007 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2008 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2009 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2010 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2011 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2012 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
2013 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2014 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
2015 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2016 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2017 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2018 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2019 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2020 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
2021 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
2022 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
2023 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
2024 BIT_ULL(POWER_DOMAIN_VGA) | \
2025 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2026 BIT_ULL(POWER_DOMAIN_INIT))
2029 * - KVMR (HW control)
2031 #define ICL_PW_2_POWER_DOMAINS ( \
2032 ICL_PW_3_POWER_DOMAINS | \
2033 BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
2034 BIT_ULL(POWER_DOMAIN_INIT))
2036 * - KVMR (HW control)
2038 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2039 ICL_PW_2_POWER_DOMAINS | \
2040 BIT_ULL(POWER_DOMAIN_MODESET) | \
2041 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2042 BIT_ULL(POWER_DOMAIN_INIT))
/* Per-port DDI IO wells. */
2044 #define ICL_DDI_IO_A_POWER_DOMAINS ( \
2045 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2046 #define ICL_DDI_IO_B_POWER_DOMAINS ( \
2047 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2048 #define ICL_DDI_IO_C_POWER_DOMAINS ( \
2049 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2050 #define ICL_DDI_IO_D_POWER_DOMAINS ( \
2051 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2052 #define ICL_DDI_IO_E_POWER_DOMAINS ( \
2053 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2054 #define ICL_DDI_IO_F_POWER_DOMAINS ( \
2055 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
/* Per-port AUX wells, including the Thunderbolt AUX channels. */
2057 #define ICL_AUX_A_IO_POWER_DOMAINS ( \
2058 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2059 BIT_ULL(POWER_DOMAIN_AUX_A))
2060 #define ICL_AUX_B_IO_POWER_DOMAINS ( \
2061 BIT_ULL(POWER_DOMAIN_AUX_B))
2062 #define ICL_AUX_C_IO_POWER_DOMAINS ( \
2063 BIT_ULL(POWER_DOMAIN_AUX_C))
2064 #define ICL_AUX_D_IO_POWER_DOMAINS ( \
2065 BIT_ULL(POWER_DOMAIN_AUX_D))
2066 #define ICL_AUX_E_IO_POWER_DOMAINS ( \
2067 BIT_ULL(POWER_DOMAIN_AUX_E))
2068 #define ICL_AUX_F_IO_POWER_DOMAINS ( \
2069 BIT_ULL(POWER_DOMAIN_AUX_F))
2070 #define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
2071 BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2072 #define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
2073 BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2074 #define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
2075 BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2076 #define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
2077 BIT_ULL(POWER_DOMAIN_AUX_TBT4))
/*
 * Callbacks for the virtual "always-on" well: sync_hw/enable/disable are
 * no-ops (per the *_noop helper names) since this well cannot be toggled.
 * NOTE(review): closing "};" line appears dropped by extraction here.
 */
2079 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2080 .sync_hw = i9xx_power_well_sync_hw_noop,
2081 .enable = i9xx_always_on_power_well_noop,
2082 .disable = i9xx_always_on_power_well_noop,
2083 .is_enabled = i9xx_always_on_power_well_enabled,
/* CHV pipe power well callbacks; hardware sync at init is a no-op. */
2086 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2087 .sync_hw = i9xx_power_well_sync_hw_noop,
2088 .enable = chv_pipe_power_well_enable,
2089 .disable = chv_pipe_power_well_disable,
2090 .is_enabled = chv_pipe_power_well_enabled,
/*
 * CHV DPIO common-lane well callbacks; status readback reuses the VLV
 * Punit-based is_enabled helper.
 */
2093 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2094 .sync_hw = i9xx_power_well_sync_hw_noop,
2095 .enable = chv_dpio_cmn_power_well_enable,
2096 .disable = chv_dpio_cmn_power_well_disable,
2097 .is_enabled = vlv_power_well_enabled,
/*
 * Power well list for legacy platforms with no controllable display power
 * wells: a single always-on entry covering every domain.
 */
2100 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2102 .name = "always-on",
2104 .domains = POWER_DOMAIN_MASK,
2105 .ops = &i9xx_always_on_power_well_ops,
2106 .id = DISP_PW_ID_NONE,
/* i830 pipes power well callbacks (the only i830-specific well). */
2110 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2111 .sync_hw = i830_pipes_power_well_sync_hw,
2112 .enable = i830_pipes_power_well_enable,
2113 .disable = i830_pipes_power_well_disable,
2114 .is_enabled = i830_pipes_power_well_enabled,
/*
 * i830 power wells: the always-on well plus a pipes well.
 * NOTE(review): the "{", "}," entry delimiters and the pipes entry's
 * .name line appear dropped by extraction — verify against upstream.
 */
2117 static const struct i915_power_well_desc i830_power_wells[] = {
2119 .name = "always-on",
2121 .domains = POWER_DOMAIN_MASK,
2122 .ops = &i9xx_always_on_power_well_ops,
2123 .id = DISP_PW_ID_NONE,
2127 .domains = I830_PIPES_POWER_DOMAINS,
2128 .ops = &i830_pipes_power_well_ops,
2129 .id = DISP_PW_ID_NONE,
/*
 * HSW-style power well callbacks, shared by HSW/BDW and all gen9+
 * request/state-register based wells.
 */
2133 static const struct i915_power_well_ops hsw_power_well_ops = {
2134 .sync_hw = hsw_power_well_sync_hw,
2135 .enable = hsw_power_well_enable,
2136 .disable = hsw_power_well_disable,
2137 .is_enabled = hsw_power_well_enabled,
/* Virtual "DC off" well callbacks: enabling it blocks DC states (gen9+). */
2140 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2141 .sync_hw = i9xx_power_well_sync_hw_noop,
2142 .enable = gen9_dc_off_power_well_enable,
2143 .disable = gen9_dc_off_power_well_disable,
2144 .is_enabled = gen9_dc_off_power_well_enabled,
/* BXT/GLK DPIO common-lane (PHY) power well callbacks. */
2147 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2148 .sync_hw = i9xx_power_well_sync_hw_noop,
2149 .enable = bxt_dpio_cmn_power_well_enable,
2150 .disable = bxt_dpio_cmn_power_well_disable,
2151 .is_enabled = bxt_dpio_cmn_power_well_enabled,
/*
 * The four HSW power well control registers, one per requester:
 * BIOS, driver, KVMr and debug.
 */
2154 static const struct i915_power_well_regs hsw_power_well_regs = {
2155 .bios = HSW_PWR_WELL_CTL1,
2156 .driver = HSW_PWR_WELL_CTL2,
2157 .kvmr = HSW_PWR_WELL_CTL3,
2158 .debug = HSW_PWR_WELL_CTL4,
/*
 * HSW power wells: always-on plus the single global display well.
 * NOTE(review): entry delimiters and the display entry's .name line
 * appear dropped by extraction.
 */
2161 static const struct i915_power_well_desc hsw_power_wells[] = {
2163 .name = "always-on",
2165 .domains = POWER_DOMAIN_MASK,
2166 .ops = &i9xx_always_on_power_well_ops,
2167 .id = DISP_PW_ID_NONE,
2171 .domains = HSW_DISPLAY_POWER_DOMAINS,
2172 .ops = &hsw_power_well_ops,
2173 .id = HSW_DISP_PW_GLOBAL,
2175 .hsw.regs = &hsw_power_well_regs,
2176 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2177 .hsw.has_vga = true,
/*
 * BDW power wells: same layout as HSW, but the global display well also
 * gates pipe B/C interrupts (irq_pipe_mask).
 */
2182 static const struct i915_power_well_desc bdw_power_wells[] = {
2184 .name = "always-on",
2186 .domains = POWER_DOMAIN_MASK,
2187 .ops = &i9xx_always_on_power_well_ops,
2188 .id = DISP_PW_ID_NONE,
2192 .domains = BDW_DISPLAY_POWER_DOMAINS,
2193 .ops = &hsw_power_well_ops,
2194 .id = HSW_DISP_PW_GLOBAL,
2196 .hsw.regs = &hsw_power_well_regs,
2197 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2198 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2199 .hsw.has_vga = true,
/* VLV display (DISP2D) power well callbacks; status read via Punit. */
2204 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2205 .sync_hw = i9xx_power_well_sync_hw_noop,
2206 .enable = vlv_display_power_well_enable,
2207 .disable = vlv_display_power_well_disable,
2208 .is_enabled = vlv_power_well_enabled,
/* VLV DPIO common-lane power well callbacks. */
2211 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2212 .sync_hw = i9xx_power_well_sync_hw_noop,
2213 .enable = vlv_dpio_cmn_power_well_enable,
2214 .disable = vlv_dpio_cmn_power_well_disable,
2215 .is_enabled = vlv_power_well_enabled,
/* Generic VLV Punit-controlled power well callbacks (DPIO TX lane wells). */
2218 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2219 .sync_hw = i9xx_power_well_sync_hw_noop,
2220 .enable = vlv_power_well_enable,
2221 .disable = vlv_power_well_disable,
2222 .is_enabled = vlv_power_well_enabled,
/*
 * VLV power wells: always-on, display (DISP2D), four DPIO TX lane wells
 * and the DPIO common well. Each TX lane well lists all four lane-group
 * domain masks, since lane use depends on runtime port configuration.
 * NOTE(review): entry delimiters and some .name lines appear dropped by
 * extraction — verify against upstream i915 source.
 */
2225 static const struct i915_power_well_desc vlv_power_wells[] = {
2227 .name = "always-on",
2229 .domains = POWER_DOMAIN_MASK,
2230 .ops = &i9xx_always_on_power_well_ops,
2231 .id = DISP_PW_ID_NONE,
2235 .domains = VLV_DISPLAY_POWER_DOMAINS,
2236 .ops = &vlv_display_power_well_ops,
2237 .id = VLV_DISP_PW_DISP2D,
2239 .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2243 .name = "dpio-tx-b-01",
2244 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2245 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2246 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2247 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2248 .ops = &vlv_dpio_power_well_ops,
2249 .id = DISP_PW_ID_NONE,
2251 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2255 .name = "dpio-tx-b-23",
2256 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2257 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2258 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2259 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2260 .ops = &vlv_dpio_power_well_ops,
2261 .id = DISP_PW_ID_NONE,
2263 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2267 .name = "dpio-tx-c-01",
2268 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2269 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2270 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2271 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2272 .ops = &vlv_dpio_power_well_ops,
2273 .id = DISP_PW_ID_NONE,
2275 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2279 .name = "dpio-tx-c-23",
2280 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2281 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2282 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2283 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2284 .ops = &vlv_dpio_power_well_ops,
2285 .id = DISP_PW_ID_NONE,
2287 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2291 .name = "dpio-common",
2292 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2293 .ops = &vlv_dpio_cmn_power_well_ops,
2294 .id = VLV_DISP_PW_DPIO_CMN_BC,
2296 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
/*
 * CHV power wells: always-on, the "pipe-a" well (really the disp2d well;
 * see inline comment), and two DPIO common wells (BC and D).
 */
2301 static const struct i915_power_well_desc chv_power_wells[] = {
2303 .name = "always-on",
2305 .domains = POWER_DOMAIN_MASK,
2306 .ops = &i9xx_always_on_power_well_ops,
2307 .id = DISP_PW_ID_NONE,
2312 * Pipe A power well is the new disp2d well. Pipe B and C
2313 * power wells don't actually exist. Pipe A power well is
2314 * required for any pipe to work.
2316 .domains = CHV_DISPLAY_POWER_DOMAINS,
2317 .ops = &chv_pipe_power_well_ops,
2318 .id = DISP_PW_ID_NONE,
2321 .name = "dpio-common-bc",
2322 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2323 .ops = &chv_dpio_cmn_power_well_ops,
2324 .id = VLV_DISP_PW_DPIO_CMN_BC,
2326 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2330 .name = "dpio-common-d",
2331 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2332 .ops = &chv_dpio_cmn_power_well_ops,
2333 .id = CHV_DISP_PW_DPIO_CMN_D,
2335 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
/*
 * Look up the power well with the given ID and report whether its
 * hardware state is currently enabled, via the well's is_enabled hook.
 * NOTE(review): extraction appears to have dropped the body braces,
 * the ret declaration, the power_domains lock/unlock around the query
 * and the final "return ret;" — verify against upstream i915 source.
 */
2340 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2341 enum i915_power_well_id power_well_id)
2343 struct i915_power_well *power_well;
2346 power_well = lookup_power_well(dev_priv, power_well_id);
2347 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
/*
 * SKL/KBL (gen9 big-core) power wells, listed in enabling order:
 * always-on, PW1 and MISC IO (both handled by DMC firmware), the virtual
 * DC-off well, PW2 (gates pipe B/C interrupts, VGA, fuses), and the
 * per-port DDI IO wells.
 * NOTE(review): entry delimiters and some .domains lines (for PW1 /
 * MISC IO / DC off .name) appear dropped by extraction.
 */
2352 static const struct i915_power_well_desc skl_power_wells[] = {
2354 .name = "always-on",
2356 .domains = POWER_DOMAIN_MASK,
2357 .ops = &i9xx_always_on_power_well_ops,
2358 .id = DISP_PW_ID_NONE,
2361 .name = "power well 1",
2362 /* Handled by the DMC firmware */
2365 .ops = &hsw_power_well_ops,
2366 .id = SKL_DISP_PW_1,
2368 .hsw.regs = &hsw_power_well_regs,
2369 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2370 .hsw.has_fuses = true,
2374 .name = "MISC IO power well",
2375 /* Handled by the DMC firmware */
2378 .ops = &hsw_power_well_ops,
2379 .id = SKL_DISP_PW_MISC_IO,
2381 .hsw.regs = &hsw_power_well_regs,
2382 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
2387 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2388 .ops = &gen9_dc_off_power_well_ops,
2389 .id = DISP_PW_ID_NONE,
2392 .name = "power well 2",
2393 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2394 .ops = &hsw_power_well_ops,
2395 .id = SKL_DISP_PW_2,
2397 .hsw.regs = &hsw_power_well_regs,
2398 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2399 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2400 .hsw.has_vga = true,
2401 .hsw.has_fuses = true,
2405 .name = "DDI A/E IO power well",
2406 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2407 .ops = &hsw_power_well_ops,
2408 .id = DISP_PW_ID_NONE,
2410 .hsw.regs = &hsw_power_well_regs,
2411 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
2415 .name = "DDI B IO power well",
2416 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2417 .ops = &hsw_power_well_ops,
2418 .id = DISP_PW_ID_NONE,
2420 .hsw.regs = &hsw_power_well_regs,
2421 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2425 .name = "DDI C IO power well",
2426 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2427 .ops = &hsw_power_well_ops,
2428 .id = DISP_PW_ID_NONE,
2430 .hsw.regs = &hsw_power_well_regs,
2431 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2435 .name = "DDI D IO power well",
2436 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2437 .ops = &hsw_power_well_ops,
2438 .id = DISP_PW_ID_NONE,
2440 .hsw.regs = &hsw_power_well_regs,
2441 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
/*
 * BXT power wells: always-on, DMC-managed PW1, DC-off, PW2, and the two
 * DPIO PHY wells (PHY1 = port A, PHY0 = ports B/C).
 */
2446 static const struct i915_power_well_desc bxt_power_wells[] = {
2448 .name = "always-on",
2450 .domains = POWER_DOMAIN_MASK,
2451 .ops = &i9xx_always_on_power_well_ops,
2452 .id = DISP_PW_ID_NONE,
2455 .name = "power well 1",
2456 /* Handled by the DMC firmware */
2459 .ops = &hsw_power_well_ops,
2460 .id = SKL_DISP_PW_1,
2462 .hsw.regs = &hsw_power_well_regs,
2463 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2464 .hsw.has_fuses = true,
2469 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2470 .ops = &gen9_dc_off_power_well_ops,
2471 .id = DISP_PW_ID_NONE,
2474 .name = "power well 2",
2475 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2476 .ops = &hsw_power_well_ops,
2477 .id = SKL_DISP_PW_2,
2479 .hsw.regs = &hsw_power_well_regs,
2480 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2481 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2482 .hsw.has_vga = true,
2483 .hsw.has_fuses = true,
2487 .name = "dpio-common-a",
2488 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2489 .ops = &bxt_dpio_cmn_power_well_ops,
2490 .id = BXT_DISP_PW_DPIO_CMN_A,
2492 .bxt.phy = DPIO_PHY1,
2496 .name = "dpio-common-bc",
2497 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2498 .ops = &bxt_dpio_cmn_power_well_ops,
2499 .id = VLV_DISP_PW_DPIO_CMN_BC,
2501 .bxt.phy = DPIO_PHY0,
/*
 * GLK power wells: BXT layout extended with three single-port DPIO PHY
 * wells, per-port AUX wells (A-C), and per-port DDI IO wells (A-C).
 */
2506 static const struct i915_power_well_desc glk_power_wells[] = {
2508 .name = "always-on",
2510 .domains = POWER_DOMAIN_MASK,
2511 .ops = &i9xx_always_on_power_well_ops,
2512 .id = DISP_PW_ID_NONE,
2515 .name = "power well 1",
2516 /* Handled by the DMC firmware */
2519 .ops = &hsw_power_well_ops,
2520 .id = SKL_DISP_PW_1,
2522 .hsw.regs = &hsw_power_well_regs,
2523 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2524 .hsw.has_fuses = true,
2529 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2530 .ops = &gen9_dc_off_power_well_ops,
2531 .id = DISP_PW_ID_NONE,
2534 .name = "power well 2",
2535 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2536 .ops = &hsw_power_well_ops,
2537 .id = SKL_DISP_PW_2,
2539 .hsw.regs = &hsw_power_well_regs,
2540 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2541 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2542 .hsw.has_vga = true,
2543 .hsw.has_fuses = true,
2547 .name = "dpio-common-a",
2548 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2549 .ops = &bxt_dpio_cmn_power_well_ops,
2550 .id = BXT_DISP_PW_DPIO_CMN_A,
2552 .bxt.phy = DPIO_PHY1,
2556 .name = "dpio-common-b",
2557 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2558 .ops = &bxt_dpio_cmn_power_well_ops,
2559 .id = VLV_DISP_PW_DPIO_CMN_BC,
2561 .bxt.phy = DPIO_PHY0,
2565 .name = "dpio-common-c",
2566 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2567 .ops = &bxt_dpio_cmn_power_well_ops,
2568 .id = GLK_DISP_PW_DPIO_CMN_C,
2570 .bxt.phy = DPIO_PHY2,
2575 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2576 .ops = &hsw_power_well_ops,
2577 .id = DISP_PW_ID_NONE,
2579 .hsw.regs = &hsw_power_well_regs,
2580 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2585 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2586 .ops = &hsw_power_well_ops,
2587 .id = DISP_PW_ID_NONE,
2589 .hsw.regs = &hsw_power_well_regs,
2590 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2595 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2596 .ops = &hsw_power_well_ops,
2597 .id = DISP_PW_ID_NONE,
2599 .hsw.regs = &hsw_power_well_regs,
2600 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2604 .name = "DDI A IO power well",
2605 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
2606 .ops = &hsw_power_well_ops,
2607 .id = DISP_PW_ID_NONE,
2609 .hsw.regs = &hsw_power_well_regs,
2610 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
2614 .name = "DDI B IO power well",
2615 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2616 .ops = &hsw_power_well_ops,
2617 .id = DISP_PW_ID_NONE,
2619 .hsw.regs = &hsw_power_well_regs,
2620 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2624 .name = "DDI C IO power well",
2625 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2626 .ops = &hsw_power_well_ops,
2627 .id = DISP_PW_ID_NONE,
2629 .hsw.regs = &hsw_power_well_regs,
2630 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
/*
 * CNL power wells. Note the DDI F / AUX F wells are last on purpose:
 * intel_power_domains_init() trims the final two entries from this list
 * on SKUs without port F.
 */
2635 static const struct i915_power_well_desc cnl_power_wells[] = {
2637 .name = "always-on",
2639 .domains = POWER_DOMAIN_MASK,
2640 .ops = &i9xx_always_on_power_well_ops,
2641 .id = DISP_PW_ID_NONE,
2644 .name = "power well 1",
2645 /* Handled by the DMC firmware */
2648 .ops = &hsw_power_well_ops,
2649 .id = SKL_DISP_PW_1,
2651 .hsw.regs = &hsw_power_well_regs,
2652 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2653 .hsw.has_fuses = true,
2658 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
2659 .ops = &hsw_power_well_ops,
2660 .id = DISP_PW_ID_NONE,
2662 .hsw.regs = &hsw_power_well_regs,
2663 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
2668 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
2669 .ops = &hsw_power_well_ops,
2670 .id = DISP_PW_ID_NONE,
2672 .hsw.regs = &hsw_power_well_regs,
2673 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
2678 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
2679 .ops = &hsw_power_well_ops,
2680 .id = DISP_PW_ID_NONE,
2682 .hsw.regs = &hsw_power_well_regs,
2683 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
2688 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
2689 .ops = &hsw_power_well_ops,
2690 .id = DISP_PW_ID_NONE,
2692 .hsw.regs = &hsw_power_well_regs,
2693 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
2698 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2699 .ops = &gen9_dc_off_power_well_ops,
2700 .id = DISP_PW_ID_NONE,
2703 .name = "power well 2",
2704 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2705 .ops = &hsw_power_well_ops,
2706 .id = SKL_DISP_PW_2,
2708 .hsw.regs = &hsw_power_well_regs,
2709 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2710 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2711 .hsw.has_vga = true,
2712 .hsw.has_fuses = true,
2716 .name = "DDI A IO power well",
2717 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
2718 .ops = &hsw_power_well_ops,
2719 .id = DISP_PW_ID_NONE,
2721 .hsw.regs = &hsw_power_well_regs,
2722 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
2726 .name = "DDI B IO power well",
2727 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
2728 .ops = &hsw_power_well_ops,
2729 .id = DISP_PW_ID_NONE,
2731 .hsw.regs = &hsw_power_well_regs,
2732 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2736 .name = "DDI C IO power well",
2737 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
2738 .ops = &hsw_power_well_ops,
2739 .id = DISP_PW_ID_NONE,
2741 .hsw.regs = &hsw_power_well_regs,
2742 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2746 .name = "DDI D IO power well",
2747 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
2748 .ops = &hsw_power_well_ops,
2749 .id = DISP_PW_ID_NONE,
2751 .hsw.regs = &hsw_power_well_regs,
2752 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
2756 .name = "DDI F IO power well",
2757 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
2758 .ops = &hsw_power_well_ops,
2759 .id = DISP_PW_ID_NONE,
2761 .hsw.regs = &hsw_power_well_regs,
2762 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
2767 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
2768 .ops = &hsw_power_well_ops,
2769 .id = DISP_PW_ID_NONE,
2771 .hsw.regs = &hsw_power_well_regs,
2772 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
/* ICL combo-PHY AUX well callbacks; only enable/disable differ from HSW. */
2777 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
2778 .sync_hw = hsw_power_well_sync_hw,
2779 .enable = icl_combo_phy_aux_power_well_enable,
2780 .disable = icl_combo_phy_aux_power_well_disable,
2781 .is_enabled = hsw_power_well_enabled,
/* ICL Type-C PHY AUX well callbacks; custom enable, HSW-style disable. */
2784 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
2785 .sync_hw = hsw_power_well_sync_hw,
2786 .enable = icl_tc_phy_aux_power_well_enable,
2787 .disable = hsw_power_well_disable,
2788 .is_enabled = hsw_power_well_enabled,
/*
 * ICL AUX power well control registers.
 * NOTE(review): no .kvmr entry visible here, unlike hsw_power_well_regs —
 * presumably intentional (KVMr control by HW on ICL), but a .kvmr line
 * may also have been dropped by extraction; verify against upstream.
 */
2791 static const struct i915_power_well_regs icl_aux_power_well_regs = {
2792 .bios = ICL_PWR_WELL_CTL_AUX1,
2793 .driver = ICL_PWR_WELL_CTL_AUX2,
2794 .debug = ICL_PWR_WELL_CTL_AUX4,
/* ICL DDI IO power well control registers (BIOS/driver/debug requesters). */
2797 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
2798 .bios = ICL_PWR_WELL_CTL_DDI1,
2799 .driver = ICL_PWR_WELL_CTL_DDI2,
2800 .debug = ICL_PWR_WELL_CTL_DDI4,
/*
 * ICL power wells in enabling order: always-on, PW1 (DMC-managed),
 * DC-off, PW2, PW3 (VGA, pipe B interrupts), DDI IO wells A-F,
 * AUX wells A-F (combo-PHY ops for A/B, Type-C PHY ops for C-F with
 * is_tc_tbt = false), the four Thunderbolt AUX wells (is_tc_tbt = true),
 * and finally PW4 (pipe C interrupts).
 * NOTE(review): entry delimiters and several .name lines appear dropped
 * by extraction — verify against upstream i915 source.
 */
2803 static const struct i915_power_well_desc icl_power_wells[] = {
2805 .name = "always-on",
2807 .domains = POWER_DOMAIN_MASK,
2808 .ops = &i9xx_always_on_power_well_ops,
2809 .id = DISP_PW_ID_NONE,
2812 .name = "power well 1",
2813 /* Handled by the DMC firmware */
2816 .ops = &hsw_power_well_ops,
2817 .id = SKL_DISP_PW_1,
2819 .hsw.regs = &hsw_power_well_regs,
2820 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
2821 .hsw.has_fuses = true,
2826 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
2827 .ops = &gen9_dc_off_power_well_ops,
2828 .id = DISP_PW_ID_NONE,
2831 .name = "power well 2",
2832 .domains = ICL_PW_2_POWER_DOMAINS,
2833 .ops = &hsw_power_well_ops,
2834 .id = SKL_DISP_PW_2,
2836 .hsw.regs = &hsw_power_well_regs,
2837 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
2838 .hsw.has_fuses = true,
2842 .name = "power well 3",
2843 .domains = ICL_PW_3_POWER_DOMAINS,
2844 .ops = &hsw_power_well_ops,
2845 .id = DISP_PW_ID_NONE,
2847 .hsw.regs = &hsw_power_well_regs,
2848 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
2849 .hsw.irq_pipe_mask = BIT(PIPE_B),
2850 .hsw.has_vga = true,
2851 .hsw.has_fuses = true,
2856 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
2857 .ops = &hsw_power_well_ops,
2858 .id = DISP_PW_ID_NONE,
2860 .hsw.regs = &icl_ddi_power_well_regs,
2861 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
2866 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
2867 .ops = &hsw_power_well_ops,
2868 .id = DISP_PW_ID_NONE,
2870 .hsw.regs = &icl_ddi_power_well_regs,
2871 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
2876 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
2877 .ops = &hsw_power_well_ops,
2878 .id = DISP_PW_ID_NONE,
2880 .hsw.regs = &icl_ddi_power_well_regs,
2881 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
2886 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
2887 .ops = &hsw_power_well_ops,
2888 .id = DISP_PW_ID_NONE,
2890 .hsw.regs = &icl_ddi_power_well_regs,
2891 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
2896 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
2897 .ops = &hsw_power_well_ops,
2898 .id = DISP_PW_ID_NONE,
2900 .hsw.regs = &icl_ddi_power_well_regs,
2901 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
2906 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
2907 .ops = &hsw_power_well_ops,
2908 .id = DISP_PW_ID_NONE,
2910 .hsw.regs = &icl_ddi_power_well_regs,
2911 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
2916 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
2917 .ops = &icl_combo_phy_aux_power_well_ops,
2918 .id = DISP_PW_ID_NONE,
2920 .hsw.regs = &icl_aux_power_well_regs,
2921 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
2926 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
2927 .ops = &icl_combo_phy_aux_power_well_ops,
2928 .id = DISP_PW_ID_NONE,
2930 .hsw.regs = &icl_aux_power_well_regs,
2931 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
2936 .domains = ICL_AUX_C_IO_POWER_DOMAINS,
2937 .ops = &icl_tc_phy_aux_power_well_ops,
2938 .id = DISP_PW_ID_NONE,
2940 .hsw.regs = &icl_aux_power_well_regs,
2941 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
2942 .hsw.is_tc_tbt = false,
2947 .domains = ICL_AUX_D_IO_POWER_DOMAINS,
2948 .ops = &icl_tc_phy_aux_power_well_ops,
2949 .id = DISP_PW_ID_NONE,
2951 .hsw.regs = &icl_aux_power_well_regs,
2952 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
2953 .hsw.is_tc_tbt = false,
2958 .domains = ICL_AUX_E_IO_POWER_DOMAINS,
2959 .ops = &icl_tc_phy_aux_power_well_ops,
2960 .id = DISP_PW_ID_NONE,
2962 .hsw.regs = &icl_aux_power_well_regs,
2963 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
2964 .hsw.is_tc_tbt = false,
2969 .domains = ICL_AUX_F_IO_POWER_DOMAINS,
2970 .ops = &icl_tc_phy_aux_power_well_ops,
2971 .id = DISP_PW_ID_NONE,
2973 .hsw.regs = &icl_aux_power_well_regs,
2974 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
2975 .hsw.is_tc_tbt = false,
2980 .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
2981 .ops = &icl_tc_phy_aux_power_well_ops,
2982 .id = DISP_PW_ID_NONE,
2984 .hsw.regs = &icl_aux_power_well_regs,
2985 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
2986 .hsw.is_tc_tbt = true,
2991 .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
2992 .ops = &icl_tc_phy_aux_power_well_ops,
2993 .id = DISP_PW_ID_NONE,
2995 .hsw.regs = &icl_aux_power_well_regs,
2996 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
2997 .hsw.is_tc_tbt = true,
3002 .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
3003 .ops = &icl_tc_phy_aux_power_well_ops,
3004 .id = DISP_PW_ID_NONE,
3006 .hsw.regs = &icl_aux_power_well_regs,
3007 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3008 .hsw.is_tc_tbt = true,
3013 .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
3014 .ops = &icl_tc_phy_aux_power_well_ops,
3015 .id = DISP_PW_ID_NONE,
3017 .hsw.regs = &icl_aux_power_well_regs,
3018 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3019 .hsw.is_tc_tbt = true,
3023 .name = "power well 4",
3024 .domains = ICL_PW_4_POWER_DOMAINS,
3025 .ops = &hsw_power_well_ops,
3026 .id = DISP_PW_ID_NONE,
3028 .hsw.regs = &hsw_power_well_regs,
3029 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3030 .hsw.has_fuses = true,
3031 .hsw.irq_pipe_mask = BIT(PIPE_C),
/*
 * Normalize the disable_power_well module parameter to a boolean: any
 * non-negative value becomes 0/1.
 * NOTE(review): extraction appears to have dropped the "static int"
 * return-type line, body braces and the fallback return for negative
 * (auto) values — verify against upstream i915 source.
 */
3037 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
3038 int disable_power_well)
3040 if (disable_power_well >= 0)
3041 return !!disable_power_well;
/*
 * Compute the mask of DC states the driver may enter, combining the
 * platform's maximum supported DC state with the enable_dc module
 * parameter: -1 means "platform max", out-of-range requests are clamped
 * (with a debug message) or rejected (with an error) back to max.
 * NOTE(review): several lines (max_dc assignments per platform branch,
 * the disable_power_well interaction, final return) appear dropped by
 * extraction — verify against upstream i915 source.
 */
3046 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3053 if (INTEL_GEN(dev_priv) >= 11) {
3056 * DC9 has a separate HW flow from the rest of the DC states,
3057 * not depending on the DMC firmware. It's needed by system
3058 * suspend/resume, so allow it unconditionally.
3060 mask = DC_STATE_EN_DC9;
3061 } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
3064 } else if (IS_GEN9_LP(dev_priv)) {
3066 mask = DC_STATE_EN_DC9;
3072 if (!i915_modparams.disable_power_well)
3075 if (enable_dc >= 0 && enable_dc <= max_dc) {
3076 requested_dc = enable_dc;
3077 } else if (enable_dc == -1) {
3078 requested_dc = max_dc;
3079 } else if (enable_dc > max_dc && enable_dc <= 2) {
3080 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3082 requested_dc = max_dc;
3084 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3085 requested_dc = max_dc;
3088 if (requested_dc > 1)
3089 mask |= DC_STATE_EN_UPTO_DC6;
3090 if (requested_dc > 0)
3091 mask |= DC_STATE_EN_UPTO_DC5;
3093 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
/*
 * Allocate the runtime power-well array from a platform descriptor
 * table, pointing each runtime well at its descriptor and sanity
 * checking (via WARN_ON) that every non-NONE well ID fits in the 64-bit
 * tracking mask and is used at most once.
 * NOTE(review): the return-type line, -ENOMEM path and final return
 * appear dropped by extraction — verify against upstream i915 source.
 */
3099 __set_power_wells(struct i915_power_domains *power_domains,
3100 const struct i915_power_well_desc *power_well_descs,
3101 int power_well_count)
3103 u64 power_well_ids = 0;
3106 power_domains->power_well_count = power_well_count;
3107 power_domains->power_wells =
3108 kcalloc(power_well_count,
3109 sizeof(*power_domains->power_wells),
3111 if (!power_domains->power_wells)
3114 for (i = 0; i < power_well_count; i++) {
3115 enum i915_power_well_id id = power_well_descs[i].id;
3117 power_domains->power_wells[i].desc = &power_well_descs[i];
3119 if (id == DISP_PW_ID_NONE)
3122 WARN_ON(id >= sizeof(power_well_ids) * 8);
3123 WARN_ON(power_well_ids & BIT_ULL(id));
3124 power_well_ids |= BIT_ULL(id);
/*
 * Convenience wrapper: must be invoked on an actual array so that
 * ARRAY_SIZE yields the descriptor count.
 */
3130 #define set_power_wells(power_domains, __power_well_descs) \
3131 __set_power_wells(power_domains, __power_well_descs, \
3132 ARRAY_SIZE(__power_well_descs))
3135 * intel_power_domains_init - initializes the power domain structures
3136 * @dev_priv: i915 device instance
3138 * Initializes the power domain structures for @dev_priv depending upon the
3139 * supported platform.
/*
 * Sanitize the power-related module parameters, compute the allowed DC
 * state mask, and install the platform-specific power well table.
 * Wells are listed in enabling order; disabling happens in reverse.
 * NOTE(review): body braces, the "int err;" declaration and the final
 * "return err;" appear dropped by extraction — verify against upstream.
 */
3141 int intel_power_domains_init(struct drm_i915_private *dev_priv)
3143 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3146 i915_modparams.disable_power_well =
3147 sanitize_disable_power_well_option(dev_priv,
3148 i915_modparams.disable_power_well);
3149 dev_priv->csr.allowed_dc_mask =
3150 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
3152 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
3154 mutex_init(&power_domains->lock);
3157 * The enabling order will be from lower to higher indexed wells,
3158 * the disabling order is reversed.
3160 if (IS_ICELAKE(dev_priv)) {
3161 err = set_power_wells(power_domains, icl_power_wells);
3162 } else if (IS_CANNONLAKE(dev_priv)) {
3163 err = set_power_wells(power_domains, cnl_power_wells);
3166 * DDI and Aux IO are getting enabled for all ports
3167 * regardless the presence or use. So, in order to avoid
3168 * timeouts, lets remove them from the list
3169 * for the SKUs without port F.
3171 if (!IS_CNL_WITH_PORT_F(dev_priv))
3172 power_domains->power_well_count -= 2;
3173 } else if (IS_GEMINILAKE(dev_priv)) {
3174 err = set_power_wells(power_domains, glk_power_wells);
3175 } else if (IS_BROXTON(dev_priv)) {
3176 err = set_power_wells(power_domains, bxt_power_wells);
3177 } else if (IS_GEN9_BC(dev_priv)) {
3178 err = set_power_wells(power_domains, skl_power_wells);
3179 } else if (IS_CHERRYVIEW(dev_priv)) {
3180 err = set_power_wells(power_domains, chv_power_wells);
3181 } else if (IS_BROADWELL(dev_priv)) {
3182 err = set_power_wells(power_domains, bdw_power_wells);
3183 } else if (IS_HASWELL(dev_priv)) {
3184 err = set_power_wells(power_domains, hsw_power_wells);
3185 } else if (IS_VALLEYVIEW(dev_priv)) {
3186 err = set_power_wells(power_domains, vlv_power_wells);
3187 } else if (IS_I830(dev_priv)) {
3188 err = set_power_wells(power_domains, i830_power_wells);
3190 err = set_power_wells(power_domains, i9xx_always_on_power_well);
3197 * intel_power_domains_cleanup - clean up power domains resources
3198 * @dev_priv: i915 device instance
3200 * Release any resources acquired by intel_power_domains_init()
/* Free the power-well array allocated by intel_power_domains_init(). */
3202 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3204 kfree(dev_priv->power_domains.power_wells)_
/*
 * Under the power_domains lock, run each well's sync_hw hook and cache
 * its current hardware enabled state in hw_enabled.
 */
3207 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
3209 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3210 struct i915_power_well *power_well;
3212 mutex_lock(&power_domains->lock);
3213 for_each_power_well(dev_priv, power_well) {
3214 power_well->desc->ops->sync_hw(dev_priv, power_well);
3215 power_well->hw_enabled =
3216 power_well->desc->ops->is_enabled(dev_priv, power_well);
3218 mutex_unlock(&power_domains->lock);
/*
 * Request a DBuf slice on/off via its control register, then verify the
 * state bit reflects the request; logs an error on mismatch.
 * NOTE(review): the settle delay between write and readback and the
 * boolean return statements appear dropped by extraction — verify
 * against upstream i915 source.
 */
3222 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3223 i915_reg_t reg, bool enable)
3227 val = I915_READ(reg);
3228 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3229 I915_WRITE(reg, val);
3233 status = I915_READ(reg) & DBUF_POWER_STATE;
3234 if ((enable && !status) || (!enable && status)) {
3235 DRM_ERROR("DBus power %s timeout!\n",
3236 enable ? "enable" : "disable");
/* Power up the single gen9 DBuf slice. */
3242 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
3244 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true)_
/* Power down the single gen9 DBuf slice. */
3247 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
3249 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false)_
/*
 * Number of DBuf slices on this platform.
 * NOTE(review): both return statements (pre-gen11 count and the gen11+
 * count) appear dropped by extraction — verify against upstream.
 */
3252 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3254 if (INTEL_GEN(dev_priv) < 11)
/*
 * Transition the number of enabled DBuf slices toward req_slices by
 * toggling the second slice (S2); slice 1 stays on. Invalid requests and
 * no-op transitions (already matching, or a request for zero) bail out.
 * NOTE(review): the body braces and the failure handling after the
 * slice toggle appear dropped by extraction — verify against upstream.
 */
3259 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3262 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3265 if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3266 DRM_ERROR("Invalid number of dbuf slices requested\n");
3270 if (req_slices == hw_enabled_slices || req_slices == 0)
3273 if (req_slices > hw_enabled_slices)
3274 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3276 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3279 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
/*
 * Request power for both ICL DBuf slices, verify both report powered
 * (logging an error otherwise), and record two slices as enabled.
 * NOTE(review): a settle delay between the posting read and the status
 * check appears dropped by extraction.
 */
3284 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
3285 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
3285 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
3286 POSTING_READ(DBUF_CTL_S2);
3290 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3291 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3292 DRM_ERROR("DBuf power enable timeout\n");
3294 dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
/*
 * Drop the power request for both ICL DBuf slices, verify both report
 * powered down, and record zero slices as enabled.
 */
3297 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
3299 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
3300 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
3301 POSTING_READ(DBUF_CTL_S2);
3305 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3306 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3307 DRM_ERROR("DBuf power disable timeout!\n");
3309 dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
/* Program the ICL MBus ABOX credit pools and B/BW credits. */
3312 static void icl_mbus_init(struct drm_i915_private *dev_priv)
3316 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3317 MBUS_ABOX_BT_CREDIT_POOL2(16) |
3318 MBUS_ABOX_B_CREDIT(1) |
3319 MBUS_ABOX_BW_CREDIT(1);
3321 I915_WRITE(MBUS_ABOX_CTL, val);
/*
 * Enable or disable the PCH reset handshake. IVB uses its own register
 * and ack bits; later platforms use HSW_NDE_RSTWRN_OPT.
 * NOTE(review): the second function parameter, the IVB register
 * assignment and the set/clear of the bits based on it appear dropped
 * by extraction — verify against upstream i915 source.
 */
3324 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3328 u32 reset_bits, val;
3330 if (IS_IVYBRIDGE(dev_priv)) {
3332 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3334 reg = HSW_NDE_RSTWRN_OPT;
3335 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3338 val = I915_READ(reg);
3345 I915_WRITE(reg, val);
/*
 * SKL display core init sequence: disable DC states, enable the PCH
 * reset handshake, enable PW1 and MISC IO under the power_domains lock,
 * bring up CDCLK and DBuf, and reload the DMC firmware on resume.
 */
3348 static void skl_display_core_init(struct drm_i915_private *dev_priv,
3351 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3352 struct i915_power_well *well;
3354 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3356 /* enable PCH reset handshake */
3357 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3359 /* enable PG1 and Misc I/O */
3360 mutex_lock(&power_domains->lock);
3362 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3363 intel_power_well_enable(dev_priv, well);
3365 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
3366 intel_power_well_enable(dev_priv, well);
3368 mutex_unlock(&power_domains->lock);
3370 skl_init_cdclk(dev_priv);
3372 gen9_dbuf_enable(dev_priv);
3374 if (resume && dev_priv->csr.dmc_payload)
3375 intel_csr_load_program(dev_priv);
/*
 * Reverse of skl_display_core_init: disable DC states, tear down DBuf
 * and CDCLK, then drop only the driver's PW1 request (MISC IO stays
 * enabled per BSpec; DMC may keep PW1 on via its own request).
 */
3378 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
3380 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3381 struct i915_power_well *well;
3383 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3385 gen9_dbuf_disable(dev_priv);
3387 skl_uninit_cdclk(dev_priv);
3389 /* The spec doesn't call for removing the reset handshake flag */
3390 /* disable PG1 and Misc I/O */
3392 mutex_lock(&power_domains->lock);
3395 * BSpec says to keep the MISC IO power well enabled here, only
3396 * remove our request for power well 1.
3397 * Note that even though the driver's request is removed power well 1
3398 * may stay enabled after this due to DMC's own request on it.
3400 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3401 intel_power_well_disable(dev_priv, well);
3403 mutex_unlock(&power_domains->lock);
3405 usleep_range(10, 30); /* 10 us delay per Bspec */
/*
 * BXT display core init: disable DC states, force the PCH reset
 * handshake OFF (no PCH on BXT — see inline comment), enable PW1,
 * bring up CDCLK and DBuf, and reload DMC firmware on resume.
 */
3408 void bxt_display_core_init(struct drm_i915_private *dev_priv,
3411 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3412 struct i915_power_well *well;
3414 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3417 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
3418 * or else the reset will hang because there is no PCH to respond.
3419 * Move the handshake programming to initialization sequence.
3420 * Previously was left up to BIOS.
3422 intel_pch_reset_handshake(dev_priv, false);
3425 mutex_lock(&power_domains->lock);
3427 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3428 intel_power_well_enable(dev_priv, well);
3430 mutex_unlock(&power_domains->lock);
3432 bxt_init_cdclk(dev_priv);
3434 gen9_dbuf_enable(dev_priv);
3436 if (resume && dev_priv->csr.dmc_payload)
3437 intel_csr_load_program(dev_priv);
3440 void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
3442 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3443 struct i915_power_well *well;
3445 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3447 gen9_dbuf_disable(dev_priv);
3449 bxt_uninit_cdclk(dev_priv);
3451 /* The spec doesn't call for removing the reset handshake flag */
3454 * Disable PW1 (PG1).
3455 * Note that even though the driver's request is removed power well 1
3456 * may stay enabled after this due to DMC's own request on it.
3458 mutex_lock(&power_domains->lock);
3460 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3461 intel_power_well_disable(dev_priv, well);
3463 mutex_unlock(&power_domains->lock);
3465 usleep_range(10, 30); /* 10 us delay per Bspec */
3468 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
3470 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3471 struct i915_power_well *well;
3473 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3475 /* 1. Enable PCH Reset Handshake */
3476 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3479 cnl_combo_phys_init(dev_priv);
3482 * 4. Enable Power Well 1 (PG1).
3483 * The AUX IO power wells will be enabled on demand.
3485 mutex_lock(&power_domains->lock);
3486 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3487 intel_power_well_enable(dev_priv, well);
3488 mutex_unlock(&power_domains->lock);
3490 /* 5. Enable CD clock */
3491 cnl_init_cdclk(dev_priv);
3493 /* 6. Enable DBUF */
3494 gen9_dbuf_enable(dev_priv);
3496 if (resume && dev_priv->csr.dmc_payload)
3497 intel_csr_load_program(dev_priv);
3500 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
3502 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3503 struct i915_power_well *well;
3505 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3507 /* 1. Disable all display engine functions -> aready done */
3509 /* 2. Disable DBUF */
3510 gen9_dbuf_disable(dev_priv);
3512 /* 3. Disable CD clock */
3513 cnl_uninit_cdclk(dev_priv);
3516 * 4. Disable Power Well 1 (PG1).
3517 * The AUX IO power wells are toggled on demand, so they are already
3518 * disabled at this point.
3520 mutex_lock(&power_domains->lock);
3521 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3522 intel_power_well_disable(dev_priv, well);
3523 mutex_unlock(&power_domains->lock);
3525 usleep_range(10, 30); /* 10 us delay per Bspec */
3528 cnl_combo_phys_uninit(dev_priv);
3531 void icl_display_core_init(struct drm_i915_private *dev_priv,
3534 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3535 struct i915_power_well *well;
3537 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3539 /* 1. Enable PCH reset handshake. */
3540 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3543 icl_combo_phys_init(dev_priv);
3546 * 4. Enable Power Well 1 (PG1).
3547 * The AUX IO power wells will be enabled on demand.
3549 mutex_lock(&power_domains->lock);
3550 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3551 intel_power_well_enable(dev_priv, well);
3552 mutex_unlock(&power_domains->lock);
3554 /* 5. Enable CDCLK. */
3555 icl_init_cdclk(dev_priv);
3557 /* 6. Enable DBUF. */
3558 icl_dbuf_enable(dev_priv);
3560 /* 7. Setup MBUS. */
3561 icl_mbus_init(dev_priv);
3563 if (resume && dev_priv->csr.dmc_payload)
3564 intel_csr_load_program(dev_priv);
3567 void icl_display_core_uninit(struct drm_i915_private *dev_priv)
3569 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3570 struct i915_power_well *well;
3572 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3574 /* 1. Disable all display engine functions -> aready done */
3576 /* 2. Disable DBUF */
3577 icl_dbuf_disable(dev_priv);
3579 /* 3. Disable CD clock */
3580 icl_uninit_cdclk(dev_priv);
3583 * 4. Disable Power Well 1 (PG1).
3584 * The AUX IO power wells are toggled on demand, so they are already
3585 * disabled at this point.
3587 mutex_lock(&power_domains->lock);
3588 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
3589 intel_power_well_disable(dev_priv, well);
3590 mutex_unlock(&power_domains->lock);
3593 icl_combo_phys_uninit(dev_priv);
3596 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
3598 struct i915_power_well *cmn_bc =
3599 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
3600 struct i915_power_well *cmn_d =
3601 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
3604 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
3605 * workaround never ever read DISPLAY_PHY_CONTROL, and
3606 * instead maintain a shadow copy ourselves. Use the actual
3607 * power well state and lane status to reconstruct the
3608 * expected initial value.
3610 dev_priv->chv_phy_control =
3611 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
3612 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
3613 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
3614 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
3615 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
3618 * If all lanes are disabled we leave the override disabled
3619 * with all power down bits cleared to match the state we
3620 * would use after disabling the port. Otherwise enable the
3621 * override and set the lane powerdown bits accding to the
3622 * current lane status.
3624 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
3625 uint32_t status = I915_READ(DPLL(PIPE_A));
3628 mask = status & DPLL_PORTB_READY_MASK;
3632 dev_priv->chv_phy_control |=
3633 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
3635 dev_priv->chv_phy_control |=
3636 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
3638 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
3642 dev_priv->chv_phy_control |=
3643 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
3645 dev_priv->chv_phy_control |=
3646 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
3648 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
3650 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
3652 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
3655 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
3656 uint32_t status = I915_READ(DPIO_PHY_STATUS);
3659 mask = status & DPLL_PORTD_READY_MASK;
3664 dev_priv->chv_phy_control |=
3665 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
3667 dev_priv->chv_phy_control |=
3668 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
3670 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
3672 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
3674 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
3677 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
3679 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
3680 dev_priv->chv_phy_control);
3683 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
3685 struct i915_power_well *cmn =
3686 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
3687 struct i915_power_well *disp2d =
3688 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
3690 /* If the display might be already active skip this */
3691 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
3692 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
3693 I915_READ(DPIO_CTL) & DPIO_CMNRST)
3696 DRM_DEBUG_KMS("toggling display PHY side reset\n");
3698 /* cmnlane needs DPLL registers */
3699 disp2d->desc->ops->enable(dev_priv, disp2d);
3702 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
3703 * Need to assert and de-assert PHY SB reset by gating the
3704 * common lane power, then un-gating it.
3705 * Simply ungating isn't enough to reset the PHY enough to get
3706 * ports and lanes running.
3708 cmn->desc->ops->disable(dev_priv, cmn);
3711 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
3714 * intel_power_domains_init_hw - initialize hardware power domain state
3715 * @dev_priv: i915 device instance
3716 * @resume: Called from resume code paths or not
3718 * This function initializes the hardware power domain state and enables all
3719 * power wells belonging to the INIT power domain. Power wells in other
3720 * domains (and not in the INIT domain) are referenced or disabled by
3721 * intel_modeset_readout_hw_state(). After that the reference count of each
3722 * power well must match its HW enabled state, see
3723 * intel_power_domains_verify_state().
3725 * It will return with power domains disabled (to be enabled later by
3726 * intel_power_domains_enable()) and must be paired with
3727 * intel_power_domains_fini_hw().
3729 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
3731 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3733 power_domains->initializing = true;
3735 if (IS_ICELAKE(dev_priv)) {
3736 icl_display_core_init(dev_priv, resume);
3737 } else if (IS_CANNONLAKE(dev_priv)) {
3738 cnl_display_core_init(dev_priv, resume);
3739 } else if (IS_GEN9_BC(dev_priv)) {
3740 skl_display_core_init(dev_priv, resume);
3741 } else if (IS_GEN9_LP(dev_priv)) {
3742 bxt_display_core_init(dev_priv, resume);
3743 } else if (IS_CHERRYVIEW(dev_priv)) {
3744 mutex_lock(&power_domains->lock);
3745 chv_phy_control_init(dev_priv);
3746 mutex_unlock(&power_domains->lock);
3747 } else if (IS_VALLEYVIEW(dev_priv)) {
3748 mutex_lock(&power_domains->lock);
3749 vlv_cmnlane_wa(dev_priv);
3750 mutex_unlock(&power_domains->lock);
3751 } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
3752 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
3755 * Keep all power wells enabled for any dependent HW access during
3756 * initialization and to make sure we keep BIOS enabled display HW
3757 * resources powered until display HW readout is complete. We drop
3758 * this reference in intel_power_domains_enable().
3760 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3761 /* Disable power support if the user asked so. */
3762 if (!i915_modparams.disable_power_well)
3763 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3764 intel_power_domains_sync_hw(dev_priv);
3766 power_domains->initializing = false;
3770 * intel_power_domains_fini_hw - deinitialize hw power domain state
3771 * @dev_priv: i915 device instance
3773 * De-initializes the display power domain HW state. It also ensures that the
3774 * device stays powered up so that the driver can be reloaded.
3776 * It must be called with power domains already disabled (after a call to
3777 * intel_power_domains_disable()) and must be paired with
3778 * intel_power_domains_init_hw().
3780 void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
3782 /* Keep the power well enabled, but cancel its rpm wakeref. */
3783 intel_runtime_pm_put(dev_priv);
3785 /* Remove the refcount we took to keep power well support disabled. */
3786 if (!i915_modparams.disable_power_well)
3787 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3789 intel_power_domains_verify_state(dev_priv);
3793 * intel_power_domains_enable - enable toggling of display power wells
3794 * @dev_priv: i915 device instance
3796 * Enable the ondemand enabling/disabling of the display power wells. Note that
3797 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
3798 * only at specific points of the display modeset sequence, thus they are not
3799 * affected by the intel_power_domains_enable()/disable() calls. The purpose
3800 * of these function is to keep the rest of power wells enabled until the end
3801 * of display HW readout (which will acquire the power references reflecting
3802 * the current HW state).
3804 void intel_power_domains_enable(struct drm_i915_private *dev_priv)
3806 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3808 intel_power_domains_verify_state(dev_priv);
3812 * intel_power_domains_disable - disable toggling of display power wells
3813 * @dev_priv: i915 device instance
3815 * Disable the ondemand enabling/disabling of the display power wells. See
3816 * intel_power_domains_enable() for which power wells this call controls.
3818 void intel_power_domains_disable(struct drm_i915_private *dev_priv)
3820 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3822 intel_power_domains_verify_state(dev_priv);
3826 * intel_power_domains_suspend - suspend power domain state
3827 * @dev_priv: i915 device instance
3828 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
3830 * This function prepares the hardware power domain state before entering
3833 * It must be called with power domains already disabled (after a call to
3834 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
3836 void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
3837 enum i915_drm_suspend_mode suspend_mode)
3839 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3841 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3844 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
3845 * support don't manually deinit the power domains. This also means the
3846 * CSR/DMC firmware will stay active, it will power down any HW
3847 * resources as required and also enable deeper system power states
3848 * that would be blocked if the firmware was inactive.
3850 if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
3851 suspend_mode == I915_DRM_SUSPEND_IDLE &&
3852 dev_priv->csr.dmc_payload != NULL) {
3853 intel_power_domains_verify_state(dev_priv);
3858 * Even if power well support was disabled we still want to disable
3859 * power wells if power domains must be deinitialized for suspend.
3861 if (!i915_modparams.disable_power_well) {
3862 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3863 intel_power_domains_verify_state(dev_priv);
3866 if (IS_ICELAKE(dev_priv))
3867 icl_display_core_uninit(dev_priv);
3868 else if (IS_CANNONLAKE(dev_priv))
3869 cnl_display_core_uninit(dev_priv);
3870 else if (IS_GEN9_BC(dev_priv))
3871 skl_display_core_uninit(dev_priv);
3872 else if (IS_GEN9_LP(dev_priv))
3873 bxt_display_core_uninit(dev_priv);
3875 power_domains->display_core_suspended = true;
3879 * intel_power_domains_resume - resume power domain state
3880 * @dev_priv: i915 device instance
3882 * This function resume the hardware power domain state during system resume.
3884 * It will return with power domain support disabled (to be enabled later by
3885 * intel_power_domains_enable()) and must be paired with
3886 * intel_power_domains_suspend().
3888 void intel_power_domains_resume(struct drm_i915_private *dev_priv)
3890 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3892 if (power_domains->display_core_suspended) {
3893 intel_power_domains_init_hw(dev_priv, true);
3894 power_domains->display_core_suspended = false;
3896 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3899 intel_power_domains_verify_state(dev_priv);
3902 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
3904 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3906 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3907 struct i915_power_well *power_well;
3909 for_each_power_well(dev_priv, power_well) {
3910 enum intel_display_power_domain domain;
3912 DRM_DEBUG_DRIVER("%-25s %d\n",
3913 power_well->desc->name, power_well->count);
3915 for_each_power_domain(domain, power_well->desc->domains)
3916 DRM_DEBUG_DRIVER(" %-23s %d\n",
3917 intel_display_power_domain_str(domain),
3918 power_domains->domain_use_count[domain]);
3923 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3924 * @dev_priv: i915 device instance
3926 * Verify if the reference count of each power well matches its HW enabled
3927 * state and the total refcount of the domains it belongs to. This must be
3928 * called after modeset HW state sanitization, which is responsible for
3929 * acquiring reference counts for any power wells in use and disabling the
3930 * ones left on by BIOS but not required by any active output.
3932 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3934 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3935 struct i915_power_well *power_well;
3936 bool dump_domain_info;
3938 mutex_lock(&power_domains->lock);
3940 dump_domain_info = false;
3941 for_each_power_well(dev_priv, power_well) {
3942 enum intel_display_power_domain domain;
3946 enabled = power_well->desc->ops->is_enabled(dev_priv,
3948 if ((power_well->count || power_well->desc->always_on) !=
3950 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
3951 power_well->desc->name,
3952 power_well->count, enabled);
3955 for_each_power_domain(domain, power_well->desc->domains)
3956 domains_count += power_domains->domain_use_count[domain];
3958 if (power_well->count != domains_count) {
3959 DRM_ERROR("power well %s refcount/domain refcount mismatch "
3960 "(refcount %d/domains refcount %d)\n",
3961 power_well->desc->name, power_well->count,
3963 dump_domain_info = true;
3967 if (dump_domain_info) {
3971 intel_power_domains_dump_info(dev_priv);
3976 mutex_unlock(&power_domains->lock);
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
{
}
3988 * intel_runtime_pm_get - grab a runtime pm reference
3989 * @dev_priv: i915 device instance
3991 * This function grabs a device-level runtime pm reference (mostly used for GEM
3992 * code to ensure the GTT or GT is on) and ensures that it is powered up.
3994 * Any runtime pm reference obtained by this function must have a symmetric
3995 * call to intel_runtime_pm_put() to release the reference again.
3997 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3999 struct pci_dev *pdev = dev_priv->drm.pdev;
4000 struct device *kdev = &pdev->dev;
4003 ret = pm_runtime_get_sync(kdev);
4004 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4006 atomic_inc(&dev_priv->runtime_pm.wakeref_count);
4007 assert_rpm_wakelock_held(dev_priv);
4011 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
4012 * @dev_priv: i915 device instance
4014 * This function grabs a device-level runtime pm reference if the device is
4015 * already in use and ensures that it is powered up. It is illegal to try
4016 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
4018 * Any runtime pm reference obtained by this function must have a symmetric
4019 * call to intel_runtime_pm_put() to release the reference again.
4021 * Returns: True if the wakeref was acquired, or False otherwise.
4023 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
4025 if (IS_ENABLED(CONFIG_PM)) {
4026 struct pci_dev *pdev = dev_priv->drm.pdev;
4027 struct device *kdev = &pdev->dev;
4030 * In cases runtime PM is disabled by the RPM core and we get
4031 * an -EINVAL return value we are not supposed to call this
4032 * function, since the power state is undefined. This applies
4033 * atm to the late/early system suspend/resume handlers.
4035 if (pm_runtime_get_if_in_use(kdev) <= 0)
4039 atomic_inc(&dev_priv->runtime_pm.wakeref_count);
4040 assert_rpm_wakelock_held(dev_priv);
4046 * intel_runtime_pm_get_noresume - grab a runtime pm reference
4047 * @dev_priv: i915 device instance
4049 * This function grabs a device-level runtime pm reference (mostly used for GEM
4050 * code to ensure the GTT or GT is on).
4052 * It will _not_ power up the device but instead only check that it's powered
4053 * on. Therefore it is only valid to call this functions from contexts where
4054 * the device is known to be powered up and where trying to power it up would
4055 * result in hilarity and deadlocks. That pretty much means only the system
4056 * suspend/resume code where this is used to grab runtime pm references for
4057 * delayed setup down in work items.
4059 * Any runtime pm reference obtained by this function must have a symmetric
4060 * call to intel_runtime_pm_put() to release the reference again.
4062 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
4064 struct pci_dev *pdev = dev_priv->drm.pdev;
4065 struct device *kdev = &pdev->dev;
4067 assert_rpm_wakelock_held(dev_priv);
4068 pm_runtime_get_noresume(kdev);
4070 atomic_inc(&dev_priv->runtime_pm.wakeref_count);
4074 * intel_runtime_pm_put - release a runtime pm reference
4075 * @dev_priv: i915 device instance
4077 * This function drops the device-level runtime pm reference obtained by
4078 * intel_runtime_pm_get() and might power down the corresponding
4079 * hardware block right away if this is the last reference.
4081 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
4083 struct pci_dev *pdev = dev_priv->drm.pdev;
4084 struct device *kdev = &pdev->dev;
4086 assert_rpm_wakelock_held(dev_priv);
4087 atomic_dec(&dev_priv->runtime_pm.wakeref_count);
4089 pm_runtime_mark_last_busy(kdev);
4090 pm_runtime_put_autosuspend(kdev);
4094 * intel_runtime_pm_enable - enable runtime pm
4095 * @dev_priv: i915 device instance
4097 * This function enables runtime pm at the end of the driver load sequence.
4099 * Note that this function does currently not enable runtime pm for the
4100 * subordinate display power domains. That is done by
4101 * intel_power_domains_enable().
4103 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
4105 struct pci_dev *pdev = dev_priv->drm.pdev;
4106 struct device *kdev = &pdev->dev;
4109 * Disable the system suspend direct complete optimization, which can
4110 * leave the device suspended skipping the driver's suspend handlers
4111 * if the device was already runtime suspended. This is needed due to
4112 * the difference in our runtime and system suspend sequence and
4113 * becaue the HDA driver may require us to enable the audio power
4114 * domain during system suspend.
4116 dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
4118 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
4119 pm_runtime_mark_last_busy(kdev);
4122 * Take a permanent reference to disable the RPM functionality and drop
4123 * it only when unloading the driver. Use the low level get/put helpers,
4124 * so the driver's own RPM reference tracking asserts also work on
4125 * platforms without RPM support.
4127 if (!HAS_RUNTIME_PM(dev_priv)) {
4130 pm_runtime_dont_use_autosuspend(kdev);
4131 ret = pm_runtime_get_sync(kdev);
4132 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4134 pm_runtime_use_autosuspend(kdev);
4138 * The core calls the driver load handler with an RPM reference held.
4139 * We drop that here and will reacquire it during unloading in
4140 * intel_power_domains_fini().
4142 pm_runtime_put_autosuspend(kdev);
4145 void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
4147 struct pci_dev *pdev = dev_priv->drm.pdev;
4148 struct device *kdev = &pdev->dev;
4150 /* Transfer rpm ownership back to core */
4151 WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
4152 "Failed to pass rpm ownership back to core\n");
4154 pm_runtime_dont_use_autosuspend(kdev);
4156 if (!HAS_RUNTIME_PM(dev_priv))
4157 pm_runtime_put(kdev);