2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <linux/string_helpers.h>
28 #include "intel_display_types.h"
29 #include "intel_dkl_phy.h"
30 #include "intel_dkl_phy_regs.h"
31 #include "intel_dpio_phy.h"
32 #include "intel_dpll.h"
33 #include "intel_dpll_mgr.h"
34 #include "intel_hti.h"
35 #include "intel_mg_phy_regs.h"
36 #include "intel_pch_refclk.h"
42 * Display PLLs used for driving outputs vary by platform. While some have
43 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
44 * from a pool. In the latter scenario, it is possible that multiple pipes
45 * share a PLL if their configurations match.
47 * This file provides an abstraction over display PLLs. The function
48 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
49 * users of a PLL are tracked and that tracking is integrated with the atomic
50 * modset interface. During an atomic operation, required PLLs can be reserved
51 * for a given CRTC and encoder configuration by calling
52 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
53 * with intel_release_shared_dplls().
54 * Changes to the users are first staged in the atomic state, and then made
55 * effective by calling intel_shared_dpll_swap_state() during the atomic
59 /* platform specific hooks for managing DPLLs */
60 struct intel_shared_dpll_funcs {
62 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
63 * the pll is not already enabled.
65 void (*enable)(struct drm_i915_private *i915,
66 struct intel_shared_dpll *pll);
69 * Hook for disabling the pll, called from intel_disable_shared_dpll()
70 * only when it is safe to disable the pll, i.e., there are no more
71 * tracked users for it.
73 void (*disable)(struct drm_i915_private *i915,
74 struct intel_shared_dpll *pll);
77 * Hook for reading the values currently programmed to the DPLL
78 * registers. This is used for initial hw state readout and state
79 * verification after a mode set.
81 bool (*get_hw_state)(struct drm_i915_private *i915,
82 struct intel_shared_dpll *pll,
83 struct intel_dpll_hw_state *hw_state);
86 * Hook for calculating the pll's output frequency based on its passed
89 int (*get_freq)(struct drm_i915_private *i915,
90 const struct intel_shared_dpll *pll,
91 const struct intel_dpll_hw_state *pll_state);
/*
 * Per-platform dispatch table for managing the platform's pool of shared
 * DPLLs; selected once at intel_shared_dpll_init() time.
 */
struct intel_dpll_mgr {
	/* NULL-terminated table describing this platform's PLLs */
	const struct dpll_info *dpll_info;

	/* compute the hw state for the PLL(s) needed by @crtc/@encoder */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the PLL(s) needed by @crtc/@encoder in the atomic state */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional: retarget an already reserved PLL to @encoder */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional: refresh the cached reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* dump @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
114 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
115 struct intel_shared_dpll_state *shared_dpll)
117 enum intel_dpll_id i;
119 /* Copy shared dpll state */
120 for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
121 struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
123 shared_dpll[i] = pll->state;
127 static struct intel_shared_dpll_state *
128 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
130 struct intel_atomic_state *state = to_intel_atomic_state(s);
132 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
134 if (!state->dpll_set) {
135 state->dpll_set = true;
137 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
141 return state->shared_dpll;
145 * intel_get_shared_dpll_by_id - get a DPLL given its id
146 * @dev_priv: i915 device instance
150 * A pointer to the DPLL with @id
152 struct intel_shared_dpll *
153 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
154 enum intel_dpll_id id)
156 return &dev_priv->display.dpll.shared_dplls[id];
160 void assert_shared_dpll(struct drm_i915_private *dev_priv,
161 struct intel_shared_dpll *pll,
165 struct intel_dpll_hw_state hw_state;
167 if (drm_WARN(&dev_priv->drm, !pll,
168 "asserting DPLL %s with no DPLL\n", str_on_off(state)))
171 cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
172 I915_STATE_WARN(cur_state != state,
173 "%s assertion failure (expected %s, current %s)\n",
174 pll->info->name, str_on_off(state),
175 str_on_off(cur_state));
178 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
180 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
183 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
185 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
189 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
190 struct intel_shared_dpll *pll)
193 return DG1_DPLL_ENABLE(pll->info->id);
194 else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
195 return MG_PLL_ENABLE(0);
197 return ICL_DPLL_ENABLE(pll->info->id);
201 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
202 struct intel_shared_dpll *pll)
204 const enum intel_dpll_id id = pll->info->id;
205 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
207 if (IS_ALDERLAKE_P(i915))
208 return ADLP_PORTTC_PLL_ENABLE(tc_port);
210 return MG_PLL_ENABLE(tc_port);
214 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
215 * @crtc_state: CRTC, and its state, which has a shared DPLL
217 * Enable the shared DPLL used by @crtc.
219 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
221 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
222 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
223 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
224 unsigned int pipe_mask = BIT(crtc->pipe);
225 unsigned int old_mask;
227 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
230 mutex_lock(&dev_priv->display.dpll.lock);
231 old_mask = pll->active_mask;
233 if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
234 drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
237 pll->active_mask |= pipe_mask;
239 drm_dbg_kms(&dev_priv->drm,
240 "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
241 pll->info->name, pll->active_mask, pll->on,
242 crtc->base.base.id, crtc->base.name);
245 drm_WARN_ON(&dev_priv->drm, !pll->on);
246 assert_shared_dpll_enabled(dev_priv, pll);
249 drm_WARN_ON(&dev_priv->drm, pll->on);
251 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
252 pll->info->funcs->enable(dev_priv, pll);
256 mutex_unlock(&dev_priv->display.dpll.lock);
260 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
261 * @crtc_state: CRTC, and its state, which has a shared DPLL
263 * Disable the shared DPLL used by @crtc.
265 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
267 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
268 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
269 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
270 unsigned int pipe_mask = BIT(crtc->pipe);
272 /* PCH only available on ILK+ */
273 if (DISPLAY_VER(dev_priv) < 5)
279 mutex_lock(&dev_priv->display.dpll.lock);
280 if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
281 "%s not used by [CRTC:%d:%s]\n", pll->info->name,
282 crtc->base.base.id, crtc->base.name))
285 drm_dbg_kms(&dev_priv->drm,
286 "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
287 pll->info->name, pll->active_mask, pll->on,
288 crtc->base.base.id, crtc->base.name);
290 assert_shared_dpll_enabled(dev_priv, pll);
291 drm_WARN_ON(&dev_priv->drm, !pll->on);
293 pll->active_mask &= ~pipe_mask;
294 if (pll->active_mask)
297 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
298 pll->info->funcs->disable(dev_priv, pll);
302 mutex_unlock(&dev_priv->display.dpll.lock);
305 static struct intel_shared_dpll *
306 intel_find_shared_dpll(struct intel_atomic_state *state,
307 const struct intel_crtc *crtc,
308 const struct intel_dpll_hw_state *pll_state,
309 unsigned long dpll_mask)
311 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
312 struct intel_shared_dpll *pll, *unused_pll = NULL;
313 struct intel_shared_dpll_state *shared_dpll;
314 enum intel_dpll_id i;
316 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
318 drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
320 for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
321 pll = &dev_priv->display.dpll.shared_dplls[i];
323 /* Only want to check enabled timings first */
324 if (shared_dpll[i].pipe_mask == 0) {
330 if (memcmp(pll_state,
331 &shared_dpll[i].hw_state,
332 sizeof(*pll_state)) == 0) {
333 drm_dbg_kms(&dev_priv->drm,
334 "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
335 crtc->base.base.id, crtc->base.name,
337 shared_dpll[i].pipe_mask,
343 /* Ok no matching timings, maybe there's a free one? */
345 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
346 crtc->base.base.id, crtc->base.name,
347 unused_pll->info->name);
355 intel_reference_shared_dpll(struct intel_atomic_state *state,
356 const struct intel_crtc *crtc,
357 const struct intel_shared_dpll *pll,
358 const struct intel_dpll_hw_state *pll_state)
360 struct drm_i915_private *i915 = to_i915(state->base.dev);
361 struct intel_shared_dpll_state *shared_dpll;
362 const enum intel_dpll_id id = pll->info->id;
364 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
366 if (shared_dpll[id].pipe_mask == 0)
367 shared_dpll[id].hw_state = *pll_state;
369 drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) != 0);
371 shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
373 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
374 crtc->base.base.id, crtc->base.name, pll->info->name);
377 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
378 const struct intel_crtc *crtc,
379 const struct intel_shared_dpll *pll)
381 struct drm_i915_private *i915 = to_i915(state->base.dev);
382 struct intel_shared_dpll_state *shared_dpll;
383 const enum intel_dpll_id id = pll->info->id;
385 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
387 drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) == 0);
389 shared_dpll[id].pipe_mask &= ~BIT(crtc->pipe);
391 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
392 crtc->base.base.id, crtc->base.name, pll->info->name);
395 static void intel_put_dpll(struct intel_atomic_state *state,
396 struct intel_crtc *crtc)
398 const struct intel_crtc_state *old_crtc_state =
399 intel_atomic_get_old_crtc_state(state, crtc);
400 struct intel_crtc_state *new_crtc_state =
401 intel_atomic_get_new_crtc_state(state, crtc);
403 new_crtc_state->shared_dpll = NULL;
405 if (!old_crtc_state->shared_dpll)
408 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
412 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
413 * @state: atomic state
415 * This is the dpll version of drm_atomic_helper_swap_state() since the
416 * helper does not handle driver-specific global state.
418 * For consistency with atomic helpers this function does a complete swap,
419 * i.e. it also puts the current state into @state, even though there is no
420 * need for that at this moment.
422 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
424 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
425 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
426 enum intel_dpll_id i;
428 if (!state->dpll_set)
431 for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
432 struct intel_shared_dpll *pll =
433 &dev_priv->display.dpll.shared_dplls[i];
435 swap(pll->state, shared_dpll[i]);
439 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
440 struct intel_shared_dpll *pll,
441 struct intel_dpll_hw_state *hw_state)
443 const enum intel_dpll_id id = pll->info->id;
444 intel_wakeref_t wakeref;
447 wakeref = intel_display_power_get_if_enabled(dev_priv,
448 POWER_DOMAIN_DISPLAY_CORE);
452 val = intel_de_read(dev_priv, PCH_DPLL(id));
453 hw_state->dpll = val;
454 hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
455 hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
457 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
459 return val & DPLL_VCO_ENABLE;
462 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
467 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
469 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
470 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
471 DREF_SUPERSPREAD_SOURCE_MASK));
472 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
475 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
476 struct intel_shared_dpll *pll)
478 const enum intel_dpll_id id = pll->info->id;
480 /* PCH refclock must be enabled first */
481 ibx_assert_pch_refclk_enabled(dev_priv);
483 intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
484 intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
486 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
488 /* Wait for the clocks to stabilize. */
489 intel_de_posting_read(dev_priv, PCH_DPLL(id));
492 /* The pixel multiplier can only be updated once the
493 * DPLL is enabled and the clocks are stable.
497 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
498 intel_de_posting_read(dev_priv, PCH_DPLL(id));
502 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
503 struct intel_shared_dpll *pll)
505 const enum intel_dpll_id id = pll->info->id;
507 intel_de_write(dev_priv, PCH_DPLL(id), 0);
508 intel_de_posting_read(dev_priv, PCH_DPLL(id));
512 static int ibx_compute_dpll(struct intel_atomic_state *state,
513 struct intel_crtc *crtc,
514 struct intel_encoder *encoder)
519 static int ibx_get_dpll(struct intel_atomic_state *state,
520 struct intel_crtc *crtc,
521 struct intel_encoder *encoder)
523 struct intel_crtc_state *crtc_state =
524 intel_atomic_get_new_crtc_state(state, crtc);
525 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
526 struct intel_shared_dpll *pll;
527 enum intel_dpll_id i;
529 if (HAS_PCH_IBX(dev_priv)) {
530 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
531 i = (enum intel_dpll_id) crtc->pipe;
532 pll = &dev_priv->display.dpll.shared_dplls[i];
534 drm_dbg_kms(&dev_priv->drm,
535 "[CRTC:%d:%s] using pre-allocated %s\n",
536 crtc->base.base.id, crtc->base.name,
539 pll = intel_find_shared_dpll(state, crtc,
540 &crtc_state->dpll_hw_state,
541 BIT(DPLL_ID_PCH_PLL_B) |
542 BIT(DPLL_ID_PCH_PLL_A));
548 /* reference the pll */
549 intel_reference_shared_dpll(state, crtc,
550 pll, &crtc_state->dpll_hw_state);
552 crtc_state->shared_dpll = pll;
557 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
558 const struct intel_dpll_hw_state *hw_state)
560 drm_dbg_kms(&dev_priv->drm,
561 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
562 "fp0: 0x%x, fp1: 0x%x\n",
569 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
570 .enable = ibx_pch_dpll_enable,
571 .disable = ibx_pch_dpll_disable,
572 .get_hw_state = ibx_pch_dpll_get_hw_state,
575 static const struct dpll_info pch_plls[] = {
576 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
577 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
581 static const struct intel_dpll_mgr pch_pll_mgr = {
582 .dpll_info = pch_plls,
583 .compute_dplls = ibx_compute_dpll,
584 .get_dplls = ibx_get_dpll,
585 .put_dplls = intel_put_dpll,
586 .dump_hw_state = ibx_dump_hw_state,
589 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
590 struct intel_shared_dpll *pll)
592 const enum intel_dpll_id id = pll->info->id;
594 intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
595 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
599 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
600 struct intel_shared_dpll *pll)
602 intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
603 intel_de_posting_read(dev_priv, SPLL_CTL);
607 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
608 struct intel_shared_dpll *pll)
610 const enum intel_dpll_id id = pll->info->id;
612 intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
613 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
616 * Try to set up the PCH reference clock once all DPLLs
617 * that depend on it have been shut down.
619 if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
620 intel_init_pch_refclk(dev_priv);
623 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
624 struct intel_shared_dpll *pll)
626 enum intel_dpll_id id = pll->info->id;
628 intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
629 intel_de_posting_read(dev_priv, SPLL_CTL);
632 * Try to set up the PCH reference clock once all DPLLs
633 * that depend on it have been shut down.
635 if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
636 intel_init_pch_refclk(dev_priv);
639 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
640 struct intel_shared_dpll *pll,
641 struct intel_dpll_hw_state *hw_state)
643 const enum intel_dpll_id id = pll->info->id;
644 intel_wakeref_t wakeref;
647 wakeref = intel_display_power_get_if_enabled(dev_priv,
648 POWER_DOMAIN_DISPLAY_CORE);
652 val = intel_de_read(dev_priv, WRPLL_CTL(id));
653 hw_state->wrpll = val;
655 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
657 return val & WRPLL_PLL_ENABLE;
660 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
661 struct intel_shared_dpll *pll,
662 struct intel_dpll_hw_state *hw_state)
664 intel_wakeref_t wakeref;
667 wakeref = intel_display_power_get_if_enabled(dev_priv,
668 POWER_DOMAIN_DISPLAY_CORE);
672 val = intel_de_read(dev_priv, SPLL_CTL);
673 hw_state->spll = val;
675 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
677 return val & SPLL_PLL_ENABLE;
/* LC PLL frequency in MHz, and the same in units of 2 kHz for u64 math. */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider search range (even values only). */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate WRPLL divider triple: post (p), feedback (n2), reference (r2). */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
/*
 * hsw_wrpll_get_budget_for_freq() - PPM error budget for WRPLL dividers
 * at the given pixel @clock.
 * NOTE(review): only the signature line survives in this extraction; the
 * function body (a large switch mapping known pixel clocks to budgets) is
 * missing from this chunk and must be restored from the original file.
 */
697 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
761 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
762 unsigned int r2, unsigned int n2,
764 struct hsw_wrpll_rnp *best)
766 u64 a, b, c, d, diff, diff_best;
768 /* No best (r,n,p) yet */
777 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
781 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
784 * and we would like delta <= budget.
786 * If the discrepancy is above the PPM-based budget, always prefer to
787 * improve upon the previous solution. However, if you're within the
788 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
790 a = freq2k * budget * p * r2;
791 b = freq2k * budget * best->p * best->r2;
792 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
793 diff_best = abs_diff(freq2k * best->p * best->r2,
794 LC_FREQ_2K * best->n2);
796 d = 1000000 * diff_best;
798 if (a < c && b < d) {
799 /* If both are above the budget, pick the closer */
800 if (best->p * best->r2 * diff < p * r2 * diff_best) {
805 } else if (a >= c && b < d) {
806 /* If A is below the threshold but B is above it? Update. */
810 } else if (a >= c && b >= d) {
811 /* Both are below the limit, so pick the higher n2/(r2*r2) */
812 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
818 /* Otherwise a < c && b >= d, do nothing */
822 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
823 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
827 struct hsw_wrpll_rnp best = {};
830 freq2k = clock / 100;
832 budget = hsw_wrpll_get_budget_for_freq(clock);
834 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
835 * and directly pass the LC PLL to it. */
836 if (freq2k == 5400000) {
844 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
847 * We want R so that REF_MIN <= Ref <= REF_MAX.
848 * Injecting R2 = 2 * R gives:
849 * REF_MAX * r2 > LC_FREQ * 2 and
850 * REF_MIN * r2 < LC_FREQ * 2
852 * Which means the desired boundaries for r2 are:
853 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
856 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
857 r2 <= LC_FREQ * 2 / REF_MIN;
861 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
863 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
864 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
865 * VCO_MAX * r2 > n2 * LC_FREQ and
866 * VCO_MIN * r2 < n2 * LC_FREQ)
868 * Which means the desired boundaries for n2 are:
869 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
871 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
872 n2 <= VCO_MAX * r2 / LC_FREQ;
875 for (p = P_MIN; p <= P_MAX; p += P_INC)
876 hsw_wrpll_update_rnp(freq2k, budget,
886 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
887 const struct intel_shared_dpll *pll,
888 const struct intel_dpll_hw_state *pll_state)
892 u32 wrpll = pll_state->wrpll;
894 switch (wrpll & WRPLL_REF_MASK) {
895 case WRPLL_REF_SPECIAL_HSW:
896 /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
897 if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
898 refclk = dev_priv->display.dpll.ref_clks.nssc;
902 case WRPLL_REF_PCH_SSC:
904 * We could calculate spread here, but our checking
905 * code only cares about 5% accuracy, and spread is a max of
908 refclk = dev_priv->display.dpll.ref_clks.ssc;
910 case WRPLL_REF_LCPLL:
918 r = wrpll & WRPLL_DIVIDER_REF_MASK;
919 p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
920 n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
922 /* Convert to KHz, p & r have a fixed point portion */
923 return (refclk * n / 10) / (p * r) * 2;
927 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
928 struct intel_crtc *crtc)
930 struct drm_i915_private *i915 = to_i915(state->base.dev);
931 struct intel_crtc_state *crtc_state =
932 intel_atomic_get_new_crtc_state(state, crtc);
933 unsigned int p, n2, r2;
935 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
937 crtc_state->dpll_hw_state.wrpll =
938 WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
939 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
940 WRPLL_DIVIDER_POST(p);
942 crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
943 &crtc_state->dpll_hw_state);
948 static struct intel_shared_dpll *
949 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
950 struct intel_crtc *crtc)
952 struct intel_crtc_state *crtc_state =
953 intel_atomic_get_new_crtc_state(state, crtc);
955 return intel_find_shared_dpll(state, crtc,
956 &crtc_state->dpll_hw_state,
957 BIT(DPLL_ID_WRPLL2) |
958 BIT(DPLL_ID_WRPLL1));
962 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
964 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
965 int clock = crtc_state->port_clock;
973 drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
979 static struct intel_shared_dpll *
980 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
982 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
983 struct intel_shared_dpll *pll;
984 enum intel_dpll_id pll_id;
985 int clock = crtc_state->port_clock;
989 pll_id = DPLL_ID_LCPLL_810;
992 pll_id = DPLL_ID_LCPLL_1350;
995 pll_id = DPLL_ID_LCPLL_2700;
998 MISSING_CASE(clock / 2);
1002 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1010 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1011 const struct intel_shared_dpll *pll,
1012 const struct intel_dpll_hw_state *pll_state)
1016 switch (pll->info->id) {
1017 case DPLL_ID_LCPLL_810:
1020 case DPLL_ID_LCPLL_1350:
1021 link_clock = 135000;
1023 case DPLL_ID_LCPLL_2700:
1024 link_clock = 270000;
1027 drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1031 return link_clock * 2;
1035 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1036 struct intel_crtc *crtc)
1038 struct intel_crtc_state *crtc_state =
1039 intel_atomic_get_new_crtc_state(state, crtc);
1041 if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1044 crtc_state->dpll_hw_state.spll =
1045 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1050 static struct intel_shared_dpll *
1051 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1052 struct intel_crtc *crtc)
1054 struct intel_crtc_state *crtc_state =
1055 intel_atomic_get_new_crtc_state(state, crtc);
1057 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1061 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1062 const struct intel_shared_dpll *pll,
1063 const struct intel_dpll_hw_state *pll_state)
1067 switch (pll_state->spll & SPLL_FREQ_MASK) {
1068 case SPLL_FREQ_810MHz:
1071 case SPLL_FREQ_1350MHz:
1072 link_clock = 135000;
1074 case SPLL_FREQ_2700MHz:
1075 link_clock = 270000;
1078 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1082 return link_clock * 2;
1085 static int hsw_compute_dpll(struct intel_atomic_state *state,
1086 struct intel_crtc *crtc,
1087 struct intel_encoder *encoder)
1089 struct intel_crtc_state *crtc_state =
1090 intel_atomic_get_new_crtc_state(state, crtc);
1092 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1093 return hsw_ddi_wrpll_compute_dpll(state, crtc);
1094 else if (intel_crtc_has_dp_encoder(crtc_state))
1095 return hsw_ddi_lcpll_compute_dpll(crtc_state);
1096 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1097 return hsw_ddi_spll_compute_dpll(state, crtc);
1102 static int hsw_get_dpll(struct intel_atomic_state *state,
1103 struct intel_crtc *crtc,
1104 struct intel_encoder *encoder)
1106 struct intel_crtc_state *crtc_state =
1107 intel_atomic_get_new_crtc_state(state, crtc);
1108 struct intel_shared_dpll *pll = NULL;
1110 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1111 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1112 else if (intel_crtc_has_dp_encoder(crtc_state))
1113 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1114 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1115 pll = hsw_ddi_spll_get_dpll(state, crtc);
1120 intel_reference_shared_dpll(state, crtc,
1121 pll, &crtc_state->dpll_hw_state);
1123 crtc_state->shared_dpll = pll;
1128 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1130 i915->display.dpll.ref_clks.ssc = 135000;
1131 /* Non-SSC is only used on non-ULT HSW. */
1132 if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1133 i915->display.dpll.ref_clks.nssc = 24000;
1135 i915->display.dpll.ref_clks.nssc = 135000;
1138 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1139 const struct intel_dpll_hw_state *hw_state)
1141 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1142 hw_state->wrpll, hw_state->spll);
1145 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1146 .enable = hsw_ddi_wrpll_enable,
1147 .disable = hsw_ddi_wrpll_disable,
1148 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
1149 .get_freq = hsw_ddi_wrpll_get_freq,
1152 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1153 .enable = hsw_ddi_spll_enable,
1154 .disable = hsw_ddi_spll_disable,
1155 .get_hw_state = hsw_ddi_spll_get_hw_state,
1156 .get_freq = hsw_ddi_spll_get_freq,
1159 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1160 struct intel_shared_dpll *pll)
1164 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1165 struct intel_shared_dpll *pll)
1169 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1170 struct intel_shared_dpll *pll,
1171 struct intel_dpll_hw_state *hw_state)
1176 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1177 .enable = hsw_ddi_lcpll_enable,
1178 .disable = hsw_ddi_lcpll_disable,
1179 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
1180 .get_freq = hsw_ddi_lcpll_get_freq,
1183 static const struct dpll_info hsw_plls[] = {
1184 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1185 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1186 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1187 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
1188 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1189 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1193 static const struct intel_dpll_mgr hsw_pll_mgr = {
1194 .dpll_info = hsw_plls,
1195 .compute_dplls = hsw_compute_dpll,
1196 .get_dplls = hsw_get_dpll,
1197 .put_dplls = intel_put_dpll,
1198 .update_ref_clks = hsw_update_dpll_ref_clks,
1199 .dump_hw_state = hsw_dump_hw_state,
1202 struct skl_dpll_regs {
1203 i915_reg_t ctl, cfgcr1, cfgcr2;
1206 /* this array is indexed by the *shared* pll id */
1207 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1211 /* DPLL 0 doesn't support HDMI mode */
1216 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1217 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1221 .ctl = WRPLL_CTL(0),
1222 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1223 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1227 .ctl = WRPLL_CTL(1),
1228 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1229 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1233 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1234 struct intel_shared_dpll *pll)
1236 const enum intel_dpll_id id = pll->info->id;
1238 intel_de_rmw(dev_priv, DPLL_CTRL1,
1239 DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1240 pll->state.hw_state.ctrl1 << (id * 6));
1241 intel_de_posting_read(dev_priv, DPLL_CTRL1);
1244 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1245 struct intel_shared_dpll *pll)
1247 const struct skl_dpll_regs *regs = skl_dpll_regs;
1248 const enum intel_dpll_id id = pll->info->id;
1250 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1252 intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1253 intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1254 intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1255 intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1257 /* the enable bit is always bit 31 */
1258 intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1260 if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1261 drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
1264 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1265 struct intel_shared_dpll *pll)
1267 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1270 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1271 struct intel_shared_dpll *pll)
1273 const struct skl_dpll_regs *regs = skl_dpll_regs;
1274 const enum intel_dpll_id id = pll->info->id;
1276 /* the enable bit is always bit 31 */
1277 intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1278 intel_de_posting_read(dev_priv, regs[id].ctl);
/*
 * Intentionally empty: DPLL0 drives CDCLK and must never be disabled
 * here (it is marked INTEL_DPLL_ALWAYS_ON in skl_plls).
 */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
/*
 * Read back the current hardware state of a SKL DPLL (1-3) into
 * @hw_state. Returns false (via elided paths) when the display power
 * well is down or the PLL is not enabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;

	/* bail out early if display core power is not enabled */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	/* extract this PLL's 6-bit CTRL1 field, normalized to bit 0 */
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Read back DPLL0's state. Unlike DPLL1-3, DPLL0 is expected to always
 * be enabled (it drives CDCLK), hence the WARN if it is not.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/* Search state for the SKL WRPLL divider selection loop. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
1363 /* DCO freq must be within +1%/-6% of the DCO central freq */
1364 #define SKL_DCO_MAX_PDEVIATION 100
1365 #define SKL_DCO_MAX_NDEVIATION 600
/*
 * Evaluate one candidate divider: compute the DCO frequency deviation from
 * the central frequency (in 0.01% units) and remember the candidate if it
 * is within the allowed band and better than the best found so far.
 */
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
				  unsigned int divider)
	/* deviation in units of 0.01% of the central frequency */
	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),

	/* positive deviation */
	if (dco_freq >= central_freq) {
		if (deviation < SKL_DCO_MAX_PDEVIATION &&
		    deviation < ctx->min_deviation) {
			ctx->min_deviation = deviation;
			ctx->central_freq = central_freq;
			ctx->dco_freq = dco_freq;
	/* negative deviation */
	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
		   deviation < ctx->min_deviation) {
		ctx->min_deviation = deviation;
		ctx->central_freq = central_freq;
		ctx->dco_freq = dco_freq;
/*
 * Decompose the overall divider @p into the three hardware multipliers
 * p0 (PDiv), p1 (QDiv) and p2 (KDiv) such that p = p0 * p1 * p2.
 * Even dividers are handled via p/2 factoring; odd dividers are the
 * small fixed set 3, 5, 7, 9, 15, 21, 35.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
	unsigned int half = p / 2;

	if (half == 1 || half == 2 || half == 3 || half == 5) {
	} else if (half % 2 == 0) {
	} else if (half % 3 == 0) {
	} else if (half % 7 == 0) {
	} else if (p == 3 || p == 9) {	/* 3, 5, 7, 9, 15, 21, 35 */
	} else if (p == 5 || p == 7) {
	} else if (p == 15) {
	} else if (p == 21) {
	} else if (p == 35) {
/* Computed WRPLL parameters, ready to be packed into CFGCR1/CFGCR2. */
struct skl_wrpll_params {
/*
 * Translate the chosen central frequency and p0/p1/p2 multipliers into the
 * register encodings (central_freq, pdiv, kdiv, qdiv) and compute the DCO
 * integer/fraction from the AFE clock and reference clock.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 p0, u32 p1, u32 p2)
	/* encode the central frequency selection */
	switch (central_freq) {
		params->central_freq = 0;
		params->central_freq = 1;
		params->central_freq = 3;
		WARN(1, "Incorrect PDiv\n");
		WARN(1, "Incorrect KDiv\n");
	params->qdiv_ratio = p1;
	/* qdiv_mode 0 means the Q divider is bypassed (ratio == 1) */
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match BSpec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
/*
 * Find WRPLL settings for the given pixel clock: iterate even dividers
 * first (preferred), then odd ones, over the three DCO central
 * frequencies, keeping the divider with minimal DCO deviation.
 */
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			struct skl_wrpll_params *wrpll_params)
	static const u64 dco_central_freq[3] = { 8400000000ULL,
	static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 */
		if (d == 0 && ctx.p)

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);
/*
 * Compute the port clock (kHz) from readout WRPLL state: decode p0/p1/p2
 * from CFGCR2, the DCO frequency from CFGCR1, then reverse the AFE 5x
 * relation. @pll may be NULL (called during state computation too).
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* Q divider ratio only applies when qdiv_mode is set */
	if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;

	case DPLL_CFGCR2_PDIV_1:
	case DPLL_CFGCR2_PDIV_2:
	case DPLL_CFGCR2_PDIV_3:
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
	case DPLL_CFGCR2_PDIV_7:
	case DPLL_CFGCR2_KDIV_5:
	case DPLL_CFGCR2_KDIV_2:
	case DPLL_CFGCR2_KDIV_3:
	case DPLL_CFGCR2_KDIV_1:

	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *

	/* fraction is a 15-bit value above bit 9 */
	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *

	/* guard against division by zero on bogus readout */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))

	return dco_freq / (p0 * p1 * p2 * 5);
/*
 * Compute WRPLL dividers for an HDMI output and store the resulting
 * CTRL1/CFGCR1/CFGCR2 values in crtc_state->dpll_hw_state. Also refines
 * port_clock to the exact frequency the PLL will actually produce.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, the calculator wants Hz */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		 wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		 wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	/* read back the exact achievable clock (pll is unused here) */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);
/*
 * For DP outputs the PLL runs in LCPLL mode: select the fixed link rate
 * matching half the port clock (the PLL output is link_clock * 2).
 */
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
/*
 * Decode the LCPLL (DP) link rate from ctrl1 and return the port clock
 * (kHz), which is twice the link clock.
 */
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
	case DPLL_CTRL1_LINK_RATE_810:
	case DPLL_CTRL1_LINK_RATE_1080:
		link_clock = 108000;
	case DPLL_CTRL1_LINK_RATE_1350:
		link_clock = 135000;
	case DPLL_CTRL1_LINK_RATE_1620:
		link_clock = 162000;
	case DPLL_CTRL1_LINK_RATE_2160:
		link_clock = 216000;
	case DPLL_CTRL1_LINK_RATE_2700:
		link_clock = 270000;
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");

	return link_clock * 2;
/*
 * Compute the DPLL hw state for the new crtc state, dispatching on the
 * output type (HDMI uses WRPLL dividers, DP uses fixed LCPLL rates).
 */
static int skl_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		return skl_ddi_hdmi_pll_dividers(crtc_state);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
/*
 * Reserve a shared DPLL for the crtc: eDP must use DPLL0 (shared with
 * CDCLK), everything else picks from DPLL1-3.
 */
static int skl_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));

	/* record this crtc as a user of the chosen PLL */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;
/*
 * Return the output frequency of a SKL PLL, dispatching between WRPLL
 * (HDMI) and LCPLL (DP) decoding based on the HDMI-mode bit.
 */
static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
	/*
	 * ctrl1 register is already shifted for each pll, just use 0 to get
	 * the internal shift for each field
	 */
	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);

	return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
/* The DPLL non-SSC reference is the CDCLK reference on SKL. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
/* Log the SKL DPLL hw state for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		    "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
/* Ops for SKL DPLL1-3 (fully driver-managed PLLs). */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
/* Ops for SKL DPLL0, which is owned by the CDCLK code. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
/* PLL inventory for SKL; DPLL0 is always-on since it drives CDCLK. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
/* SKL DPLL manager vtable. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
/*
 * Enable a BXT/GLK port PLL: program the divider/coefficient registers
 * from the cached hw_state, trigger recalibration, enable and wait for
 * lock, then program the lane stagger configuration.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	/* GLK needs the PLL power well enabled before programming */
	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);

	/* Disable 10 bit clock */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1/P2 dividers */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);

	/* Write M2 integer */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);

	/* Write N divider */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);

	/* Write M2 fraction */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);

	/* Loop-filter coefficients and gain control */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);

	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL and wait for lock */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
/*
 * Disable a BXT/GLK port PLL; on GLK also drop the PLL power well and
 * wait for its power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */

	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
/*
 * Read back the BXT/GLK port PLL state into @hw_state, masking each
 * register down to the fields the driver actually programs so that state
 * comparison against the computed values works.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/* pre-calculated values for DP linkrates */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
/*
 * Compute HDMI dividers for a BXT/GLK port PLL using the generic best-PLL
 * search; m1 must come out as 2 on this hardware.
 */
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct dpll *clk_div)
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, clk_div))

	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
/*
 * Pick the pre-calculated DP divider set matching the port clock
 * (falls back to the first entry), then derive vco/dot via
 * chv_calc_dpll_params() and sanity-check the result.
 */
static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
				    struct dpll *clk_div)
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	*clk_div = bxt_dp_clk_val[0];
	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
			*clk_div = bxt_dp_clk_val[i];

	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);

	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
		    clk_div->dot != crtc_state->port_clock);
/*
 * Fill crtc_state->dpll_hw_state for a BXT/GLK port PLL from the chosen
 * dividers: select loop-filter coefficients by VCO band, the lane stagger
 * value by clock band, then pack everything into the ebb/pll register
 * images.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;

	/* loop filter tuning depends on the VCO frequency band */
	if (vco >= 6200000 && vco <= 6700000) {
	} else if ((vco > 5400000 && vco < 6200000) ||
		   (vco >= 4800000 && vco < 5400000)) {
	} else if (vco == 5400000) {
		drm_err(&i915->drm, "Invalid VCO\n");

	/* lane stagger selection by port clock band */
	else if (clock > 135000)
	else if (clock > 67000)
	else if (clock > 33000)

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	/* m2 is .22 fixed point: integer part above bit 22 */
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
			      PORT_PLL_INT_COEFF(int_coef) |
			      PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
			       PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
/*
 * Reconstruct the dpll dividers from the register images and return the
 * resulting clock via chv_calc_dpll_params(). The m2 fraction is only
 * valid when the fraction-enable bit is set.
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);

	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
/* Compute the DP hw state: fixed dividers from the table, then pack. */
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
	struct dpll clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/*
 * Compute the HDMI hw state, then refine port_clock to the exact
 * frequency the PLL will produce.
 */
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct dpll clk_div = {};

	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);

	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);

	/* pll is unused by the freq readback here */
	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
						      &crtc_state->dpll_hw_state);
/* Compute the BXT PLL hw state, dispatching on HDMI vs DP output. */
static int bxt_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
/*
 * On BXT there is no PLL sharing: each port has a dedicated PLL, so just
 * look up the PLL whose id equals the encoder's port and reference it.
 */
static int bxt_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;
/* BXT uses a fixed 100 MHz reference for both SSC and non-SSC. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
	i915->display.dpll.ref_clks.ssc = 100000;
	i915->display.dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
/* Log the BXT PLL hw state for debugging. */
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
/* Ops shared by all BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
/* One PLL per port (A-C); ids reuse the SKL DPLL id space. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
/* BXT DPLL manager vtable. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
/*
 * Split the best overall divider into ICL's PDiv/QDiv/KDiv multipliers.
 * Even dividers are factored (2 first, then 4, 6, 10, 14); the remaining
 * odd dividers are 3, 5, 7, 9, 15, 21.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
	if (bestdiv % 2 == 0) {
	} else if (bestdiv % 4 == 0) {
		*qdiv = bestdiv / 4;
	} else if (bestdiv % 6 == 0) {
		*qdiv = bestdiv / 6;
	} else if (bestdiv % 5 == 0) {
		*qdiv = bestdiv / 10;
	} else if (bestdiv % 14 == 0) {
		*qdiv = bestdiv / 14;

	if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
	} else { /* 9, 15, 21 */
		*pdiv = bestdiv / 3;
/*
 * Encode pdiv/qdiv/kdiv and the DCO frequency into skl_wrpll_params.
 * The DCO is expressed as a 15-bit binary fraction of the reference.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
		WARN(1, "Incorrect KDiv\n");
		WARN(1, "Incorrect PDiv\n");

	/* qdiv may only differ from 1 when kdiv == 2 */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* dco as a .15 fixed-point ratio of dco_freq to ref_freq */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
/*
 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
 * Program half of the nominal DCO divider fraction value.
 */
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
	/* WA applies only with the 38.4 MHz non-SSC reference */
	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
		IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
		i915->display.dpll.ref_clks.nssc == 38400;
/* A (clock -> precomputed WRPLL params) table entry for combo PHY PLLs. */
struct icl_combo_pll_params {
	struct skl_wrpll_params wrpll;
/*
 * These values are already adjusted: they're the bits we write to the
 * registers, not the logical values.
 */
/* Pre-computed combo PHY PLL params for a 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	  .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	  .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
/* Also used for 38.4 MHz values. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	  .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	  .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
/* ICL Thunderbolt PLL params for a 24 MHz reference. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/* ICL Thunderbolt PLL params for a 19.2 MHz reference. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/* TGL Thunderbolt PLL params for a 19.2 MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
/* TGL Thunderbolt PLL params for a 24 MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
/*
 * Look up the pre-computed combo PLL params matching the DP port clock.
 * Both tables have identical length and clock ordering, so the 24 MHz
 * table's ARRAY_SIZE is used for the loop bound regardless of which table
 * was selected.
 */
static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				 struct skl_wrpll_params *pll_params)
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	const struct icl_combo_pll_params *params =
		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	int clock = crtc_state->port_clock;

	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;

	/* no table entry for this link rate */
	MISSING_CASE(clock);
/*
 * Select the fixed Thunderbolt PLL parameters for the platform's
 * reference clock: TGL+ and ICL use different value sets.
 */
static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			    struct skl_wrpll_params *pll_params)
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (DISPLAY_VER(dev_priv) >= 12) {
		switch (dev_priv->display.dpll.ref_clks.nssc) {
			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			*pll_params = tgl_tbt_pll_24MHz_values;
		switch (dev_priv->display.dpll.ref_clks.nssc) {
			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
			*pll_params = icl_tbt_pll_19_2MHz_values;
			*pll_params = icl_tbt_pll_24MHz_values;
/*
 * .get_freq hook for the TBT PLL.  There is no single output frequency
 * to report (see comment below), so reaching this is always a warning.
 */
2580 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2581 const struct intel_shared_dpll *pll,
2582 const struct intel_dpll_hw_state *pll_state)
2585 * The PLL outputs multiple frequencies at the same time, selection is
2586 * made at DDI clock mux level.
2588 drm_WARN_ON(&i915->drm, 1);
/*
 * Return the effective WRPLL reference clock in kHz, accounting for the
 * hardware's automatic divide-by-2 of a 38.4 MHz reference (see below).
 */
2593 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2595 int ref_clock = i915->display.dpll.ref_clks.nssc;
2598 * For ICL+, the spec states: if reference frequency is 38.4,
2599 * use 19.2 because the DPLL automatically divides that by 2.
2601 if (ref_clock == 38400)
/*
 * Compute WRPLL parameters for the crtc's port clock: scan the allowed
 * dividers for the DCO frequency (afe_clock * divider) that lands inside
 * [dco_min, dco_max] closest to the band midpoint, then derive the
 * pdiv/qdiv/kdiv split and populate *wrpll_params.
 * NOTE(review): the "no best_dco found" error path is not visible here.
 */
2608 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2609 struct skl_wrpll_params *wrpll_params)
2611 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2612 int ref_clock = icl_wrpll_ref_clock(i915);
2613 u32 afe_clock = crtc_state->port_clock * 5;
2614 u32 dco_min = 7998000;
2615 u32 dco_max = 10000000;
2616 u32 dco_mid = (dco_min + dco_max) / 2;
2617 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2618 18, 20, 24, 28, 30, 32, 36, 40,
2619 42, 44, 48, 50, 52, 54, 56, 60,
2620 64, 66, 68, 70, 72, 76, 78, 80,
2621 84, 88, 90, 92, 96, 98, 100, 102,
2622 3, 5, 7, 9, 15, 21 };
2623 u32 dco, best_dco = 0, dco_centrality = 0;
2624 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2625 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2627 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2628 dco = afe_clock * dividers[d];
2630 if (dco <= dco_max && dco >= dco_min) {
2631 dco_centrality = abs(dco - dco_mid);
2633 if (dco_centrality < best_dco_centrality) {
2634 best_dco_centrality = dco_centrality;
2635 best_div = dividers[d];
2644 icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2645 icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
/*
 * Recover the combo PLL output frequency from the CFGCR0/CFGCR1 readout:
 * decode p0 (pdiv), p1 (qdiv, 1 when qdiv mode is off) and p2 (kdiv),
 * rebuild the DCO frequency from integer + fractional parts, and divide
 * by p0*p1*p2*5.  Returns 0 (via the WARN path) on a zero divider.
 * NOTE(review): the pdiv/kdiv case bodies are truncated in this view.
 */
2651 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2652 const struct intel_shared_dpll *pll,
2653 const struct intel_dpll_hw_state *pll_state)
2655 int ref_clock = icl_wrpll_ref_clock(i915);
2657 u32 p0, p1, p2, dco_freq;
2659 p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2660 p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2662 if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2663 p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2664 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2669 case DPLL_CFGCR1_PDIV_2:
2672 case DPLL_CFGCR1_PDIV_3:
2675 case DPLL_CFGCR1_PDIV_5:
2678 case DPLL_CFGCR1_PDIV_7:
2684 case DPLL_CFGCR1_KDIV_1:
2687 case DPLL_CFGCR1_KDIV_2:
2690 case DPLL_CFGCR1_KDIV_3:
2695 dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2698 dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2699 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2701 if (ehl_combo_pll_div_frac_wa_needed(i915))
2704 dco_freq += (dco_fraction * ref_clock) / 0x8000;
2706 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2709 return dco_freq / (p0 * p1 * p2 * 5);
/*
 * Translate computed WRPLL parameters into CFGCR0/CFGCR1 (and DIV0)
 * register values in *pll_state.  Applies the EHL fractional-divider
 * workaround (halving dco_fraction) when needed, and the AFC startup
 * override from the VBT on TGL+.
 */
2712 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2713 const struct skl_wrpll_params *pll_params,
2714 struct intel_dpll_hw_state *pll_state)
2716 u32 dco_fraction = pll_params->dco_fraction;
2718 if (ehl_combo_pll_div_frac_wa_needed(i915))
2719 dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2721 pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2722 pll_params->dco_integer;
2724 pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2725 DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2726 DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2727 DPLL_CFGCR1_PDIV(pll_params->pdiv);
2729 if (DISPLAY_VER(i915) >= 12)
2730 pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2732 pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2734 if (i915->display.vbt.override_afc_startup)
2735 pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
/*
 * Find div1/div2 divisors for the MG/DKL PLL such that the resulting DCO
 * (div1 * div2 * clock_khz * 5) lands in the allowed range (DP uses a
 * fixed 8.1 GHz DCO).  On success, fills *target_dco_khz and the
 * refclkin/clktop2 fields of *state.  NOTE(review): the hsdiv switch
 * cases and the failure return are truncated in this view.
 */
2738 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2739 u32 *target_dco_khz,
2740 struct intel_dpll_hw_state *state,
2743 static const u8 div1_vals[] = { 7, 5, 3, 2 };
2744 u32 dco_min_freq, dco_max_freq;
2748 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2749 dco_max_freq = is_dp ? 8100000 : 10000000;
2751 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2752 int div1 = div1_vals[i];
2754 for (div2 = 10; div2 > 0; div2--) {
2755 int dco = div1 * div2 * clock_khz * 5;
2756 int a_divratio, tlinedrv, inputsel;
2759 if (dco < dco_min_freq || dco > dco_max_freq)
2764 * Note: a_divratio not matching TGL BSpec
2765 * algorithm but matching hardcoded values and
2766 * working on HW for DP alt-mode at least
2768 a_divratio = is_dp ? 10 : 5;
2769 tlinedrv = is_dkl ? 1 : 2;
2774 inputsel = is_dp ? 0 : 1;
2781 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2784 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2787 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2790 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2794 *target_dco_khz = dco;
2796 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2798 state->mg_clktop2_coreclkctl1 =
2799 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2801 state->mg_clktop2_hsclkctl =
2802 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2803 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2805 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2815 * The specification for this function uses real numbers, so the math had to be
2816 * adapted to integer-only calculation, that's why it looks so different.
/*
 * Compute the full MG (ICL) or DKL (TGL+) PHY PLL register state for the
 * crtc's port clock: feedback dividers (m1/m2 int+frac), TDC target
 * count, feed-forward gain, loop coefficients and SSC step values, then
 * pack them into the mg_pll_* fields of *pll_state.  The DKL branch
 * (is_dkl) uses the DKL_* field layout, the else branch the MG_* layout.
 * NOTE(review): several intermediate assignments (iref_*, prop/int
 * coefficients, error returns) are truncated in this view.
 */
2818 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2819 struct intel_dpll_hw_state *pll_state)
2821 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2822 int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2823 int clock = crtc_state->port_clock;
2824 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2825 u32 iref_ndiv, iref_trim, iref_pulse_w;
2826 u32 prop_coeff, int_coeff;
2827 u32 tdc_targetcnt, feedfwgain;
2828 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2830 bool use_ssc = false;
2831 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2832 bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2835 ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2841 m2div_int = dco_khz / (refclk_khz * m1div);
2842 if (m2div_int > 255) {
2845 m2div_int = dco_khz / (refclk_khz * m1div);
2848 if (m2div_int > 255)
2851 m2div_rem = dco_khz % (refclk_khz * m1div);
2853 tmp = (u64)m2div_rem * (1 << 22);
2854 do_div(tmp, refclk_khz * m1div);
2857 switch (refclk_khz) {
2874 MISSING_CASE(refclk_khz);
2879 * tdc_res = 0.000003
2880 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2882 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2883 * was supposed to be a division, but we rearranged the operations of
2884 * the formula to avoid early divisions so we don't multiply the
2887 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2888 * we also rearrange to work with integers.
2890 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2891 * last division by 10.
2893 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2896 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2897 * 32 bits. That's not a problem since we round the division down
2900 feedfwgain = (use_ssc || m2div_rem > 0) ?
2901 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2903 if (dco_khz >= 9000000) {
2912 tmp = mul_u32_u32(dco_khz, 47 * 32);
2913 do_div(tmp, refclk_khz * m1div * 10000);
2916 tmp = mul_u32_u32(dco_khz, 1000);
2917 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2924 /* write pll_state calculations */
2926 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2927 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2928 DKL_PLL_DIV0_FBPREDIV(m1div) |
2929 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2930 if (dev_priv->display.vbt.override_afc_startup) {
2931 u8 val = dev_priv->display.vbt.override_afc_startup_val;
2933 pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2936 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2937 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2939 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2940 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2941 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2942 (use_ssc ? DKL_PLL_SSC_EN : 0);
2944 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2945 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2947 pll_state->mg_pll_tdc_coldst_bias =
2948 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2949 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2952 pll_state->mg_pll_div0 =
2953 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2954 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2955 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2957 pll_state->mg_pll_div1 =
2958 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2959 MG_PLL_DIV1_DITHER_DIV_2 |
2960 MG_PLL_DIV1_NDIVRATIO(1) |
2961 MG_PLL_DIV1_FBPREDIV(m1div);
2963 pll_state->mg_pll_lf =
2964 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2965 MG_PLL_LF_AFCCNTSEL_512 |
2966 MG_PLL_LF_GAINCTRL(1) |
2967 MG_PLL_LF_INT_COEFF(int_coeff) |
2968 MG_PLL_LF_PROP_COEFF(prop_coeff);
2970 pll_state->mg_pll_frac_lock =
2971 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2972 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2973 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2974 MG_PLL_FRAC_LOCK_DCODITHEREN |
2975 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2976 if (use_ssc || m2div_rem > 0)
2977 pll_state->mg_pll_frac_lock |=
2978 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2980 pll_state->mg_pll_ssc =
2981 (use_ssc ? MG_PLL_SSC_EN : 0) |
2982 MG_PLL_SSC_TYPE(2) |
2983 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2984 MG_PLL_SSC_STEPNUM(ssc_steplog) |
2986 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2988 pll_state->mg_pll_tdc_coldst_bias =
2989 MG_PLL_TDC_COLDST_COLDSTART |
2990 MG_PLL_TDC_COLDST_IREFINT_EN |
2991 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2992 MG_PLL_TDC_TDCOVCCORR_EN |
2993 MG_PLL_TDC_TDCSEL(3);
2995 pll_state->mg_pll_bias =
2996 MG_PLL_BIAS_BIAS_GB_SEL(3) |
2997 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2998 MG_PLL_BIAS_BIAS_BONUS(10) |
2999 MG_PLL_BIAS_BIASCAL_EN |
3000 MG_PLL_BIAS_CTRIM(12) |
3001 MG_PLL_BIAS_VREF_RDAC(4) |
3002 MG_PLL_BIAS_IREFTRIM(iref_trim);
/*
 * With a 38.4 MHz refclk only the COLDSTART bit of the coldst/bias
 * register is programmed and the bias register is left untouched;
 * the masks computed here are also used by the RMW in
 * icl_mg_pll_write() and by readout masking.
 */
3004 if (refclk_khz == 38400) {
3005 pll_state->mg_pll_tdc_coldst_bias_mask =
3006 MG_PLL_TDC_COLDST_COLDSTART;
3007 pll_state->mg_pll_bias_mask = 0;
3009 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3010 pll_state->mg_pll_bias_mask = -1U;
3013 pll_state->mg_pll_tdc_coldst_bias &=
3014 pll_state->mg_pll_tdc_coldst_bias_mask;
3015 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
/*
 * Recover the MG/DKL PLL output frequency from readout state: decode
 * m1, m2 (integer + 22-bit fraction) from the DKL (display ver >= 12)
 * or MG register layout, decode div1 from the hsclkctl HSDIV field and
 * div2 from the DSDIV field, then compute
 * (m1 * (m2_int + m2_frac/2^22) * refclk) / (5 * div1 * div2).
 * NOTE(review): the m2_frac=0 else-branches and hsdiv case bodies are
 * truncated in this view.
 */
3021 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3022 const struct intel_shared_dpll *pll,
3023 const struct intel_dpll_hw_state *pll_state)
3025 u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3028 ref_clock = dev_priv->display.dpll.ref_clks.nssc;
3030 if (DISPLAY_VER(dev_priv) >= 12) {
3031 m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3032 m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3033 m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3035 if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3036 m2_frac = pll_state->mg_pll_bias &
3037 DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3038 m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3043 m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3044 m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3046 if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3047 m2_frac = pll_state->mg_pll_div0 &
3048 MG_PLL_DIV0_FBDIV_FRAC_MASK;
3049 m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3055 switch (pll_state->mg_clktop2_hsclkctl &
3056 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3057 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3060 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3063 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3066 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3070 MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3074 div2 = (pll_state->mg_clktop2_hsclkctl &
3075 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3076 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3078 /* div2 value of 0 is same as 1 means no div */
3083 * Adjust the original formula to delay the division by 2^22 in order to
3084 * minimize possible rounding errors.
3086 tmp = (u64)m1 * m2_int * ref_clock +
3087 (((u64)m1 * m2_frac * ref_clock) >> 22);
3088 tmp = div_u64(tmp, 5 * div1 * div2);
3094 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3095 * @crtc_state: state for the CRTC to select the DPLL for
3096 * @port_dpll_id: the active @port_dpll_id to select
3098 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3101 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3102 enum icl_port_dpll_id port_dpll_id)
3104 struct icl_port_dpll *port_dpll =
3105 &crtc_state->icl_port_dplls[port_dpll_id];
/* Copy the selected port DPLL and its HW state into the crtc state. */
3107 crtc_state->shared_dpll = port_dpll->pll;
3108 crtc_state->dpll_hw_state = port_dpll->hw_state;
/*
 * Pick the active port DPLL for the crtc based on the TC port mode of
 * the (primary, for MST) digital port: MG PHY PLL when the port is in
 * DP alt or legacy mode, otherwise the default (TBT) PLL.
 */
3111 static void icl_update_active_dpll(struct intel_atomic_state *state,
3112 struct intel_crtc *crtc,
3113 struct intel_encoder *encoder)
3115 struct intel_crtc_state *crtc_state =
3116 intel_atomic_get_new_crtc_state(state, crtc);
3117 struct intel_digital_port *primary_port;
3118 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3120 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3121 enc_to_mst(encoder)->primary :
3122 enc_to_dig_port(encoder);
3125 (intel_tc_port_in_dp_alt_mode(primary_port) ||
3126 intel_tc_port_in_legacy_mode(primary_port)))
3127 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3129 icl_set_active_port_dpll(crtc_state, port_dpll_id);
/*
 * Compute the combo-PHY DPLL state for the crtc: WRPLL parameters for
 * HDMI/DSI, DP combo PLL table lookup otherwise; then convert to
 * register state, select the default port DPLL (for the fastset check)
 * and recompute port_clock from the resulting state.
 */
3132 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3133 struct intel_crtc *crtc)
3135 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3136 struct intel_crtc_state *crtc_state =
3137 intel_atomic_get_new_crtc_state(state, crtc);
3138 struct icl_port_dpll *port_dpll =
3139 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3140 struct skl_wrpll_params pll_params = {};
3143 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3144 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3145 ret = icl_calc_wrpll(crtc_state, &pll_params);
3147 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3152 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3154 /* this is mainly for the fastset check */
3155 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3157 crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3158 &port_dpll->hw_state);
/*
 * Reserve a combo-PHY DPLL for the crtc: build the platform/port
 * specific mask of candidate DPLL ids (ADL-S, DG1, RKL, JSL/EHL, plain
 * ICL), drop any DPLLs reserved by HTI, then find and reference a
 * matching shared DPLL and update the active port DPLL selection.
 */
3163 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3164 struct intel_crtc *crtc,
3165 struct intel_encoder *encoder)
3167 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3168 struct intel_crtc_state *crtc_state =
3169 intel_atomic_get_new_crtc_state(state, crtc);
3170 struct icl_port_dpll *port_dpll =
3171 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3172 enum port port = encoder->port;
3173 unsigned long dpll_mask;
3175 if (IS_ALDERLAKE_S(dev_priv)) {
3177 BIT(DPLL_ID_DG1_DPLL3) |
3178 BIT(DPLL_ID_DG1_DPLL2) |
3179 BIT(DPLL_ID_ICL_DPLL1) |
3180 BIT(DPLL_ID_ICL_DPLL0);
3181 } else if (IS_DG1(dev_priv)) {
3182 if (port == PORT_D || port == PORT_E) {
3184 BIT(DPLL_ID_DG1_DPLL2) |
3185 BIT(DPLL_ID_DG1_DPLL3);
3188 BIT(DPLL_ID_DG1_DPLL0) |
3189 BIT(DPLL_ID_DG1_DPLL1);
3191 } else if (IS_ROCKETLAKE(dev_priv)) {
3193 BIT(DPLL_ID_EHL_DPLL4) |
3194 BIT(DPLL_ID_ICL_DPLL1) |
3195 BIT(DPLL_ID_ICL_DPLL0);
3196 } else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3198 BIT(DPLL_ID_EHL_DPLL4) |
3199 BIT(DPLL_ID_ICL_DPLL1) |
3200 BIT(DPLL_ID_ICL_DPLL0);
3202 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3205 /* Eliminate DPLLs from consideration if reserved by HTI */
3206 dpll_mask &= ~intel_hti_dpll_mask(dev_priv);
3208 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3209 &port_dpll->hw_state,
3211 if (!port_dpll->pll)
3214 intel_reference_shared_dpll(state, crtc,
3215 port_dpll->pll, &port_dpll->hw_state);
3217 icl_update_active_dpll(state, crtc, encoder);
/*
 * Compute both candidate PLL states for a Type-C port: the TBT PLL
 * (default slot) and the MG/DKL PHY PLL, then select MG PHY as active
 * (for the fastset check) and recompute port_clock from its state.
 */
3222 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3223 struct intel_crtc *crtc)
3225 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3226 struct intel_crtc_state *crtc_state =
3227 intel_atomic_get_new_crtc_state(state, crtc);
3228 struct icl_port_dpll *port_dpll =
3229 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3230 struct skl_wrpll_params pll_params = {};
3233 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3234 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3238 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3240 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3241 ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3245 /* this is mainly for the fastset check */
3246 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3248 crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3249 &port_dpll->hw_state);
/*
 * Reserve both PLLs for a Type-C port: the TBT PLL and the per-TC-port
 * MG/DKL PHY PLL.  If the MG PLL cannot be found, the already-taken TBT
 * reference is dropped via the error path.  Finally select the active
 * port DPLL for the current TC mode.
 */
3254 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3255 struct intel_crtc *crtc,
3256 struct intel_encoder *encoder)
3258 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3259 struct intel_crtc_state *crtc_state =
3260 intel_atomic_get_new_crtc_state(state, crtc);
3261 struct icl_port_dpll *port_dpll =
3262 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3263 enum intel_dpll_id dpll_id;
3266 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3267 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3268 &port_dpll->hw_state,
3269 BIT(DPLL_ID_ICL_TBTPLL));
3270 if (!port_dpll->pll)
3272 intel_reference_shared_dpll(state, crtc,
3273 port_dpll->pll, &port_dpll->hw_state);
3276 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3277 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3279 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3280 &port_dpll->hw_state,
3282 if (!port_dpll->pll) {
3284 goto err_unreference_tbt_pll;
3286 intel_reference_shared_dpll(state, crtc,
3287 port_dpll->pll, &port_dpll->hw_state);
3289 icl_update_active_dpll(state, crtc, encoder);
3293 err_unreference_tbt_pll:
3294 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3295 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
/*
 * Dispatch DPLL state computation by PHY type: combo PHY vs Type-C.
 * NOTE(review): the fall-through return for other PHY types is not
 * visible in this view.
 */
3300 static int icl_compute_dplls(struct intel_atomic_state *state,
3301 struct intel_crtc *crtc,
3302 struct intel_encoder *encoder)
3304 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3305 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3307 if (intel_phy_is_combo(dev_priv, phy))
3308 return icl_compute_combo_phy_dpll(state, crtc);
3309 else if (intel_phy_is_tc(dev_priv, phy))
3310 return icl_compute_tc_phy_dplls(state, crtc);
/*
 * Dispatch DPLL reservation by PHY type: combo PHY vs Type-C, mirroring
 * icl_compute_dplls().  NOTE(review): the fall-through return for other
 * PHY types is not visible in this view.
 */
3317 static int icl_get_dplls(struct intel_atomic_state *state,
3318 struct intel_crtc *crtc,
3319 struct intel_encoder *encoder)
3321 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3322 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3324 if (intel_phy_is_combo(dev_priv, phy))
3325 return icl_get_combo_phy_dpll(state, crtc, encoder);
3326 else if (intel_phy_is_tc(dev_priv, phy))
3327 return icl_get_tc_phy_dplls(state, crtc, encoder);
/*
 * Release all port DPLL references held by the crtc's old state: clear
 * the new state's shared_dpll and, for each port DPLL slot that was
 * populated in the old state, drop its shared-DPLL reference.
 */
3334 static void icl_put_dplls(struct intel_atomic_state *state,
3335 struct intel_crtc *crtc)
3337 const struct intel_crtc_state *old_crtc_state =
3338 intel_atomic_get_old_crtc_state(state, crtc);
3339 struct intel_crtc_state *new_crtc_state =
3340 intel_atomic_get_new_crtc_state(state, crtc);
3341 enum icl_port_dpll_id id;
3343 new_crtc_state->shared_dpll = NULL;
3345 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3346 const struct icl_port_dpll *old_port_dpll =
3347 &old_crtc_state->icl_port_dplls[id];
3348 struct icl_port_dpll *new_port_dpll =
3349 &new_crtc_state->icl_port_dplls[id];
3351 new_port_dpll->pll = NULL;
3353 if (!old_port_dpll->pll)
3356 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
/*
 * Read back the MG PHY PLL register state under a display-core power
 * reference.  Bails out if the power well cannot be grabbed or the PLL
 * is not enabled.  Each read is masked down to the fields the driver
 * programs; coldst/bias are additionally masked per the 38.4 MHz refclk
 * rule (matching icl_calc_mg_pll_state()).  The power reference is
 * dropped on all exit paths.
 */
3360 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3361 struct intel_shared_dpll *pll,
3362 struct intel_dpll_hw_state *hw_state)
3364 const enum intel_dpll_id id = pll->info->id;
3365 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3366 intel_wakeref_t wakeref;
3370 i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3372 wakeref = intel_display_power_get_if_enabled(dev_priv,
3373 POWER_DOMAIN_DISPLAY_CORE);
3377 val = intel_de_read(dev_priv, enable_reg);
3378 if (!(val & PLL_ENABLE))
3381 hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3382 MG_REFCLKIN_CTL(tc_port));
3383 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3385 hw_state->mg_clktop2_coreclkctl1 =
3386 intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3387 hw_state->mg_clktop2_coreclkctl1 &=
3388 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3390 hw_state->mg_clktop2_hsclkctl =
3391 intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3392 hw_state->mg_clktop2_hsclkctl &=
3393 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3394 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3395 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3396 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3398 hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3399 hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3400 hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3401 hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3402 MG_PLL_FRAC_LOCK(tc_port));
3403 hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3405 hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3406 hw_state->mg_pll_tdc_coldst_bias =
3407 intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3409 if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
3410 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3411 hw_state->mg_pll_bias_mask = 0;
3413 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3414 hw_state->mg_pll_bias_mask = -1U;
3417 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3418 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3422 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Read back the Dekel (DKL) PHY PLL register state under a display-core
 * power reference, via the DKL indexed-register accessors.  Bails out
 * if the power well cannot be grabbed or the PLL is not enabled.  Each
 * value is masked down to the fields the driver programs, mirroring the
 * masks used by dkl_pll_write().  The power reference is dropped on all
 * exit paths.
 */
3426 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3427 struct intel_shared_dpll *pll,
3428 struct intel_dpll_hw_state *hw_state)
3430 const enum intel_dpll_id id = pll->info->id;
3431 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3432 intel_wakeref_t wakeref;
3436 wakeref = intel_display_power_get_if_enabled(dev_priv,
3437 POWER_DOMAIN_DISPLAY_CORE);
3441 val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
3442 if (!(val & PLL_ENABLE))
3446 * All registers read here have the same HIP_INDEX_REG even though
3447 * they are on different building blocks
3449 hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
3450 DKL_REFCLKIN_CTL(tc_port));
3451 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3453 hw_state->mg_clktop2_hsclkctl =
3454 intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3455 hw_state->mg_clktop2_hsclkctl &=
3456 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3457 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3458 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3459 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3461 hw_state->mg_clktop2_coreclkctl1 =
3462 intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3463 hw_state->mg_clktop2_coreclkctl1 &=
3464 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3466 hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
3467 val = DKL_PLL_DIV0_MASK;
3468 if (dev_priv->display.vbt.override_afc_startup)
3469 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3470 hw_state->mg_pll_div0 &= val;
3472 hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3473 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3474 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3476 hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3477 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3478 DKL_PLL_SSC_STEP_LEN_MASK |
3479 DKL_PLL_SSC_STEP_NUM_MASK |
3482 hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3483 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3484 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3486 hw_state->mg_pll_tdc_coldst_bias =
3487 intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3488 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3489 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3493 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Common readout for combo/TBT PLLs: under a display-core power
 * reference, check PLL_ENABLE in @enable_reg, then read CFGCR0/CFGCR1
 * from the platform-specific register location (ADL-S, DG1, RKL,
 * TGL+ with optional DIV0 AFC-startup readout, JSL/EHL DPLL4, ICL).
 * The power reference is dropped on all exit paths.
 */
3497 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3498 struct intel_shared_dpll *pll,
3499 struct intel_dpll_hw_state *hw_state,
3500 i915_reg_t enable_reg)
3502 const enum intel_dpll_id id = pll->info->id;
3503 intel_wakeref_t wakeref;
3507 wakeref = intel_display_power_get_if_enabled(dev_priv,
3508 POWER_DOMAIN_DISPLAY_CORE);
3512 val = intel_de_read(dev_priv, enable_reg);
3513 if (!(val & PLL_ENABLE))
3516 if (IS_ALDERLAKE_S(dev_priv)) {
3517 hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
3518 hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
3519 } else if (IS_DG1(dev_priv)) {
3520 hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
3521 hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
3522 } else if (IS_ROCKETLAKE(dev_priv)) {
3523 hw_state->cfgcr0 = intel_de_read(dev_priv,
3524 RKL_DPLL_CFGCR0(id));
3525 hw_state->cfgcr1 = intel_de_read(dev_priv,
3526 RKL_DPLL_CFGCR1(id));
3527 } else if (DISPLAY_VER(dev_priv) >= 12) {
3528 hw_state->cfgcr0 = intel_de_read(dev_priv,
3529 TGL_DPLL_CFGCR0(id));
3530 hw_state->cfgcr1 = intel_de_read(dev_priv,
3531 TGL_DPLL_CFGCR1(id));
3532 if (dev_priv->display.vbt.override_afc_startup) {
3533 hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
3534 hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3537 if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3538 hw_state->cfgcr0 = intel_de_read(dev_priv,
3539 ICL_DPLL_CFGCR0(4));
3540 hw_state->cfgcr1 = intel_de_read(dev_priv,
3541 ICL_DPLL_CFGCR1(4));
3543 hw_state->cfgcr0 = intel_de_read(dev_priv,
3544 ICL_DPLL_CFGCR0(id));
3545 hw_state->cfgcr1 = intel_de_read(dev_priv,
3546 ICL_DPLL_CFGCR1(id));
3552 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/* Readout hook for combo PLLs: resolve the per-PLL enable register. */
3556 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3557 struct intel_shared_dpll *pll,
3558 struct intel_dpll_hw_state *hw_state)
3560 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3562 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
/* Readout hook for the TBT PLL: fixed TBT_PLL_ENABLE register. */
3565 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3566 struct intel_shared_dpll *pll,
3567 struct intel_dpll_hw_state *hw_state)
3569 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
/*
 * Program the combo/TBT PLL's CFGCR0/CFGCR1 (and, on TGL+ with a VBT
 * AFC-startup override, the DIV0 AFC field via RMW) from the cached
 * pll->state.hw_state.  Register locations are selected per platform,
 * mirroring icl_pll_get_hw_state().  A posting read on cfgcr1 flushes
 * the writes.
 */
3572 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3573 struct intel_shared_dpll *pll)
3575 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3576 const enum intel_dpll_id id = pll->info->id;
3577 i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3579 if (IS_ALDERLAKE_S(dev_priv)) {
3580 cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3581 cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3582 } else if (IS_DG1(dev_priv)) {
3583 cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3584 cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3585 } else if (IS_ROCKETLAKE(dev_priv)) {
3586 cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3587 cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3588 } else if (DISPLAY_VER(dev_priv) >= 12) {
3589 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3590 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3591 div0_reg = TGL_DPLL0_DIV0(id);
3593 if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3594 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3595 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3597 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3598 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3602 intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3603 intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3604 drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
3605 !i915_mmio_reg_valid(div0_reg));
3606 if (dev_priv->display.vbt.override_afc_startup &&
3607 i915_mmio_reg_valid(div0_reg))
3608 intel_de_rmw(dev_priv, div0_reg,
3609 TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3610 intel_de_posting_read(dev_priv, cfgcr1_reg);
/*
 * Program the MG PHY PLL registers from the cached pll->state.hw_state.
 * Registers with reserved fields are updated via RMW with the matching
 * field masks (bias/coldst masks come from icl_calc_mg_pll_state());
 * the rest are written directly.  A posting read flushes the sequence.
 */
3613 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3614 struct intel_shared_dpll *pll)
3616 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3617 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3620 * Some of the following registers have reserved fields, so program
3621 * these with RMW based on a mask. The mask can be fixed or generated
3622 * during the calc/readout phase if the mask depends on some other HW
3623 * state like refclk, see icl_calc_mg_pll_state().
3625 intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
3626 MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3628 intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
3629 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3630 hw_state->mg_clktop2_coreclkctl1);
3632 intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
3633 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3634 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3635 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3636 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3637 hw_state->mg_clktop2_hsclkctl);
3639 intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3640 intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3641 intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3642 intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3643 hw_state->mg_pll_frac_lock);
3644 intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3646 intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
3647 hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3649 intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
3650 hw_state->mg_pll_tdc_coldst_bias_mask,
3651 hw_state->mg_pll_tdc_coldst_bias);
3653 intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
/*
 * Program the Dekel (DKL) PHY PLL registers from the cached
 * pll->state.hw_state.  All registers are updated read-modify-write
 * through the DKL indexed accessors, clearing only the fields the
 * driver owns (the same masks used by dkl_pll_get_hw_state()).  A
 * posting read flushes the sequence.
 */
3656 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3657 struct intel_shared_dpll *pll)
3659 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3660 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3664 * All registers programmed here have the same HIP_INDEX_REG even
3665 * though on different building block
3667 /* All the registers are RMW */
3668 val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3669 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3670 val |= hw_state->mg_refclkin_ctl;
3671 intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3673 val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3674 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3675 val |= hw_state->mg_clktop2_coreclkctl1;
3676 intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3678 val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3679 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3680 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3681 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3682 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3683 val |= hw_state->mg_clktop2_hsclkctl;
3684 intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3686 val = DKL_PLL_DIV0_MASK;
3687 if (dev_priv->display.vbt.override_afc_startup)
3688 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3689 intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
3690 hw_state->mg_pll_div0);
3692 val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3693 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3694 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3695 val |= hw_state->mg_pll_div1;
3696 intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
3698 val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3699 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3700 DKL_PLL_SSC_STEP_LEN_MASK |
3701 DKL_PLL_SSC_STEP_NUM_MASK |
3703 val |= hw_state->mg_pll_ssc;
3704 intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);
3706 val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3707 val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3708 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3709 val |= hw_state->mg_pll_bias;
3710 intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
3712 val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3713 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3714 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3715 val |= hw_state->mg_pll_tdc_coldst_bias;
3716 intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3718 intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
/* Power up a PLL via its enable register and wait for the power-good ack. */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
/* Set the PLL enable bit and wait for the lock indication. */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
/*
 * Disable CMTG (DPT) clock gating on early ADL-P steppings while DPLL0 is
 * enabled; no-op on every other platform/PLL combination.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* Only ADL-P A0/A1 stepping and only for DPLL0 need this WA. */
	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
/* Enable sequence for combo PHY PLLs: power up, program, then enable. */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain. The reference is dropped in combo_pll_disable().
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
/* Enable sequence for the Thunderbolt PLL (fixed TBT_PLL_ENABLE register). */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
/*
 * Enable sequence for Type-C PLLs: Dekel PHY programming on display ver 12+,
 * MG PHY programming on ICL (ver 11).
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
/* Common ICL+ disable sequence: drop PLL enable, then drop PLL power. */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
/*
 * Disable a combo PHY PLL and, on JSL/EHL DPLL4, release the DC-off power
 * reference taken in combo_pll_enable().
 */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
					pll->wakeref);
}
/* Disable the Thunderbolt PLL via its fixed enable register. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
/* Type-C PLLs share the common ICL+ disable sequence. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
/* ICL+ PLLs run off the (non-SSC) cdclk reference clock. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
3910 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3911 const struct intel_dpll_hw_state *hw_state)
3913 drm_dbg_kms(&dev_priv->drm,
3914 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3915 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3916 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3917 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3918 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3919 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3920 hw_state->cfgcr0, hw_state->cfgcr1,
3922 hw_state->mg_refclkin_ctl,
3923 hw_state->mg_clktop2_coreclkctl1,
3924 hw_state->mg_clktop2_hsclkctl,
3925 hw_state->mg_pll_div0,
3926 hw_state->mg_pll_div1,
3927 hw_state->mg_pll_lf,
3928 hw_state->mg_pll_frac_lock,
3929 hw_state->mg_pll_ssc,
3930 hw_state->mg_pll_bias,
3931 hw_state->mg_pll_tdc_coldst_bias);
/* Vtable for combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Vtable for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Vtable for ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
/* ICL: two combo DPLLs, one TBT PLL and four MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* JSL/EHL: combo PLLs only (no Type-C ports, hence no update_active_dpll). */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
/*
 * Vtable for Dekel PHY (Type-C) PLLs on display ver 12+; shares the
 * enable/disable/freq paths with the ICL MG PLLs, only hw state readout
 * differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* TGL: two combo DPLLs, one TBT PLL and six Dekel (Type-C) PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
/* RKL: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* DG1: four combo PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
/* ADL-S: four combo PLLs (IDs reuse the ICL/DG1 numbering). */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* ADL-P: two combo DPLLs, one TBT PLL and four Dekel (Type-C) PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
/**
 * intel_shared_dpll_init - Initialize shared DPLLs
 * @dev_priv: i915 device
 *
 * Initialize shared DPLLs for @dev_priv, selecting the platform's DPLL
 * manager and wiring up the per-PLL info entries.
 */
void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
{
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	mutex_init(&dev_priv->display.dpll.lock);

	/*
	 * Most specific platforms first: e.g. JSL/EHL must be matched before
	 * the generic DISPLAY_VER >= 11 fallback would catch them.
	 */
	if (IS_DG2(dev_priv))
		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
		dpll_mgr = NULL;
	else if (IS_ALDERLAKE_P(dev_priv))
		dpll_mgr = &adlp_pll_mgr;
	else if (IS_ALDERLAKE_S(dev_priv))
		dpll_mgr = &adls_pll_mgr;
	else if (IS_DG1(dev_priv))
		dpll_mgr = &dg1_pll_mgr;
	else if (IS_ROCKETLAKE(dev_priv))
		dpll_mgr = &rkl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (IS_JSL_EHL(dev_priv))
		dpll_mgr = &ehl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (DISPLAY_VER(dev_priv) == 9)
		dpll_mgr = &skl_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	if (!dpll_mgr) {
		dev_priv->display.dpll.num_shared_dpll = 0;
		return;
	}

	dpll_info = dpll_mgr->dpll_info;

	for (i = 0; dpll_info[i].name; i++) {
		if (drm_WARN_ON(&dev_priv->drm,
				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
			break;

		/* The info tables are expected to be indexed by DPLL id. */
		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
	}

	dev_priv->display.dpll.mgr = dpll_mgr;
	dev_priv->display.dpll.num_shared_dpll = i;
}
/**
 * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to compute DPLLs for
 * @encoder: encoder determining the type of port DPLL
 *
 * This function computes the DPLL state for the given CRTC and encoder.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int intel_compute_shared_dplls(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;

	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return -EINVAL;

	return dpll_mgr->compute_dplls(state, crtc, encoder);
}
/**
 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to reserve DPLLs for
 * @encoder: encoder determining the type of port DPLL
 *
 * This function reserves all required DPLLs for the given CRTC and encoder
 * combination in the current atomic commit @state and the new @crtc atomic
 * state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * The reserved DPLLs should be released by calling
 * intel_release_shared_dplls().
 *
 * Returns:
 * 0 if all required DPLLs were successfully reserved,
 * negative error code otherwise.
 */
int intel_reserve_shared_dplls(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;

	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return -EINVAL;

	return dpll_mgr->get_dplls(state, crtc, encoder);
}
/**
 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
 * @state: atomic state
 * @crtc: crtc from which the DPLLs are to be released
 *
 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
 * from the current atomic commit @state and the old @crtc atomic state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 */
void intel_release_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;

	/*
	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even though the platform doesn't yet support
	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
	 * called on those.
	 */
	if (!dpll_mgr)
		return;

	dpll_mgr->put_dplls(state, crtc);
}
/**
 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
 * @state: atomic state
 * @crtc: the CRTC for which to update the active DPLL
 * @encoder: encoder determining the type of port DPLL
 *
 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
 * DPLL selected will be based on the current mode of the encoder's port.
 */
void intel_update_active_dpll(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;

	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return;

	dpll_mgr->update_active_dpll(state, crtc, encoder);
}
/**
 * intel_dpll_get_freq - calculate the DPLL's output frequency
 * @i915: i915 device
 * @pll: DPLL for which to calculate the output frequency
 * @pll_state: DPLL state from which to calculate the output frequency
 *
 * Return the output frequency corresponding to @pll's passed in @pll_state,
 * or 0 if the PLL's vtable has no get_freq hook.
 */
int intel_dpll_get_freq(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state)
{
	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
		return 0;

	return pll->info->funcs->get_freq(i915, pll, pll_state);
}
/**
 * intel_dpll_get_hw_state - readout the DPLL's hardware state
 * @i915: i915 device
 * @pll: DPLL for which to read out the hardware state
 * @hw_state: DPLL's hardware state
 *
 * Read out @pll's hardware state into @hw_state. Returns whether the PLL
 * is enabled, as reported by the PLL's get_hw_state hook.
 */
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state)
{
	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
}
/*
 * Read out one PLL's hardware state and rebuild the software tracking
 * (on/off, pipe_mask, active_mask) from the current CRTC states.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * JSL/EHL DPLL4 enable holds a DC-off power reference (see
	 * combo_pll_enable()); take it here too if the PLL was found enabled.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4326 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4328 if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4329 i915->display.dpll.mgr->update_ref_clks(i915);
/* Read out the hardware state of every shared DPLL. */
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
}
/*
 * Disable a PLL that hardware readout found enabled but software tracking
 * shows is unused by any active pipe.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
/* Sanitize the state of every shared DPLL after hardware readout. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
}
/**
 * intel_dpll_dump_hw_state - write hw_state to dmesg
 * @dev_priv: i915 drm device
 * @hw_state: hw state to be written to the log
 *
 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
 */
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	if (dev_priv->display.dpll.mgr) {
		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
	} else {
		/* fallback for platforms that don't use the shared dpll
		 * infrastructure
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			    "fp0: 0x%x, fp1: 0x%x\n",
			    hw_state->dpll,
			    hw_state->dpll_md,
			    hw_state->fp0,
			    hw_state->fp1);
	}
}
/*
 * Cross-check one PLL's software tracking against hardware: on/off state,
 * active/reference pipe masks and, when the PLL is on, the cached hw_state.
 * With @crtc == NULL only the global consistency checks are done; otherwise
 * the checks also cover @crtc's membership in the PLL's masks.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
/*
 * Verify the PLL bookkeeping around a CRTC state swap: the new PLL must
 * track @crtc, and an old PLL that @crtc left must no longer reference it.
 */
void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
				    struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->active_mask);
		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
4472 void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
4476 for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4477 verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],