2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <linux/string_helpers.h>
#include "i915_reg.h"
#include "intel_de.h"
28 #include "intel_display_types.h"
29 #include "intel_dkl_phy.h"
30 #include "intel_dkl_phy_regs.h"
31 #include "intel_dpio_phy.h"
32 #include "intel_dpll.h"
33 #include "intel_dpll_mgr.h"
34 #include "intel_hti.h"
35 #include "intel_mg_phy_regs.h"
36 #include "intel_pch_refclk.h"
42 * Display PLLs used for driving outputs vary by platform. While some have
43 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
44 * from a pool. In the latter scenario, it is possible that multiple pipes
45 * share a PLL if their configurations match.
47 * This file provides an abstraction over display PLLs. The function
48 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
49 * users of a PLL are tracked and that tracking is integrated with the atomic
50 * modeset interface. During an atomic operation, required PLLs can be reserved
51 * for a given CRTC and encoder configuration by calling
52 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
53 * with intel_release_shared_dplls().
54 * Changes to the users are first staged in the atomic state, and then made
55 * effective by calling intel_shared_dpll_swap_state() during the atomic
 * commit phase.
 */
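/*
 * A rough usage sketch (illustrative only; the real call sites live in the
 * atomic check/commit code elsewhere in the driver, and the exact signatures
 * may differ slightly):
 *
 *	atomic check:
 *		ret = intel_reserve_shared_dplls(state, crtc, encoder);
 *		if (ret)
 *			return ret;	(no usable PLL for this configuration)
 *
 *	atomic commit:
 *		intel_shared_dpll_swap_state(state);	(staged -> effective)
 *
 *	disabling the output, or dropping the PLL for a new configuration:
 *		intel_release_shared_dplls(state, crtc);
 */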
59 /* platform specific hooks for managing DPLLs */
60 struct intel_shared_dpll_funcs {
62 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
63 * the pll is not already enabled.
65 void (*enable)(struct drm_i915_private *i915,
66 struct intel_shared_dpll *pll);
69 * Hook for disabling the pll, called from intel_disable_shared_dpll()
70 * only when it is safe to disable the pll, i.e., there are no more
71 * tracked users for it.
73 void (*disable)(struct drm_i915_private *i915,
74 struct intel_shared_dpll *pll);
77 * Hook for reading the values currently programmed to the DPLL
78 * registers. This is used for initial hw state readout and state
79 * verification after a mode set.
81 bool (*get_hw_state)(struct drm_i915_private *i915,
82 struct intel_shared_dpll *pll,
83 struct intel_dpll_hw_state *hw_state);
86 * Hook for calculating the pll's output frequency based on its passed
 * in state.
 */
89 int (*get_freq)(struct drm_i915_private *i915,
90 const struct intel_shared_dpll *pll,
91 const struct intel_dpll_hw_state *pll_state);
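/*
 * Concrete instances of these hooks appear further down in this file,
 * e.g. ibx_pch_dpll_funcs for the PCH (IBX) DPLLs or hsw_ddi_wrpll_funcs
 * for the HSW/BDW WRPLLs.
 */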
94 struct intel_dpll_mgr {
95 const struct dpll_info *dpll_info;
97 int (*compute_dplls)(struct intel_atomic_state *state,
98 struct intel_crtc *crtc,
99 struct intel_encoder *encoder);
100 int (*get_dplls)(struct intel_atomic_state *state,
101 struct intel_crtc *crtc,
102 struct intel_encoder *encoder);
103 void (*put_dplls)(struct intel_atomic_state *state,
104 struct intel_crtc *crtc);
105 void (*update_active_dpll)(struct intel_atomic_state *state,
106 struct intel_crtc *crtc,
107 struct intel_encoder *encoder);
108 void (*update_ref_clks)(struct drm_i915_private *i915);
109 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
110 const struct intel_dpll_hw_state *hw_state);
114 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
115 struct intel_shared_dpll_state *shared_dpll)
117 enum intel_dpll_id i;
119 /* Copy shared dpll state */
120 for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
121 struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
123 shared_dpll[i] = pll->state;
127 static struct intel_shared_dpll_state *
128 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
130 struct intel_atomic_state *state = to_intel_atomic_state(s);
132 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
134 if (!state->dpll_set) {
135 state->dpll_set = true;
137 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
141 return state->shared_dpll;
145 * intel_get_shared_dpll_by_id - get a DPLL given its id
146 * @dev_priv: i915 device instance
 * @id: pll id
 *
 * Returns:
150 * A pointer to the DPLL with @id
 */
152 struct intel_shared_dpll *
153 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
154 enum intel_dpll_id id)
156 return &dev_priv->display.dpll.shared_dplls[id];
160 void assert_shared_dpll(struct drm_i915_private *dev_priv,
161 struct intel_shared_dpll *pll,
165 struct intel_dpll_hw_state hw_state;
167 if (drm_WARN(&dev_priv->drm, !pll,
168 "asserting DPLL %s with no DPLL\n", str_on_off(state)))
171 cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
172 I915_STATE_WARN(dev_priv, cur_state != state,
173 "%s assertion failure (expected %s, current %s)\n",
174 pll->info->name, str_on_off(state),
175 str_on_off(cur_state));
178 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
180 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
183 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
185 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
189 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
190 struct intel_shared_dpll *pll)
if (IS_DG1(i915))
193 return DG1_DPLL_ENABLE(pll->info->id);
194 else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
195 return MG_PLL_ENABLE(0);
197 return ICL_DPLL_ENABLE(pll->info->id);
201 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
202 struct intel_shared_dpll *pll)
204 const enum intel_dpll_id id = pll->info->id;
205 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
207 if (IS_ALDERLAKE_P(i915))
208 return ADLP_PORTTC_PLL_ENABLE(tc_port);
210 return MG_PLL_ENABLE(tc_port);
214 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
215 * @crtc_state: CRTC, and its state, which has a shared DPLL
217 * Enable the shared DPLL used by the CRTC in @crtc_state.
 */
219 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
221 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
222 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
223 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
224 unsigned int pipe_mask = BIT(crtc->pipe);
225 unsigned int old_mask;
227 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
230 mutex_lock(&dev_priv->display.dpll.lock);
231 old_mask = pll->active_mask;
233 if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
234 drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
237 pll->active_mask |= pipe_mask;
239 drm_dbg_kms(&dev_priv->drm,
240 "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
241 pll->info->name, pll->active_mask, pll->on,
242 crtc->base.base.id, crtc->base.name);
245 drm_WARN_ON(&dev_priv->drm, !pll->on);
246 assert_shared_dpll_enabled(dev_priv, pll);
249 drm_WARN_ON(&dev_priv->drm, pll->on);
251 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
252 pll->info->funcs->enable(dev_priv, pll);
256 mutex_unlock(&dev_priv->display.dpll.lock);
260 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
261 * @crtc_state: CRTC, and its state, which has a shared DPLL
263 * Disable the shared DPLL used by the CRTC in @crtc_state.
 */
265 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
267 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
268 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
269 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
270 unsigned int pipe_mask = BIT(crtc->pipe);
272 /* PCH only available on ILK+ */
273 if (DISPLAY_VER(dev_priv) < 5)
279 mutex_lock(&dev_priv->display.dpll.lock);
280 if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
281 "%s not used by [CRTC:%d:%s]\n", pll->info->name,
282 crtc->base.base.id, crtc->base.name))
285 drm_dbg_kms(&dev_priv->drm,
286 "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
287 pll->info->name, pll->active_mask, pll->on,
288 crtc->base.base.id, crtc->base.name);
290 assert_shared_dpll_enabled(dev_priv, pll);
291 drm_WARN_ON(&dev_priv->drm, !pll->on);
293 pll->active_mask &= ~pipe_mask;
294 if (pll->active_mask)
297 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
298 pll->info->funcs->disable(dev_priv, pll);
302 mutex_unlock(&dev_priv->display.dpll.lock);
305 static struct intel_shared_dpll *
306 intel_find_shared_dpll(struct intel_atomic_state *state,
307 const struct intel_crtc *crtc,
308 const struct intel_dpll_hw_state *pll_state,
309 unsigned long dpll_mask)
311 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
312 struct intel_shared_dpll *pll, *unused_pll = NULL;
313 struct intel_shared_dpll_state *shared_dpll;
314 enum intel_dpll_id i;
316 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
318 drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
320 for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
321 pll = &dev_priv->display.dpll.shared_dplls[i];
323 /* Only want to check enabled timings first */
324 if (shared_dpll[i].pipe_mask == 0) {
330 if (memcmp(pll_state,
331 &shared_dpll[i].hw_state,
332 sizeof(*pll_state)) == 0) {
333 drm_dbg_kms(&dev_priv->drm,
334 "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
335 crtc->base.base.id, crtc->base.name,
337 shared_dpll[i].pipe_mask,
343 /* Ok no matching timings, maybe there's a free one? */
345 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
346 crtc->base.base.id, crtc->base.name,
347 unused_pll->info->name);
355 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
356 * @crtc: CRTC on whose behalf the reference is taken
357 * @pll: DPLL for which the reference is taken
358 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
360 * Take a reference for @pll tracking the use of it by @crtc.
363 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
364 const struct intel_shared_dpll *pll,
365 struct intel_shared_dpll_state *shared_dpll_state)
367 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
369 drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
371 shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
373 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
374 crtc->base.base.id, crtc->base.name, pll->info->name);
378 intel_reference_shared_dpll(struct intel_atomic_state *state,
379 const struct intel_crtc *crtc,
380 const struct intel_shared_dpll *pll,
381 const struct intel_dpll_hw_state *pll_state)
383 struct intel_shared_dpll_state *shared_dpll;
384 const enum intel_dpll_id id = pll->info->id;
386 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
388 if (shared_dpll[id].pipe_mask == 0)
389 shared_dpll[id].hw_state = *pll_state;
391 intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
395 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
396 * @crtc: CRTC on whose behalf the reference is dropped
397 * @pll: DPLL for which the reference is dropped
398 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
400 * Drop a reference for @pll tracking the end of use of it by @crtc.
403 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
404 const struct intel_shared_dpll *pll,
405 struct intel_shared_dpll_state *shared_dpll_state)
407 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
409 drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
411 shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
413 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
414 crtc->base.base.id, crtc->base.name, pll->info->name);
417 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
418 const struct intel_crtc *crtc,
419 const struct intel_shared_dpll *pll)
421 struct intel_shared_dpll_state *shared_dpll;
422 const enum intel_dpll_id id = pll->info->id;
424 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
426 intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
429 static void intel_put_dpll(struct intel_atomic_state *state,
430 struct intel_crtc *crtc)
432 const struct intel_crtc_state *old_crtc_state =
433 intel_atomic_get_old_crtc_state(state, crtc);
434 struct intel_crtc_state *new_crtc_state =
435 intel_atomic_get_new_crtc_state(state, crtc);
437 new_crtc_state->shared_dpll = NULL;
439 if (!old_crtc_state->shared_dpll)
442 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
446 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
447 * @state: atomic state
449 * This is the dpll version of drm_atomic_helper_swap_state() since the
450 * helper does not handle driver-specific global state.
452 * For consistency with atomic helpers this function does a complete swap,
453 * i.e. it also puts the current state into @state, even though there is no
454 * need for that at this moment.
456 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
458 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
459 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
460 enum intel_dpll_id i;
462 if (!state->dpll_set)
465 for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
466 struct intel_shared_dpll *pll =
467 &dev_priv->display.dpll.shared_dplls[i];
469 swap(pll->state, shared_dpll[i]);
473 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
474 struct intel_shared_dpll *pll,
475 struct intel_dpll_hw_state *hw_state)
477 const enum intel_dpll_id id = pll->info->id;
478 intel_wakeref_t wakeref;
481 wakeref = intel_display_power_get_if_enabled(dev_priv,
482 POWER_DOMAIN_DISPLAY_CORE);
486 val = intel_de_read(dev_priv, PCH_DPLL(id));
487 hw_state->dpll = val;
488 hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
489 hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
491 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
493 return val & DPLL_VCO_ENABLE;
496 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
501 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
502 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
503 DREF_SUPERSPREAD_SOURCE_MASK));
504 I915_STATE_WARN(dev_priv, !enabled,
505 "PCH refclk assertion failure, should be active but is disabled\n");
508 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
509 struct intel_shared_dpll *pll)
511 const enum intel_dpll_id id = pll->info->id;
513 /* PCH refclock must be enabled first */
514 ibx_assert_pch_refclk_enabled(dev_priv);
516 intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
517 intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
519 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
521 /* Wait for the clocks to stabilize. */
522 intel_de_posting_read(dev_priv, PCH_DPLL(id));
525 /* The pixel multiplier can only be updated once the
526 * DPLL is enabled and the clocks are stable.
 *
 * So write it again.
 */
530 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
531 intel_de_posting_read(dev_priv, PCH_DPLL(id));
535 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
536 struct intel_shared_dpll *pll)
538 const enum intel_dpll_id id = pll->info->id;
540 intel_de_write(dev_priv, PCH_DPLL(id), 0);
541 intel_de_posting_read(dev_priv, PCH_DPLL(id));
545 static int ibx_compute_dpll(struct intel_atomic_state *state,
546 struct intel_crtc *crtc,
547 struct intel_encoder *encoder)
552 static int ibx_get_dpll(struct intel_atomic_state *state,
553 struct intel_crtc *crtc,
554 struct intel_encoder *encoder)
556 struct intel_crtc_state *crtc_state =
557 intel_atomic_get_new_crtc_state(state, crtc);
558 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
559 struct intel_shared_dpll *pll;
560 enum intel_dpll_id i;
562 if (HAS_PCH_IBX(dev_priv)) {
563 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
564 i = (enum intel_dpll_id) crtc->pipe;
565 pll = &dev_priv->display.dpll.shared_dplls[i];
567 drm_dbg_kms(&dev_priv->drm,
568 "[CRTC:%d:%s] using pre-allocated %s\n",
569 crtc->base.base.id, crtc->base.name,
572 pll = intel_find_shared_dpll(state, crtc,
573 &crtc_state->dpll_hw_state,
574 BIT(DPLL_ID_PCH_PLL_B) |
575 BIT(DPLL_ID_PCH_PLL_A));
581 /* reference the pll */
582 intel_reference_shared_dpll(state, crtc,
583 pll, &crtc_state->dpll_hw_state);
585 crtc_state->shared_dpll = pll;
590 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
591 const struct intel_dpll_hw_state *hw_state)
593 drm_dbg_kms(&dev_priv->drm,
594 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
595 "fp0: 0x%x, fp1: 0x%x\n",
602 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
603 .enable = ibx_pch_dpll_enable,
604 .disable = ibx_pch_dpll_disable,
605 .get_hw_state = ibx_pch_dpll_get_hw_state,
608 static const struct dpll_info pch_plls[] = {
609 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
610 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
614 static const struct intel_dpll_mgr pch_pll_mgr = {
615 .dpll_info = pch_plls,
616 .compute_dplls = ibx_compute_dpll,
617 .get_dplls = ibx_get_dpll,
618 .put_dplls = intel_put_dpll,
619 .dump_hw_state = ibx_dump_hw_state,
622 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
623 struct intel_shared_dpll *pll)
625 const enum intel_dpll_id id = pll->info->id;
627 intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
628 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
632 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
633 struct intel_shared_dpll *pll)
635 intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
636 intel_de_posting_read(dev_priv, SPLL_CTL);
640 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
641 struct intel_shared_dpll *pll)
643 const enum intel_dpll_id id = pll->info->id;
645 intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
646 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
649 * Try to set up the PCH reference clock once all DPLLs
650 * that depend on it have been shut down.
652 if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
653 intel_init_pch_refclk(dev_priv);
656 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
657 struct intel_shared_dpll *pll)
659 enum intel_dpll_id id = pll->info->id;
661 intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
662 intel_de_posting_read(dev_priv, SPLL_CTL);
665 * Try to set up the PCH reference clock once all DPLLs
666 * that depend on it have been shut down.
668 if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
669 intel_init_pch_refclk(dev_priv);
672 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
673 struct intel_shared_dpll *pll,
674 struct intel_dpll_hw_state *hw_state)
676 const enum intel_dpll_id id = pll->info->id;
677 intel_wakeref_t wakeref;
680 wakeref = intel_display_power_get_if_enabled(dev_priv,
681 POWER_DOMAIN_DISPLAY_CORE);
685 val = intel_de_read(dev_priv, WRPLL_CTL(id));
686 hw_state->wrpll = val;
688 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
690 return val & WRPLL_PLL_ENABLE;
693 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
694 struct intel_shared_dpll *pll,
695 struct intel_dpll_hw_state *hw_state)
697 intel_wakeref_t wakeref;
700 wakeref = intel_display_power_get_if_enabled(dev_priv,
701 POWER_DOMAIN_DISPLAY_CORE);
705 val = intel_de_read(dev_priv, SPLL_CTL);
706 hw_state->spll = val;
708 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
710 return val & SPLL_PLL_ENABLE;
#define LC_FREQ 2700
714 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
720 /* Constraints for PLL good behavior */
726 struct hsw_wrpll_rnp {
730 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
794 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
795 unsigned int r2, unsigned int n2,
797 struct hsw_wrpll_rnp *best)
799 u64 a, b, c, d, diff, diff_best;
801 /* No best (r,n,p) yet */
810 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
814 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
817 * and we would like delta <= budget.
819 * If the discrepancy is above the PPM-based budget, always prefer to
820 * improve upon the previous solution. However, if you're within the
821 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
823 a = freq2k * budget * p * r2;
824 b = freq2k * budget * best->p * best->r2;
825 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
826 diff_best = abs_diff(freq2k * best->p * best->r2,
827 LC_FREQ_2K * best->n2);
c = 1000000 * diff;
829 d = 1000000 * diff_best;
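/*
 * The comparisons below are the "delta <= budget" check from the comment
 * above with the divisions cleared: "a >= c" is equivalent to
 *
 *	1e6 * |freq2k * p * r2 - LC_FREQ_2K * n2| <= budget * freq2k * p * r2
 *
 * i.e. the candidate (r2, n2, p) is within the PPM budget, and "b >= d" is
 * the same statement for the currently best (r2, n2, p).
 */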
831 if (a < c && b < d) {
832 /* If both are above the budget, pick the closer */
833 if (best->p * best->r2 * diff < p * r2 * diff_best) {
838 } else if (a >= c && b < d) {
839 /* If A is below the threshold but B is above it? Update. */
843 } else if (a >= c && b >= d) {
844 /* Both are below the limit, so pick the higher n2/(r2*r2) */
845 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
851 /* Otherwise a < c && b >= d, do nothing */
855 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
856 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
860 struct hsw_wrpll_rnp best = {};
863 freq2k = clock / 100;
865 budget = hsw_wrpll_get_budget_for_freq(clock);
867 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
868 * and directly pass the LC PLL to it. */
869 if (freq2k == 5400000) {
877 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
880 * We want R so that REF_MIN <= Ref <= REF_MAX.
881 * Injecting R2 = 2 * R gives:
882 * REF_MAX * r2 > LC_FREQ * 2 and
883 * REF_MIN * r2 < LC_FREQ * 2
885 * Which means the desired boundaries for r2 are:
886 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
889 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
890 r2 <= LC_FREQ * 2 / REF_MIN;
894 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
896 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
897 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
898 * VCO_MAX * r2 > n2 * LC_FREQ and
899 * VCO_MIN * r2 < n2 * LC_FREQ)
901 * Which means the desired boundaries for n2 are:
902 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
904 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
905 n2 <= VCO_MAX * r2 / LC_FREQ;
908 for (p = P_MIN; p <= P_MAX; p += P_INC)
909 hsw_wrpll_update_rnp(freq2k, budget,
919 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
920 const struct intel_shared_dpll *pll,
921 const struct intel_dpll_hw_state *pll_state)
925 u32 wrpll = pll_state->wrpll;
927 switch (wrpll & WRPLL_REF_MASK) {
928 case WRPLL_REF_SPECIAL_HSW:
929 /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
930 if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
931 refclk = dev_priv->display.dpll.ref_clks.nssc;
935 case WRPLL_REF_PCH_SSC:
937 * We could calculate spread here, but our checking
938 * code only cares about 5% accuracy, and spread is a max of
 * 0.5% downspread.
 */
941 refclk = dev_priv->display.dpll.ref_clks.ssc;
943 case WRPLL_REF_LCPLL:
951 r = wrpll & WRPLL_DIVIDER_REF_MASK;
952 p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
953 n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
955 /* Convert to KHz, p & r have a fixed point portion */
956 return (refclk * n / 10) / (p * r) * 2;
960 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
961 struct intel_crtc *crtc)
963 struct drm_i915_private *i915 = to_i915(state->base.dev);
964 struct intel_crtc_state *crtc_state =
965 intel_atomic_get_new_crtc_state(state, crtc);
966 unsigned int p, n2, r2;
968 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
970 crtc_state->dpll_hw_state.wrpll =
971 WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
972 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
973 WRPLL_DIVIDER_POST(p);
975 crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
976 &crtc_state->dpll_hw_state);
981 static struct intel_shared_dpll *
982 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
983 struct intel_crtc *crtc)
985 struct intel_crtc_state *crtc_state =
986 intel_atomic_get_new_crtc_state(state, crtc);
988 return intel_find_shared_dpll(state, crtc,
989 &crtc_state->dpll_hw_state,
990 BIT(DPLL_ID_WRPLL2) |
991 BIT(DPLL_ID_WRPLL1));
995 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
997 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
998 int clock = crtc_state->port_clock;
1000 switch (clock / 2) {
1006 drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
1012 static struct intel_shared_dpll *
1013 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1015 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1016 struct intel_shared_dpll *pll;
1017 enum intel_dpll_id pll_id;
1018 int clock = crtc_state->port_clock;
1020 switch (clock / 2) {
1022 pll_id = DPLL_ID_LCPLL_810;
1025 pll_id = DPLL_ID_LCPLL_1350;
1028 pll_id = DPLL_ID_LCPLL_2700;
1031 MISSING_CASE(clock / 2);
1035 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1043 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1044 const struct intel_shared_dpll *pll,
1045 const struct intel_dpll_hw_state *pll_state)
1049 switch (pll->info->id) {
1050 case DPLL_ID_LCPLL_810:
1053 case DPLL_ID_LCPLL_1350:
1054 link_clock = 135000;
1056 case DPLL_ID_LCPLL_2700:
1057 link_clock = 270000;
1060 drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1064 return link_clock * 2;
1068 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1069 struct intel_crtc *crtc)
1071 struct intel_crtc_state *crtc_state =
1072 intel_atomic_get_new_crtc_state(state, crtc);
1074 if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1077 crtc_state->dpll_hw_state.spll =
1078 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1083 static struct intel_shared_dpll *
1084 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1085 struct intel_crtc *crtc)
1087 struct intel_crtc_state *crtc_state =
1088 intel_atomic_get_new_crtc_state(state, crtc);
1090 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1094 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1095 const struct intel_shared_dpll *pll,
1096 const struct intel_dpll_hw_state *pll_state)
1100 switch (pll_state->spll & SPLL_FREQ_MASK) {
1101 case SPLL_FREQ_810MHz:
1104 case SPLL_FREQ_1350MHz:
1105 link_clock = 135000;
1107 case SPLL_FREQ_2700MHz:
1108 link_clock = 270000;
1111 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1115 return link_clock * 2;
1118 static int hsw_compute_dpll(struct intel_atomic_state *state,
1119 struct intel_crtc *crtc,
1120 struct intel_encoder *encoder)
1122 struct intel_crtc_state *crtc_state =
1123 intel_atomic_get_new_crtc_state(state, crtc);
1125 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1126 return hsw_ddi_wrpll_compute_dpll(state, crtc);
1127 else if (intel_crtc_has_dp_encoder(crtc_state))
1128 return hsw_ddi_lcpll_compute_dpll(crtc_state);
1129 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1130 return hsw_ddi_spll_compute_dpll(state, crtc);
1135 static int hsw_get_dpll(struct intel_atomic_state *state,
1136 struct intel_crtc *crtc,
1137 struct intel_encoder *encoder)
1139 struct intel_crtc_state *crtc_state =
1140 intel_atomic_get_new_crtc_state(state, crtc);
1141 struct intel_shared_dpll *pll = NULL;
1143 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1144 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1145 else if (intel_crtc_has_dp_encoder(crtc_state))
1146 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1147 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1148 pll = hsw_ddi_spll_get_dpll(state, crtc);
1153 intel_reference_shared_dpll(state, crtc,
1154 pll, &crtc_state->dpll_hw_state);
1156 crtc_state->shared_dpll = pll;
1161 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1163 i915->display.dpll.ref_clks.ssc = 135000;
1164 /* Non-SSC is only used on non-ULT HSW. */
1165 if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1166 i915->display.dpll.ref_clks.nssc = 24000;
1168 i915->display.dpll.ref_clks.nssc = 135000;
1171 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1172 const struct intel_dpll_hw_state *hw_state)
1174 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1175 hw_state->wrpll, hw_state->spll);
1178 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1179 .enable = hsw_ddi_wrpll_enable,
1180 .disable = hsw_ddi_wrpll_disable,
1181 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
1182 .get_freq = hsw_ddi_wrpll_get_freq,
1185 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1186 .enable = hsw_ddi_spll_enable,
1187 .disable = hsw_ddi_spll_disable,
1188 .get_hw_state = hsw_ddi_spll_get_hw_state,
1189 .get_freq = hsw_ddi_spll_get_freq,
1192 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1193 struct intel_shared_dpll *pll)
1197 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1198 struct intel_shared_dpll *pll)
1202 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1203 struct intel_shared_dpll *pll,
1204 struct intel_dpll_hw_state *hw_state)
1209 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1210 .enable = hsw_ddi_lcpll_enable,
1211 .disable = hsw_ddi_lcpll_disable,
1212 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
1213 .get_freq = hsw_ddi_lcpll_get_freq,
1216 static const struct dpll_info hsw_plls[] = {
1217 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1218 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1219 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1220 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
1221 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1222 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1226 static const struct intel_dpll_mgr hsw_pll_mgr = {
1227 .dpll_info = hsw_plls,
1228 .compute_dplls = hsw_compute_dpll,
1229 .get_dplls = hsw_get_dpll,
1230 .put_dplls = intel_put_dpll,
1231 .update_ref_clks = hsw_update_dpll_ref_clks,
1232 .dump_hw_state = hsw_dump_hw_state,
1235 struct skl_dpll_regs {
1236 i915_reg_t ctl, cfgcr1, cfgcr2;
1239 /* this array is indexed by the *shared* pll id */
1240 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1244 /* DPLL 0 doesn't support HDMI mode */
1249 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1250 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1254 .ctl = WRPLL_CTL(0),
1255 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1256 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1260 .ctl = WRPLL_CTL(1),
1261 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1262 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1266 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1267 struct intel_shared_dpll *pll)
1269 const enum intel_dpll_id id = pll->info->id;
1271 intel_de_rmw(dev_priv, DPLL_CTRL1,
1272 DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1273 pll->state.hw_state.ctrl1 << (id * 6));
1274 intel_de_posting_read(dev_priv, DPLL_CTRL1);
1277 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1278 struct intel_shared_dpll *pll)
1280 const struct skl_dpll_regs *regs = skl_dpll_regs;
1281 const enum intel_dpll_id id = pll->info->id;
1283 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1285 intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1286 intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1287 intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1288 intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1290 /* the enable bit is always bit 31 */
1291 intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1293 if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1294 drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
1297 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1298 struct intel_shared_dpll *pll)
1300 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1303 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1304 struct intel_shared_dpll *pll)
1306 const struct skl_dpll_regs *regs = skl_dpll_regs;
1307 const enum intel_dpll_id id = pll->info->id;
1309 /* the enable bit is always bit 31 */
1310 intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1311 intel_de_posting_read(dev_priv, regs[id].ctl);
1314 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1315 struct intel_shared_dpll *pll)
1319 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1320 struct intel_shared_dpll *pll,
1321 struct intel_dpll_hw_state *hw_state)
1324 const struct skl_dpll_regs *regs = skl_dpll_regs;
1325 const enum intel_dpll_id id = pll->info->id;
1326 intel_wakeref_t wakeref;
1329 wakeref = intel_display_power_get_if_enabled(dev_priv,
1330 POWER_DOMAIN_DISPLAY_CORE);
1336 val = intel_de_read(dev_priv, regs[id].ctl);
1337 if (!(val & LCPLL_PLL_ENABLE))
1340 val = intel_de_read(dev_priv, DPLL_CTRL1);
1341 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1343 /* avoid reading back stale values if HDMI mode is not enabled */
1344 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1345 hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1346 hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1351 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1356 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1357 struct intel_shared_dpll *pll,
1358 struct intel_dpll_hw_state *hw_state)
1360 const struct skl_dpll_regs *regs = skl_dpll_regs;
1361 const enum intel_dpll_id id = pll->info->id;
1362 intel_wakeref_t wakeref;
1366 wakeref = intel_display_power_get_if_enabled(dev_priv,
1367 POWER_DOMAIN_DISPLAY_CORE);
1373 /* DPLL0 is always enabled since it drives CDCLK */
1374 val = intel_de_read(dev_priv, regs[id].ctl);
1375 if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1378 val = intel_de_read(dev_priv, DPLL_CTRL1);
1379 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1384 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1389 struct skl_wrpll_context {
1390 u64 min_deviation; /* current minimal deviation */
1391 u64 central_freq; /* chosen central freq */
1392 u64 dco_freq; /* chosen dco freq */
1393 unsigned int p; /* chosen divider */
1396 /* DCO freq must be within +1%/-6% of the DCO central freq */
1397 #define SKL_DCO_MAX_PDEVIATION 100
1398 #define SKL_DCO_MAX_NDEVIATION 600
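/*
 * skl_wrpll_try_divider() below computes the deviation in units of 0.01%
 * (10000 * |dco_freq - central_freq| / central_freq), so 100 corresponds
 * to +1% and 600 to -6% of the central frequency.
 */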
1400 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1403 unsigned int divider)
1407 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1410 /* positive deviation */
1411 if (dco_freq >= central_freq) {
1412 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1413 deviation < ctx->min_deviation) {
1414 ctx->min_deviation = deviation;
1415 ctx->central_freq = central_freq;
1416 ctx->dco_freq = dco_freq;
1419 /* negative deviation */
1420 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1421 deviation < ctx->min_deviation) {
1422 ctx->min_deviation = deviation;
1423 ctx->central_freq = central_freq;
1424 ctx->dco_freq = dco_freq;
1429 static void skl_wrpll_get_multipliers(unsigned int p,
1430 unsigned int *p0 /* out */,
1431 unsigned int *p1 /* out */,
1432 unsigned int *p2 /* out */)
1436 unsigned int half = p / 2;
1438 if (half == 1 || half == 2 || half == 3 || half == 5) {
1442 } else if (half % 2 == 0) {
1446 } else if (half % 3 == 0) {
1450 } else if (half % 7 == 0) {
1455 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1459 } else if (p == 5 || p == 7) {
1463 } else if (p == 15) {
1467 } else if (p == 21) {
1471 } else if (p == 35) {
1478 struct skl_wrpll_params {
1488 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1492 u32 p0, u32 p1, u32 p2)
1496 switch (central_freq) {
1498 params->central_freq = 0;
1501 params->central_freq = 1;
1504 params->central_freq = 3;
1521 WARN(1, "Incorrect PDiv\n");
1538 WARN(1, "Incorrect KDiv\n");
1541 params->qdiv_ratio = p1;
1542 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1544 dco_freq = p0 * p1 * p2 * afe_clock;
1547 * Intermediate values are in Hz.
1548 * Divide by MHz to match bspec
 */
1550 params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1551 params->dco_fraction =
1552 div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1553 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
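/*
 * Worked example (illustrative): with ref_clock = 24000 kHz and
 * dco_freq = 8,100,000,000 Hz, dco_freq / ref comes to 337.5, so
 * dco_integer = 337 (0x151) and the 0.5 remainder gives
 * dco_fraction = 0x4000 (fractions are stored in units of 1/0x8000).
 */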
1557 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1559 struct skl_wrpll_params *wrpll_params)
1561 static const u64 dco_central_freq[3] = { 8400000000ULL,
 9000000000ULL,
 9600000000ULL };
1564 static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1565 24, 28, 30, 32, 36, 40, 42, 44,
1566 48, 52, 54, 56, 60, 64, 66, 68,
1567 70, 72, 76, 78, 80, 84, 88, 90,
1569 static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1570 static const struct {
1574 { even_dividers, ARRAY_SIZE(even_dividers) },
1575 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1577 struct skl_wrpll_context ctx = {
1578 .min_deviation = U64_MAX,
1580 unsigned int dco, d, i;
1581 unsigned int p0, p1, p2;
1582 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1584 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1585 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1586 for (i = 0; i < dividers[d].n_dividers; i++) {
1587 unsigned int p = dividers[d].list[i];
1588 u64 dco_freq = p * afe_clock;
1590 skl_wrpll_try_divider(&ctx,
1591 dco_central_freq[dco],
1595 * Skip the remaining dividers if we're sure to
1596 * have found the definitive divider; we can't
1597 * improve a 0 deviation.
1599 if (ctx.min_deviation == 0)
1600 goto skip_remaining_dividers;
1604 skip_remaining_dividers:
1606 * If a solution is found with an even divider, prefer
 * this one.
 */
1609 if (d == 0 && ctx.p)
1617 * gcc incorrectly analyses that these can be used without being
1618 * initialized. To be fair, it's hard to guess.
 */
p0 = p1 = p2 = 0;
1621 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1622 skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1623 ctx.central_freq, p0, p1, p2);
1628 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1629 const struct intel_shared_dpll *pll,
1630 const struct intel_dpll_hw_state *pll_state)
1632 int ref_clock = i915->display.dpll.ref_clks.nssc;
1633 u32 p0, p1, p2, dco_freq;
1635 p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1636 p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1638 if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1639 p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
 p1 = 1;
1645 case DPLL_CFGCR2_PDIV_1:
1648 case DPLL_CFGCR2_PDIV_2:
1651 case DPLL_CFGCR2_PDIV_3:
1654 case DPLL_CFGCR2_PDIV_7_INVALID:
1656 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1657 * handling it the same way as PDIV_7.
1659 drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1661 case DPLL_CFGCR2_PDIV_7:
1670 case DPLL_CFGCR2_KDIV_5:
1673 case DPLL_CFGCR2_KDIV_2:
1676 case DPLL_CFGCR2_KDIV_3:
1679 case DPLL_CFGCR2_KDIV_1:
1687 dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1690 dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1693 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1696 return dco_freq / (p0 * p1 * p2 * 5);
1699 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1701 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1702 struct skl_wrpll_params wrpll_params = {};
1703 u32 ctrl1, cfgcr1, cfgcr2;
1707 * See comment in intel_dpll_hw_state to understand why we always use 0
1708 * as the DPLL id in this function.
1710 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1712 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1714 ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1715 i915->display.dpll.ref_clks.nssc, &wrpll_params);
1719 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1720 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1721 wrpll_params.dco_integer;
1723 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1724 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1725 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1726 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1727 wrpll_params.central_freq;
1729 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1730 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1731 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1733 crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1734 &crtc_state->dpll_hw_state);
1740 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1745 * See comment in intel_dpll_hw_state to understand why we always use 0
1746 * as the DPLL id in this function.
1748 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1749 switch (crtc_state->port_clock / 2) {
1751 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1754 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1757 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1761 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1764 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1767 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1771 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1776 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1777 const struct intel_shared_dpll *pll,
1778 const struct intel_dpll_hw_state *pll_state)
1782 switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1783 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1784 case DPLL_CTRL1_LINK_RATE_810:
1787 case DPLL_CTRL1_LINK_RATE_1080:
1788 link_clock = 108000;
1790 case DPLL_CTRL1_LINK_RATE_1350:
1791 link_clock = 135000;
1793 case DPLL_CTRL1_LINK_RATE_1620:
1794 link_clock = 162000;
1796 case DPLL_CTRL1_LINK_RATE_2160:
1797 link_clock = 216000;
1799 case DPLL_CTRL1_LINK_RATE_2700:
1800 link_clock = 270000;
1803 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1807 return link_clock * 2;
1810 static int skl_compute_dpll(struct intel_atomic_state *state,
1811 struct intel_crtc *crtc,
1812 struct intel_encoder *encoder)
1814 struct intel_crtc_state *crtc_state =
1815 intel_atomic_get_new_crtc_state(state, crtc);
1817 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1818 return skl_ddi_hdmi_pll_dividers(crtc_state);
1819 else if (intel_crtc_has_dp_encoder(crtc_state))
1820 return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1825 static int skl_get_dpll(struct intel_atomic_state *state,
1826 struct intel_crtc *crtc,
1827 struct intel_encoder *encoder)
1829 struct intel_crtc_state *crtc_state =
1830 intel_atomic_get_new_crtc_state(state, crtc);
1831 struct intel_shared_dpll *pll;
1833 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1834 pll = intel_find_shared_dpll(state, crtc,
1835 &crtc_state->dpll_hw_state,
1836 BIT(DPLL_ID_SKL_DPLL0));
1838 pll = intel_find_shared_dpll(state, crtc,
1839 &crtc_state->dpll_hw_state,
1840 BIT(DPLL_ID_SKL_DPLL3) |
1841 BIT(DPLL_ID_SKL_DPLL2) |
1842 BIT(DPLL_ID_SKL_DPLL1));
1846 intel_reference_shared_dpll(state, crtc,
1847 pll, &crtc_state->dpll_hw_state);
1849 crtc_state->shared_dpll = pll;
1854 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1855 const struct intel_shared_dpll *pll,
1856 const struct intel_dpll_hw_state *pll_state)
1859 * ctrl1 register is already shifted for each pll, just use 0 to get
1860 * the internal shift for each field
1862 if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1863 return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1865 return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1868 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1871 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1874 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1875 const struct intel_dpll_hw_state *hw_state)
1877 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1878 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1884 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1885 .enable = skl_ddi_pll_enable,
1886 .disable = skl_ddi_pll_disable,
1887 .get_hw_state = skl_ddi_pll_get_hw_state,
1888 .get_freq = skl_ddi_pll_get_freq,
1891 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1892 .enable = skl_ddi_dpll0_enable,
1893 .disable = skl_ddi_dpll0_disable,
1894 .get_hw_state = skl_ddi_dpll0_get_hw_state,
1895 .get_freq = skl_ddi_pll_get_freq,
1898 static const struct dpll_info skl_plls[] = {
1899 { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1900 { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1901 { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1902 { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
1906 static const struct intel_dpll_mgr skl_pll_mgr = {
1907 .dpll_info = skl_plls,
1908 .compute_dplls = skl_compute_dpll,
1909 .get_dplls = skl_get_dpll,
1910 .put_dplls = intel_put_dpll,
1911 .update_ref_clks = skl_update_dpll_ref_clks,
1912 .dump_hw_state = skl_dump_hw_state,
1915 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1916 struct intel_shared_dpll *pll)
1919 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1921 enum dpio_channel ch;
1923 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1925 /* Non-SSC reference */
1926 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
1928 if (IS_GEMINILAKE(dev_priv)) {
1929 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
1930 0, PORT_PLL_POWER_ENABLE);
1932 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1933 PORT_PLL_POWER_STATE), 200))
1934 drm_err(&dev_priv->drm,
1935 "Power state not set for PLL:%d\n", port);
1938 /* Disable 10 bit clock */
1939 intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
1940 PORT_PLL_10BIT_CLK_ENABLE, 0);
1943 intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
1944 PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
1946 /* Write M2 integer */
1947 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
1948 PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
1951 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
1952 PORT_PLL_N_MASK, pll->state.hw_state.pll1);
1954 /* Write M2 fraction */
1955 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
1956 PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
1958 /* Write M2 fraction enable */
1959 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
1960 PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
1963 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1964 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1965 temp &= ~PORT_PLL_INT_COEFF_MASK;
1966 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1967 temp |= pll->state.hw_state.pll6;
1968 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1970 /* Write calibration val */
1971 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
1972 PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
1974 intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
1975 PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
1977 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1978 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1979 temp &= ~PORT_PLL_DCO_AMP_MASK;
1980 temp |= pll->state.hw_state.pll10;
1981 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1983 /* Recalibrate with new settings */
1984 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1985 temp |= PORT_PLL_RECALIBRATE;
1986 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1987 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1988 temp |= pll->state.hw_state.ebb4;
1989 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1992 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
1993 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1995 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1997 drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
1999 if (IS_GEMINILAKE(dev_priv)) {
2000 temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
2001 temp |= DCC_DELAY_RANGE_2;
2002 intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2006 * While we write to the group register to program all lanes at once we
2007 * can read only lane registers and we pick lanes 0/1 for that.
2009 temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
2010 temp &= ~LANE_STAGGER_MASK;
2011 temp &= ~LANESTAGGER_STRAP_OVRD;
2012 temp |= pll->state.hw_state.pcsdw12;
2013 intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2016 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
2017 struct intel_shared_dpll *pll)
2019 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2021 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2022 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2024 if (IS_GEMINILAKE(dev_priv)) {
2025 intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
2026 PORT_PLL_POWER_ENABLE, 0);
2028 if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
2029 PORT_PLL_POWER_STATE), 200))
2030 drm_err(&dev_priv->drm,
2031 "Power state not reset for PLL:%d\n", port);
2035 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2036 struct intel_shared_dpll *pll,
2037 struct intel_dpll_hw_state *hw_state)
2039 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2040 intel_wakeref_t wakeref;
2042 enum dpio_channel ch;
2046 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
2048 wakeref = intel_display_power_get_if_enabled(dev_priv,
2049 POWER_DOMAIN_DISPLAY_CORE);
2055 val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2056 if (!(val & PORT_PLL_ENABLE))
2059 hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
2060 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2062 hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
2063 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2065 hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
2066 hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2068 hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
2069 hw_state->pll1 &= PORT_PLL_N_MASK;
2071 hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
2072 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2074 hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
2075 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2077 hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
2078 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2079 PORT_PLL_INT_COEFF_MASK |
2080 PORT_PLL_GAIN_CTL_MASK;
2082 hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
2083 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2085 hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
2086 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2088 hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
2089 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2090 PORT_PLL_DCO_AMP_MASK;
2093 * While we write to the group register to program all lanes at once we
2094 * can read only lane registers. We configure all lanes the same way, so
2095 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2097 hw_state->pcsdw12 = intel_de_read(dev_priv,
2098 BXT_PORT_PCS_DW12_LN01(phy, ch));
2099 if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2100 drm_dbg(&dev_priv->drm,
2101 "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2103 intel_de_read(dev_priv,
2104 BXT_PORT_PCS_DW12_LN23(phy, ch)));
2105 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2110 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2115 /* pre-calculated values for DP linkrates */
2116 static const struct dpll bxt_dp_clk_val[] = {
2117 /* m2 is .22 binary fixed point */
2118 { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2119 { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2120 { .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2121 { .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2122 { .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2123 { .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2124 { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
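/*
 * Example of the .22 fixed point encoding above: 0x819999a is 135895450,
 * and 135895450 / 2^22 ~= 32.4, matching the annotation; likewise
 * 0x6c00000 / 2^22 is exactly 27.0.
 */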
2128 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2129 struct dpll *clk_div)
2131 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2133 /* Calculate HDMI div */
2135 * FIXME: tie the following calculation into
2136 * i9xx_crtc_compute_clock
2138 if (!bxt_find_best_dpll(crtc_state, clk_div))
2141 drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2146 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2147 struct dpll *clk_div)
2149 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2152 *clk_div = bxt_dp_clk_val[0];
2153 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2154 if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2155 *clk_div = bxt_dp_clk_val[i];
2160 chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2162 drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2163 clk_div->dot != crtc_state->port_clock);
2166 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2167 const struct dpll *clk_div)
2169 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2170 struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2171 int clock = crtc_state->port_clock;
2172 int vco = clk_div->vco;
2173 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2176 if (vco >= 6200000 && vco <= 6700000) {
2181 } else if ((vco > 5400000 && vco < 6200000) ||
2182 (vco >= 4800000 && vco < 5400000)) {
2187 } else if (vco == 5400000) {
2193 drm_err(&i915->drm, "Invalid VCO\n");
2199 else if (clock > 135000)
2201 else if (clock > 67000)
2203 else if (clock > 33000)
2208 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2209 dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2210 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2211 dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2213 if (clk_div->m2 & 0x3fffff)
2214 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2216 dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2217 PORT_PLL_INT_COEFF(int_coef) |
2218 PORT_PLL_GAIN_CTL(gain_ctl);
2220 dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2222 dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2224 dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2225 PORT_PLL_DCO_AMP_OVR_EN_H;
2227 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2229 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2234 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2235 const struct intel_shared_dpll *pll,
2236 const struct intel_dpll_hw_state *pll_state)
2241 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2242 if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2243 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2244 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2245 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2246 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2248 return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2252 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2254 struct dpll clk_div = {};
2256 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2258 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2262 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2264 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2265 struct dpll clk_div = {};
2268 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2270 ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2274 crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2275 &crtc_state->dpll_hw_state);
2280 static int bxt_compute_dpll(struct intel_atomic_state *state,
2281 struct intel_crtc *crtc,
2282 struct intel_encoder *encoder)
2284 struct intel_crtc_state *crtc_state =
2285 intel_atomic_get_new_crtc_state(state, crtc);
2287 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2288 return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2289 else if (intel_crtc_has_dp_encoder(crtc_state))
2290 return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2295 static int bxt_get_dpll(struct intel_atomic_state *state,
2296 struct intel_crtc *crtc,
2297 struct intel_encoder *encoder)
2299 struct intel_crtc_state *crtc_state =
2300 intel_atomic_get_new_crtc_state(state, crtc);
2301 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2302 struct intel_shared_dpll *pll;
2303 enum intel_dpll_id id;
2305 /* 1:1 mapping between ports and PLLs */
2306 id = (enum intel_dpll_id) encoder->port;
2307 pll = intel_get_shared_dpll_by_id(dev_priv, id);
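/*
 * For example, PORT_A (0) maps to DPLL_ID_SKL_DPLL0, i.e. the "PORT PLL A"
 * entry of bxt_plls[] below; no search over a shared PLL pool is needed on
 * this platform.
 */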
2309 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2310 crtc->base.base.id, crtc->base.name, pll->info->name);
2312 intel_reference_shared_dpll(state, crtc,
2313 pll, &crtc_state->dpll_hw_state);
2315 crtc_state->shared_dpll = pll;
2320 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2322 i915->display.dpll.ref_clks.ssc = 100000;
2323 i915->display.dpll.ref_clks.nssc = 100000;
2324 /* DSI non-SSC ref 19.2MHz */
2327 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2328 const struct intel_dpll_hw_state *hw_state)
2330 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2331 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2332 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2346 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2347 .enable = bxt_ddi_pll_enable,
2348 .disable = bxt_ddi_pll_disable,
2349 .get_hw_state = bxt_ddi_pll_get_hw_state,
2350 .get_freq = bxt_ddi_pll_get_freq,
2353 static const struct dpll_info bxt_plls[] = {
2354 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2355 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2356 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2360 static const struct intel_dpll_mgr bxt_pll_mgr = {
2361 .dpll_info = bxt_plls,
2362 .compute_dplls = bxt_compute_dpll,
2363 .get_dplls = bxt_get_dpll,
2364 .put_dplls = intel_put_dpll,
2365 .update_ref_clks = bxt_update_dpll_ref_clks,
2366 .dump_hw_state = bxt_dump_hw_state,
2369 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2370 int *qdiv, int *kdiv)
2373 if (bestdiv % 2 == 0) {
2378 } else if (bestdiv % 4 == 0) {
2380 *qdiv = bestdiv / 4;
2382 } else if (bestdiv % 6 == 0) {
2384 *qdiv = bestdiv / 6;
2386 } else if (bestdiv % 5 == 0) {
2388 *qdiv = bestdiv / 10;
2390 } else if (bestdiv % 14 == 0) {
2392 *qdiv = bestdiv / 14;
2396 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2400 } else { /* 9, 15, 21 */
2401 *pdiv = bestdiv / 3;
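/*
 * Whichever branch is taken, the divisors are chosen so that
 * bestdiv == pdiv * qdiv * kdiv. For example, bestdiv = 20 goes through the
 * "% 4" branch with qdiv = 20 / 4 = 5 and the remaining factor of 4 split
 * between pdiv and kdiv, while bestdiv = 9 takes the odd path with
 * pdiv = 9 / 3 = 3 and the leftover factor of 3 carried by kdiv.
 */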
2408 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2409 u32 dco_freq, u32 ref_freq,
2410 int pdiv, int qdiv, int kdiv)
2425 WARN(1, "Incorrect KDiv\n");
2442 WARN(1, "Incorrect PDiv\n");
2445 WARN_ON(kdiv != 2 && qdiv != 1);
2447 params->qdiv_ratio = qdiv;
2448 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2450 dco = div_u64((u64)dco_freq << 15, ref_freq);
2452 params->dco_integer = dco >> 15;
2453 params->dco_fraction = dco & 0x7fff;
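/*
 * Worked example: with dco_freq = 8100000 kHz and ref_freq = 24000 kHz,
 * dco = (8100000 << 15) / 24000 represents 337.5 in 2^15 fixed point, so
 * dco_integer = 337 = 0x151 and dco_fraction = 0.5 * 2^15 = 0x4000 --
 * matching the DCO values of the 5.4 and 8.1 entries in the 24 MHz DP
 * table below. The same DCO at 19.2 MHz gives 421.875, i.e. 0x1A5/0x7000.
 */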
2457 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2458 * Program half of the nominal DCO divider fraction value.
2461 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2463 return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2464 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2465 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2466 i915->display.dpll.ref_clks.nssc == 38400;
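/*
 * Illustration of the WA: with a 38.4 MHz reference the 19.2 MHz table
 * below is reused, so the nominal dco_fraction for the 8.1 entry is 0x7000;
 * icl_calc_dpll_state() halves that to 0x3800 before it is written to
 * CFGCR0, and icl_ddi_combo_pll_get_freq() compensates by doubling the
 * fraction again on readout, so the computed frequency stays correct.
 */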
2469 struct icl_combo_pll_params {
2471 struct skl_wrpll_params wrpll;
2475 * These values are already adjusted: they're the bits we write to the
2476 * registers, not the logical values.
2478 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2480 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2481 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2483 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2484 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2486 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2487 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2489 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2490 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2492 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2493 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2495 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2496 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2498 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2499 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2501 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2502 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2506 /* Also used for 38.4 MHz values. */
2507 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2509 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2510 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2512 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2513 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2515 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2516 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2518 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2519 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2521 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2522 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2524 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2525 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2527 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2528 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2530 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2531 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2534 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2535 .dco_integer = 0x151, .dco_fraction = 0x4000,
2536 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2539 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2540 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2541 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2544 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2545 .dco_integer = 0x54, .dco_fraction = 0x3000,
2546 /* the following params are unused */
2547 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2550 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2551 .dco_integer = 0x43, .dco_fraction = 0x4000,
2552 /* the following params are unused */
2555 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2556 struct skl_wrpll_params *pll_params)
2558 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2559 const struct icl_combo_pll_params *params =
2560 dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2561 icl_dp_combo_pll_24MHz_values :
2562 icl_dp_combo_pll_19_2MHz_values;
2563 int clock = crtc_state->port_clock;
2566 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2567 if (clock == params[i].clock) {
2568 *pll_params = params[i].wrpll;
2573 MISSING_CASE(clock);
2577 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2578 struct skl_wrpll_params *pll_params)
2580 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2582 if (DISPLAY_VER(dev_priv) >= 12) {
2583 switch (dev_priv->display.dpll.ref_clks.nssc) {
2585 MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2589 *pll_params = tgl_tbt_pll_19_2MHz_values;
2592 *pll_params = tgl_tbt_pll_24MHz_values;
2596 switch (dev_priv->display.dpll.ref_clks.nssc) {
2598 MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2602 *pll_params = icl_tbt_pll_19_2MHz_values;
2605 *pll_params = icl_tbt_pll_24MHz_values;
2613 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2614 const struct intel_shared_dpll *pll,
2615 const struct intel_dpll_hw_state *pll_state)
2618 * The PLL outputs multiple frequencies at the same time; the selection is
2619 * made at the DDI clock mux level.
2621 drm_WARN_ON(&i915->drm, 1);
2626 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2628 int ref_clock = i915->display.dpll.ref_clks.nssc;
2631 * For ICL+, the spec states: if reference frequency is 38.4,
2632 * use 19.2 because the DPLL automatically divides that by 2.
2634 if (ref_clock == 38400)
2641 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2642 struct skl_wrpll_params *wrpll_params)
2644 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2645 int ref_clock = icl_wrpll_ref_clock(i915);
2646 u32 afe_clock = crtc_state->port_clock * 5;
2647 u32 dco_min = 7998000;
2648 u32 dco_max = 10000000;
2649 u32 dco_mid = (dco_min + dco_max) / 2;
2650 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2651 18, 20, 24, 28, 30, 32, 36, 40,
2652 42, 44, 48, 50, 52, 54, 56, 60,
2653 64, 66, 68, 70, 72, 76, 78, 80,
2654 84, 88, 90, 92, 96, 98, 100, 102,
2655 3, 5, 7, 9, 15, 21 };
2656 u32 dco, best_dco = 0, dco_centrality = 0;
2657 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2658 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2660 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2661 dco = afe_clock * dividers[d];
2663 if (dco <= dco_max && dco >= dco_min) {
2664 dco_centrality = abs(dco - dco_mid);
2666 if (dco_centrality < best_dco_centrality) {
2667 best_dco_centrality = dco_centrality;
2668 best_div = dividers[d];
2677 icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2678 icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
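/*
 * Worked example (HDMI): for a 594 MHz TMDS clock, afe_clock = 594000 * 5 =
 * 2970000 kHz. The only divider from the list that puts the DCO inside
 * [7998000, 10000000] is 3 (dco = 8910000 kHz), so best_div = 3 and
 * icl_wrpll_get_multipliers() yields pdiv = 3, qdiv = 1, kdiv = 1. With a
 * 24 MHz reference the populate step then gives 8910000 / 24000 = 371.25,
 * i.e. dco_integer = 0x173 and dco_fraction = 0x2000.
 */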
2684 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2685 const struct intel_shared_dpll *pll,
2686 const struct intel_dpll_hw_state *pll_state)
2688 int ref_clock = icl_wrpll_ref_clock(i915);
2690 u32 p0, p1, p2, dco_freq;
2692 p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2693 p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2695 if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2696 p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2697 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2702 case DPLL_CFGCR1_PDIV_2:
2705 case DPLL_CFGCR1_PDIV_3:
2708 case DPLL_CFGCR1_PDIV_5:
2711 case DPLL_CFGCR1_PDIV_7:
2717 case DPLL_CFGCR1_KDIV_1:
2720 case DPLL_CFGCR1_KDIV_2:
2723 case DPLL_CFGCR1_KDIV_3:
2728 dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2731 dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2732 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2734 if (ehl_combo_pll_div_frac_wa_needed(i915))
2737 dco_freq += (dco_fraction * ref_clock) / 0x8000;
2739 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2742 return dco_freq / (p0 * p1 * p2 * 5);
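/*
 * Worked example: for the 8.1 entry of the 24 MHz DP table above
 * (dco_integer 0x151, dco_fraction 0x4000, pdiv 0x1 meaning divide by 2,
 * kdiv 1, qdiv_mode 0) this reads back p0 = 2, p1 = 1, p2 = 1 and
 * dco_freq = 337 * 24000 + (0x4000 * 24000) / 0x8000 = 8100000 kHz,
 * so the function returns 8100000 / (2 * 1 * 1 * 5) = 810000 kHz, i.e. the
 * 8.1 GT/s port clock.
 */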
2745 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2746 const struct skl_wrpll_params *pll_params,
2747 struct intel_dpll_hw_state *pll_state)
2749 u32 dco_fraction = pll_params->dco_fraction;
2751 if (ehl_combo_pll_div_frac_wa_needed(i915))
2752 dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2754 pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2755 pll_params->dco_integer;
2757 pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2758 DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2759 DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2760 DPLL_CFGCR1_PDIV(pll_params->pdiv);
2762 if (DISPLAY_VER(i915) >= 12)
2763 pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2765 pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2767 if (i915->display.vbt.override_afc_startup)
2768 pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2771 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2772 u32 *target_dco_khz,
2773 struct intel_dpll_hw_state *state,
2776 static const u8 div1_vals[] = { 7, 5, 3, 2 };
2777 u32 dco_min_freq, dco_max_freq;
2781 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2782 dco_max_freq = is_dp ? 8100000 : 10000000;
2784 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2785 int div1 = div1_vals[i];
2787 for (div2 = 10; div2 > 0; div2--) {
2788 int dco = div1 * div2 * clock_khz * 5;
2789 int a_divratio, tlinedrv, inputsel;
2792 if (dco < dco_min_freq || dco > dco_max_freq)
2797 * Note: a_divratio does not match the TGL BSpec
2798 * algorithm, but it matches the hardcoded values and
2799 * works on HW, at least for DP alt-mode
2801 a_divratio = is_dp ? 10 : 5;
2802 tlinedrv = is_dkl ? 1 : 2;
2807 inputsel = is_dp ? 0 : 1;
2814 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2817 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2820 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2823 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2827 *target_dco_khz = dco;
2829 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2831 state->mg_clktop2_coreclkctl1 =
2832 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2834 state->mg_clktop2_hsclkctl =
2835 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2836 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2838 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2848 * The specification for this function uses real numbers, so the math had to be
2849 * adapted to an integer-only calculation; that's why it looks so different.
2851 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2852 struct intel_dpll_hw_state *pll_state)
2854 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2855 int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2856 int clock = crtc_state->port_clock;
2857 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2858 u32 iref_ndiv, iref_trim, iref_pulse_w;
2859 u32 prop_coeff, int_coeff;
2860 u32 tdc_targetcnt, feedfwgain;
2861 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2863 bool use_ssc = false;
2864 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2865 bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2868 ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2874 m2div_int = dco_khz / (refclk_khz * m1div);
2875 if (m2div_int > 255) {
2878 m2div_int = dco_khz / (refclk_khz * m1div);
2881 if (m2div_int > 255)
2884 m2div_rem = dco_khz % (refclk_khz * m1div);
2886 tmp = (u64)m2div_rem * (1 << 22);
2887 do_div(tmp, refclk_khz * m1div);
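/*
 * Example, assuming m1div == 2: for the 8100000 kHz DP target DCO with a
 * 38400 kHz reference, m2div_int = 8100000 / 76800 = 105, m2div_rem = 36000
 * and m2div_frac = 36000 * 2^22 / 76800 = 1966080, i.e. an overall feedback
 * ratio of 105.46875.
 */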
2890 switch (refclk_khz) {
2907 MISSING_CASE(refclk_khz);
2912 * tdc_res = 0.000003
2913 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2915 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2916 * was supposed to be a division, but we rearranged the operations of
2917 * the formula to avoid early divisions so we don't multiply the rounding errors.
2920 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2921 * we also rearrange to work with integers.
2923 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2924 * last division by 10.
2926 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
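/*
 * For example, refclk_khz = 38400 gives
 * (2000000000 / 5068800 + 5) / 10 = (394 + 5) / 10 = 39, and
 * refclk_khz = 19200 gives 79, matching the spec formula above
 * (int(2 / 0.00132 / 38.4 + 0.5) = 39).
 */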
2929 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2930 * 32 bits. That's not a problem since we round the division down
2933 feedfwgain = (use_ssc || m2div_rem > 0) ?
2934 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2936 if (dco_khz >= 9000000) {
2945 tmp = mul_u32_u32(dco_khz, 47 * 32);
2946 do_div(tmp, refclk_khz * m1div * 10000);
2949 tmp = mul_u32_u32(dco_khz, 1000);
2950 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2957 /* write pll_state calculations */
2959 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2960 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2961 DKL_PLL_DIV0_FBPREDIV(m1div) |
2962 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2963 if (dev_priv->display.vbt.override_afc_startup) {
2964 u8 val = dev_priv->display.vbt.override_afc_startup_val;
2966 pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2969 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2970 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2972 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2973 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2974 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2975 (use_ssc ? DKL_PLL_SSC_EN : 0);
2977 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2978 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2980 pll_state->mg_pll_tdc_coldst_bias =
2981 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2982 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2985 pll_state->mg_pll_div0 =
2986 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2987 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2988 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2990 pll_state->mg_pll_div1 =
2991 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2992 MG_PLL_DIV1_DITHER_DIV_2 |
2993 MG_PLL_DIV1_NDIVRATIO(1) |
2994 MG_PLL_DIV1_FBPREDIV(m1div);
2996 pll_state->mg_pll_lf =
2997 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2998 MG_PLL_LF_AFCCNTSEL_512 |
2999 MG_PLL_LF_GAINCTRL(1) |
3000 MG_PLL_LF_INT_COEFF(int_coeff) |
3001 MG_PLL_LF_PROP_COEFF(prop_coeff);
3003 pll_state->mg_pll_frac_lock =
3004 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3005 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3006 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3007 MG_PLL_FRAC_LOCK_DCODITHEREN |
3008 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3009 if (use_ssc || m2div_rem > 0)
3010 pll_state->mg_pll_frac_lock |=
3011 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3013 pll_state->mg_pll_ssc =
3014 (use_ssc ? MG_PLL_SSC_EN : 0) |
3015 MG_PLL_SSC_TYPE(2) |
3016 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3017 MG_PLL_SSC_STEPNUM(ssc_steplog) |
3019 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3021 pll_state->mg_pll_tdc_coldst_bias =
3022 MG_PLL_TDC_COLDST_COLDSTART |
3023 MG_PLL_TDC_COLDST_IREFINT_EN |
3024 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3025 MG_PLL_TDC_TDCOVCCORR_EN |
3026 MG_PLL_TDC_TDCSEL(3);
3028 pll_state->mg_pll_bias =
3029 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3030 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3031 MG_PLL_BIAS_BIAS_BONUS(10) |
3032 MG_PLL_BIAS_BIASCAL_EN |
3033 MG_PLL_BIAS_CTRIM(12) |
3034 MG_PLL_BIAS_VREF_RDAC(4) |
3035 MG_PLL_BIAS_IREFTRIM(iref_trim);
3037 if (refclk_khz == 38400) {
3038 pll_state->mg_pll_tdc_coldst_bias_mask =
3039 MG_PLL_TDC_COLDST_COLDSTART;
3040 pll_state->mg_pll_bias_mask = 0;
3042 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3043 pll_state->mg_pll_bias_mask = -1U;
3046 pll_state->mg_pll_tdc_coldst_bias &=
3047 pll_state->mg_pll_tdc_coldst_bias_mask;
3048 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3054 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3055 const struct intel_shared_dpll *pll,
3056 const struct intel_dpll_hw_state *pll_state)
3058 u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3061 ref_clock = dev_priv->display.dpll.ref_clks.nssc;
3063 if (DISPLAY_VER(dev_priv) >= 12) {
3064 m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3065 m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3066 m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3068 if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3069 m2_frac = pll_state->mg_pll_bias &
3070 DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3071 m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3076 m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3077 m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3079 if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3080 m2_frac = pll_state->mg_pll_div0 &
3081 MG_PLL_DIV0_FBDIV_FRAC_MASK;
3082 m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3088 switch (pll_state->mg_clktop2_hsclkctl &
3089 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3090 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3093 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3096 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3099 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3103 MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3107 div2 = (pll_state->mg_clktop2_hsclkctl &
3108 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3109 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3111 /* a div2 value of 0 is the same as 1, i.e. no division */
3116 * Adjust the original formula to delay the division by 2^22 in order to
3117 * minimize possible rounding errors.
3119 tmp = (u64)m1 * m2_int * ref_clock +
3120 (((u64)m1 * m2_frac * ref_clock) >> 22);
3121 tmp = div_u64(tmp, 5 * div1 * div2);
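/*
 * Worked example, reusing the numbers from the icl_calc_mg_pll_state()
 * example above (ref_clock = 38400, m1 = 2, m2_int = 105, m2_frac = 1966080)
 * together with the divisors icl_mg_pll_find_divisors() picks for a
 * 540000 kHz DP clock (div1 = 3, div2 = 1):
 * tmp = 2 * 105 * 38400 + ((2 * 1966080 * 38400) >> 22)
 *     = 8064000 + 36000 = 8100000,
 * and 8100000 / (5 * 3 * 1) = 540000 kHz, i.e. the HBR2 port clock.
 */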
3127 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3128 * @crtc_state: state for the CRTC to select the DPLL for
3129 * @port_dpll_id: the active @port_dpll_id to select
3131 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3134 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3135 enum icl_port_dpll_id port_dpll_id)
3137 struct icl_port_dpll *port_dpll =
3138 &crtc_state->icl_port_dplls[port_dpll_id];
3140 crtc_state->shared_dpll = port_dpll->pll;
3141 crtc_state->dpll_hw_state = port_dpll->hw_state;
3144 static void icl_update_active_dpll(struct intel_atomic_state *state,
3145 struct intel_crtc *crtc,
3146 struct intel_encoder *encoder)
3148 struct intel_crtc_state *crtc_state =
3149 intel_atomic_get_new_crtc_state(state, crtc);
3150 struct intel_digital_port *primary_port;
3151 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3153 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3154 enc_to_mst(encoder)->primary :
3155 enc_to_dig_port(encoder);
3158 (intel_tc_port_in_dp_alt_mode(primary_port) ||
3159 intel_tc_port_in_legacy_mode(primary_port)))
3160 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3162 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3165 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3166 struct intel_crtc *crtc)
3168 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3169 struct intel_crtc_state *crtc_state =
3170 intel_atomic_get_new_crtc_state(state, crtc);
3171 struct icl_port_dpll *port_dpll =
3172 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3173 struct skl_wrpll_params pll_params = {};
3176 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3177 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3178 ret = icl_calc_wrpll(crtc_state, &pll_params);
3180 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3185 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3187 /* this is mainly for the fastset check */
3188 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3190 crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3191 &port_dpll->hw_state);
3196 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3197 struct intel_crtc *crtc,
3198 struct intel_encoder *encoder)
3200 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3201 struct intel_crtc_state *crtc_state =
3202 intel_atomic_get_new_crtc_state(state, crtc);
3203 struct icl_port_dpll *port_dpll =
3204 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3205 enum port port = encoder->port;
3206 unsigned long dpll_mask;
3208 if (IS_ALDERLAKE_S(dev_priv)) {
3210 BIT(DPLL_ID_DG1_DPLL3) |
3211 BIT(DPLL_ID_DG1_DPLL2) |
3212 BIT(DPLL_ID_ICL_DPLL1) |
3213 BIT(DPLL_ID_ICL_DPLL0);
3214 } else if (IS_DG1(dev_priv)) {
3215 if (port == PORT_D || port == PORT_E) {
3217 BIT(DPLL_ID_DG1_DPLL2) |
3218 BIT(DPLL_ID_DG1_DPLL3);
3221 BIT(DPLL_ID_DG1_DPLL0) |
3222 BIT(DPLL_ID_DG1_DPLL1);
3224 } else if (IS_ROCKETLAKE(dev_priv)) {
3226 BIT(DPLL_ID_EHL_DPLL4) |
3227 BIT(DPLL_ID_ICL_DPLL1) |
3228 BIT(DPLL_ID_ICL_DPLL0);
3229 } else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3231 BIT(DPLL_ID_EHL_DPLL4) |
3232 BIT(DPLL_ID_ICL_DPLL1) |
3233 BIT(DPLL_ID_ICL_DPLL0);
3235 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3238 /* Eliminate DPLLs from consideration if reserved by HTI */
3239 dpll_mask &= ~intel_hti_dpll_mask(dev_priv);
3241 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3242 &port_dpll->hw_state,
3244 if (!port_dpll->pll)
3247 intel_reference_shared_dpll(state, crtc,
3248 port_dpll->pll, &port_dpll->hw_state);
3250 icl_update_active_dpll(state, crtc, encoder);
3255 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3256 struct intel_crtc *crtc)
3258 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3259 struct intel_crtc_state *crtc_state =
3260 intel_atomic_get_new_crtc_state(state, crtc);
3261 struct icl_port_dpll *port_dpll =
3262 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3263 struct skl_wrpll_params pll_params = {};
3266 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3267 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3271 icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3273 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3274 ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3278 /* this is mainly for the fastset check */
3279 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3281 crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3282 &port_dpll->hw_state);
3287 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3288 struct intel_crtc *crtc,
3289 struct intel_encoder *encoder)
3291 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3292 struct intel_crtc_state *crtc_state =
3293 intel_atomic_get_new_crtc_state(state, crtc);
3294 struct icl_port_dpll *port_dpll =
3295 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3296 enum intel_dpll_id dpll_id;
3299 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3300 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3301 &port_dpll->hw_state,
3302 BIT(DPLL_ID_ICL_TBTPLL));
3303 if (!port_dpll->pll)
3305 intel_reference_shared_dpll(state, crtc,
3306 port_dpll->pll, &port_dpll->hw_state);
3309 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3310 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3312 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3313 &port_dpll->hw_state,
3315 if (!port_dpll->pll) {
3317 goto err_unreference_tbt_pll;
3319 intel_reference_shared_dpll(state, crtc,
3320 port_dpll->pll, &port_dpll->hw_state);
3322 icl_update_active_dpll(state, crtc, encoder);
3326 err_unreference_tbt_pll:
3327 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3328 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3333 static int icl_compute_dplls(struct intel_atomic_state *state,
3334 struct intel_crtc *crtc,
3335 struct intel_encoder *encoder)
3337 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3338 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3340 if (intel_phy_is_combo(dev_priv, phy))
3341 return icl_compute_combo_phy_dpll(state, crtc);
3342 else if (intel_phy_is_tc(dev_priv, phy))
3343 return icl_compute_tc_phy_dplls(state, crtc);
3350 static int icl_get_dplls(struct intel_atomic_state *state,
3351 struct intel_crtc *crtc,
3352 struct intel_encoder *encoder)
3354 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3355 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3357 if (intel_phy_is_combo(dev_priv, phy))
3358 return icl_get_combo_phy_dpll(state, crtc, encoder);
3359 else if (intel_phy_is_tc(dev_priv, phy))
3360 return icl_get_tc_phy_dplls(state, crtc, encoder);
3367 static void icl_put_dplls(struct intel_atomic_state *state,
3368 struct intel_crtc *crtc)
3370 const struct intel_crtc_state *old_crtc_state =
3371 intel_atomic_get_old_crtc_state(state, crtc);
3372 struct intel_crtc_state *new_crtc_state =
3373 intel_atomic_get_new_crtc_state(state, crtc);
3374 enum icl_port_dpll_id id;
3376 new_crtc_state->shared_dpll = NULL;
3378 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3379 const struct icl_port_dpll *old_port_dpll =
3380 &old_crtc_state->icl_port_dplls[id];
3381 struct icl_port_dpll *new_port_dpll =
3382 &new_crtc_state->icl_port_dplls[id];
3384 new_port_dpll->pll = NULL;
3386 if (!old_port_dpll->pll)
3389 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3393 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3394 struct intel_shared_dpll *pll,
3395 struct intel_dpll_hw_state *hw_state)
3397 const enum intel_dpll_id id = pll->info->id;
3398 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3399 intel_wakeref_t wakeref;
3403 i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3405 wakeref = intel_display_power_get_if_enabled(dev_priv,
3406 POWER_DOMAIN_DISPLAY_CORE);
3410 val = intel_de_read(dev_priv, enable_reg);
3411 if (!(val & PLL_ENABLE))
3414 hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3415 MG_REFCLKIN_CTL(tc_port));
3416 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3418 hw_state->mg_clktop2_coreclkctl1 =
3419 intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3420 hw_state->mg_clktop2_coreclkctl1 &=
3421 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3423 hw_state->mg_clktop2_hsclkctl =
3424 intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3425 hw_state->mg_clktop2_hsclkctl &=
3426 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3427 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3428 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3429 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3431 hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3432 hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3433 hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3434 hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3435 MG_PLL_FRAC_LOCK(tc_port));
3436 hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3438 hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3439 hw_state->mg_pll_tdc_coldst_bias =
3440 intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3442 if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
3443 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3444 hw_state->mg_pll_bias_mask = 0;
3446 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3447 hw_state->mg_pll_bias_mask = -1U;
3450 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3451 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3455 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3459 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3460 struct intel_shared_dpll *pll,
3461 struct intel_dpll_hw_state *hw_state)
3463 const enum intel_dpll_id id = pll->info->id;
3464 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3465 intel_wakeref_t wakeref;
3469 wakeref = intel_display_power_get_if_enabled(dev_priv,
3470 POWER_DOMAIN_DISPLAY_CORE);
3474 val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
3475 if (!(val & PLL_ENABLE))
3479 * All registers read here have the same HIP_INDEX_REG even though
3480 * they are on different building blocks
3482 hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
3483 DKL_REFCLKIN_CTL(tc_port));
3484 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3486 hw_state->mg_clktop2_hsclkctl =
3487 intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3488 hw_state->mg_clktop2_hsclkctl &=
3489 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3490 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3491 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3492 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3494 hw_state->mg_clktop2_coreclkctl1 =
3495 intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3496 hw_state->mg_clktop2_coreclkctl1 &=
3497 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3499 hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
3500 val = DKL_PLL_DIV0_MASK;
3501 if (dev_priv->display.vbt.override_afc_startup)
3502 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3503 hw_state->mg_pll_div0 &= val;
3505 hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3506 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3507 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3509 hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3510 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3511 DKL_PLL_SSC_STEP_LEN_MASK |
3512 DKL_PLL_SSC_STEP_NUM_MASK |
3515 hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3516 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3517 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3519 hw_state->mg_pll_tdc_coldst_bias =
3520 intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3521 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3522 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3526 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3530 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3531 struct intel_shared_dpll *pll,
3532 struct intel_dpll_hw_state *hw_state,
3533 i915_reg_t enable_reg)
3535 const enum intel_dpll_id id = pll->info->id;
3536 intel_wakeref_t wakeref;
3540 wakeref = intel_display_power_get_if_enabled(dev_priv,
3541 POWER_DOMAIN_DISPLAY_CORE);
3545 val = intel_de_read(dev_priv, enable_reg);
3546 if (!(val & PLL_ENABLE))
3549 if (IS_ALDERLAKE_S(dev_priv)) {
3550 hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
3551 hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
3552 } else if (IS_DG1(dev_priv)) {
3553 hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
3554 hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
3555 } else if (IS_ROCKETLAKE(dev_priv)) {
3556 hw_state->cfgcr0 = intel_de_read(dev_priv,
3557 RKL_DPLL_CFGCR0(id));
3558 hw_state->cfgcr1 = intel_de_read(dev_priv,
3559 RKL_DPLL_CFGCR1(id));
3560 } else if (DISPLAY_VER(dev_priv) >= 12) {
3561 hw_state->cfgcr0 = intel_de_read(dev_priv,
3562 TGL_DPLL_CFGCR0(id));
3563 hw_state->cfgcr1 = intel_de_read(dev_priv,
3564 TGL_DPLL_CFGCR1(id));
3565 if (dev_priv->display.vbt.override_afc_startup) {
3566 hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
3567 hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3570 if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3571 hw_state->cfgcr0 = intel_de_read(dev_priv,
3572 ICL_DPLL_CFGCR0(4));
3573 hw_state->cfgcr1 = intel_de_read(dev_priv,
3574 ICL_DPLL_CFGCR1(4));
3576 hw_state->cfgcr0 = intel_de_read(dev_priv,
3577 ICL_DPLL_CFGCR0(id));
3578 hw_state->cfgcr1 = intel_de_read(dev_priv,
3579 ICL_DPLL_CFGCR1(id));
3585 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3589 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3590 struct intel_shared_dpll *pll,
3591 struct intel_dpll_hw_state *hw_state)
3593 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3595 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3598 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3599 struct intel_shared_dpll *pll,
3600 struct intel_dpll_hw_state *hw_state)
3602 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3605 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3606 struct intel_shared_dpll *pll)
3608 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3609 const enum intel_dpll_id id = pll->info->id;
3610 i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3612 if (IS_ALDERLAKE_S(dev_priv)) {
3613 cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3614 cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3615 } else if (IS_DG1(dev_priv)) {
3616 cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3617 cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3618 } else if (IS_ROCKETLAKE(dev_priv)) {
3619 cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3620 cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3621 } else if (DISPLAY_VER(dev_priv) >= 12) {
3622 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3623 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3624 div0_reg = TGL_DPLL0_DIV0(id);
3626 if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3627 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3628 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3630 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3631 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3635 intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3636 intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3637 drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
3638 !i915_mmio_reg_valid(div0_reg));
3639 if (dev_priv->display.vbt.override_afc_startup &&
3640 i915_mmio_reg_valid(div0_reg))
3641 intel_de_rmw(dev_priv, div0_reg,
3642 TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3643 intel_de_posting_read(dev_priv, cfgcr1_reg);
3646 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3647 struct intel_shared_dpll *pll)
3649 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3650 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3653 * Some of the following registers have reserved fields, so program
3654 * these with RMW based on a mask. The mask can be fixed or generated
3655 * during the calc/readout phase if the mask depends on some other HW
3656 * state like refclk, see icl_calc_mg_pll_state().
3658 intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
3659 MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3661 intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
3662 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3663 hw_state->mg_clktop2_coreclkctl1);
3665 intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
3666 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3667 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3668 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3669 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3670 hw_state->mg_clktop2_hsclkctl);
3672 intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3673 intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3674 intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3675 intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3676 hw_state->mg_pll_frac_lock);
3677 intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3679 intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
3680 hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3682 intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
3683 hw_state->mg_pll_tdc_coldst_bias_mask,
3684 hw_state->mg_pll_tdc_coldst_bias);
3686 intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3689 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3690 struct intel_shared_dpll *pll)
3692 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3693 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3697 * All registers programmed here have the same HIP_INDEX_REG even
3698 * though they are on different building blocks
3700 /* All the registers are RMW */
3701 val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3702 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3703 val |= hw_state->mg_refclkin_ctl;
3704 intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3706 val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3707 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3708 val |= hw_state->mg_clktop2_coreclkctl1;
3709 intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3711 val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3712 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3713 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3714 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3715 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3716 val |= hw_state->mg_clktop2_hsclkctl;
3717 intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3719 val = DKL_PLL_DIV0_MASK;
3720 if (dev_priv->display.vbt.override_afc_startup)
3721 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3722 intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
3723 hw_state->mg_pll_div0);
3725 val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3726 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3727 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3728 val |= hw_state->mg_pll_div1;
3729 intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
3731 val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3732 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3733 DKL_PLL_SSC_STEP_LEN_MASK |
3734 DKL_PLL_SSC_STEP_NUM_MASK |
3736 val |= hw_state->mg_pll_ssc;
3737 intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);
3739 val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3740 val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3741 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3742 val |= hw_state->mg_pll_bias;
3743 intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
3745 val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3746 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3747 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3748 val |= hw_state->mg_pll_tdc_coldst_bias;
3749 intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3751 intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3754 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3755 struct intel_shared_dpll *pll,
3756 i915_reg_t enable_reg)
3758 intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);
3761 * The spec says we need to "wait" but it also says it should be immediate.
3764 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3765 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3769 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3770 struct intel_shared_dpll *pll,
3771 i915_reg_t enable_reg)
3773 intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
3775 /* Timeout is actually 600us. */
3776 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3777 drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3780 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3784 if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
3785 pll->info->id != DPLL_ID_ICL_DPLL0)
3788 * Wa_16011069516:adl-p[a0]
3790 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3791 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3792 * sanity check this assumption with a double read, which presumably
3793 * returns the correct value even with clock gating on.
3795 * Instead of the usual place for workarounds we apply this one here,
3796 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3798 val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3799 val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3800 if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3801 drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3804 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3805 struct intel_shared_dpll *pll)
3807 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3809 if (IS_JSL_EHL(dev_priv) &&
3810 pll->info->id == DPLL_ID_EHL_DPLL4) {
3813 * We need to disable DC states when this DPLL is enabled.
3814 * This can be done by taking a reference on DPLL4 power
3817 pll->wakeref = intel_display_power_get(dev_priv,
3818 POWER_DOMAIN_DC_OFF);
3821 icl_pll_power_enable(dev_priv, pll, enable_reg);
3823 icl_dpll_write(dev_priv, pll);
3826 * DVFS pre sequence would be here, but in our driver the cdclk code
3827 * paths should already be setting the appropriate voltage, hence we do nothing here.
3831 icl_pll_enable(dev_priv, pll, enable_reg);
3833 adlp_cmtg_clock_gating_wa(dev_priv, pll);
3835 /* DVFS post sequence would be here. See the comment above. */
3838 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3839 struct intel_shared_dpll *pll)
3841 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3843 icl_dpll_write(dev_priv, pll);
3846 * DVFS pre sequence would be here, but in our driver the cdclk code
3847 * paths should already be setting the appropriate voltage, hence we do nothing here.
3851 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3853 /* DVFS post sequence would be here. See the comment above. */
3856 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3857 struct intel_shared_dpll *pll)
3859 i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3861 icl_pll_power_enable(dev_priv, pll, enable_reg);
3863 if (DISPLAY_VER(dev_priv) >= 12)
3864 dkl_pll_write(dev_priv, pll);
3866 icl_mg_pll_write(dev_priv, pll);
3869 * DVFS pre sequence would be here, but in our driver the cdclk code
3870 * paths should already be setting the appropriate voltage, hence we do nothing here.
3874 icl_pll_enable(dev_priv, pll, enable_reg);
3876 /* DVFS post sequence would be here. See the comment above. */
3879 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3880 struct intel_shared_dpll *pll,
3881 i915_reg_t enable_reg)
3883 /* The first steps are done by intel_ddi_post_disable(). */
3886 * DVFS pre sequence would be here, but in our driver the cdclk code
3887 * paths should already be setting the appropriate voltage, hence we do nothing here.
3891 intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);
3893 /* Timeout is actually 1us. */
3894 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3895 drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3897 /* DVFS post sequence would be here. See the comment above. */
3899 intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);
3902 * The spec says we need to "wait" but it also says it should be immediate.
3905 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3906 drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
3910 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3911 struct intel_shared_dpll *pll)
3913 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3915 icl_pll_disable(dev_priv, pll, enable_reg);
3917 if (IS_JSL_EHL(dev_priv) &&
3918 pll->info->id == DPLL_ID_EHL_DPLL4)
3919 intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3923 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3924 struct intel_shared_dpll *pll)
3926 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3929 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3930 struct intel_shared_dpll *pll)
3932 i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3934 icl_pll_disable(dev_priv, pll, enable_reg);
3937 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
3940 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3943 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3944 const struct intel_dpll_hw_state *hw_state)
3946 drm_dbg_kms(&dev_priv->drm,
3947 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3948 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3949 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3950 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3951 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3952 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3953 hw_state->cfgcr0, hw_state->cfgcr1,
3955 hw_state->mg_refclkin_ctl,
3956 hw_state->mg_clktop2_coreclkctl1,
3957 hw_state->mg_clktop2_hsclkctl,
3958 hw_state->mg_pll_div0,
3959 hw_state->mg_pll_div1,
3960 hw_state->mg_pll_lf,
3961 hw_state->mg_pll_frac_lock,
3962 hw_state->mg_pll_ssc,
3963 hw_state->mg_pll_bias,
3964 hw_state->mg_pll_tdc_coldst_bias);
3967 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3968 .enable = combo_pll_enable,
3969 .disable = combo_pll_disable,
3970 .get_hw_state = combo_pll_get_hw_state,
3971 .get_freq = icl_ddi_combo_pll_get_freq,
3974 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3975 .enable = tbt_pll_enable,
3976 .disable = tbt_pll_disable,
3977 .get_hw_state = tbt_pll_get_hw_state,
3978 .get_freq = icl_ddi_tbt_pll_get_freq,
3981 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3982 .enable = mg_pll_enable,
3983 .disable = mg_pll_disable,
3984 .get_hw_state = mg_pll_get_hw_state,
3985 .get_freq = icl_ddi_mg_pll_get_freq,
3988 static const struct dpll_info icl_plls[] = {
3989 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3990 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3991 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3992 { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3993 { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3994 { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3995 { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
3999 static const struct intel_dpll_mgr icl_pll_mgr = {
4000 .dpll_info = icl_plls,
4001 .compute_dplls = icl_compute_dplls,
4002 .get_dplls = icl_get_dplls,
4003 .put_dplls = icl_put_dplls,
4004 .update_active_dpll = icl_update_active_dpll,
4005 .update_ref_clks = icl_update_dpll_ref_clks,
4006 .dump_hw_state = icl_dump_hw_state,
4009 static const struct dpll_info ehl_plls[] = {
4010 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4011 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4012 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4016 static const struct intel_dpll_mgr ehl_pll_mgr = {
4017 .dpll_info = ehl_plls,
4018 .compute_dplls = icl_compute_dplls,
4019 .get_dplls = icl_get_dplls,
4020 .put_dplls = icl_put_dplls,
4021 .update_ref_clks = icl_update_dpll_ref_clks,
4022 .dump_hw_state = icl_dump_hw_state,
4025 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4026 .enable = mg_pll_enable,
4027 .disable = mg_pll_disable,
4028 .get_hw_state = dkl_pll_get_hw_state,
4029 .get_freq = icl_ddi_mg_pll_get_freq,
4032 static const struct dpll_info tgl_plls[] = {
4033 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4034 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4035 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4036 { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4037 { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4038 { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4039 { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4040 { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4041 { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4045 static const struct intel_dpll_mgr tgl_pll_mgr = {
4046 .dpll_info = tgl_plls,
4047 .compute_dplls = icl_compute_dplls,
4048 .get_dplls = icl_get_dplls,
4049 .put_dplls = icl_put_dplls,
4050 .update_active_dpll = icl_update_active_dpll,
4051 .update_ref_clks = icl_update_dpll_ref_clks,
4052 .dump_hw_state = icl_dump_hw_state,
4055 static const struct dpll_info rkl_plls[] = {
4056 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4057 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4058 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4062 static const struct intel_dpll_mgr rkl_pll_mgr = {
4063 .dpll_info = rkl_plls,
4064 .compute_dplls = icl_compute_dplls,
4065 .get_dplls = icl_get_dplls,
4066 .put_dplls = icl_put_dplls,
4067 .update_ref_clks = icl_update_dpll_ref_clks,
4068 .dump_hw_state = icl_dump_hw_state,
4071 static const struct dpll_info dg1_plls[] = {
4072 { "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
4073 { "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
4074 { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4075 { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4079 static const struct intel_dpll_mgr dg1_pll_mgr = {
4080 .dpll_info = dg1_plls,
4081 .compute_dplls = icl_compute_dplls,
4082 .get_dplls = icl_get_dplls,
4083 .put_dplls = icl_put_dplls,
4084 .update_ref_clks = icl_update_dpll_ref_clks,
4085 .dump_hw_state = icl_dump_hw_state,
4088 static const struct dpll_info adls_plls[] = {
4089 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4090 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4091 { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4092 { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4096 static const struct intel_dpll_mgr adls_pll_mgr = {
4097 .dpll_info = adls_plls,
4098 .compute_dplls = icl_compute_dplls,
4099 .get_dplls = icl_get_dplls,
4100 .put_dplls = icl_put_dplls,
4101 .update_ref_clks = icl_update_dpll_ref_clks,
4102 .dump_hw_state = icl_dump_hw_state,
4105 static const struct dpll_info adlp_plls[] = {
4106 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4107 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4108 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4109 { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4110 { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4111 { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4112 { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4116 static const struct intel_dpll_mgr adlp_pll_mgr = {
4117 .dpll_info = adlp_plls,
4118 .compute_dplls = icl_compute_dplls,
4119 .get_dplls = icl_get_dplls,
4120 .put_dplls = icl_put_dplls,
4121 .update_active_dpll = icl_update_active_dpll,
4122 .update_ref_clks = icl_update_dpll_ref_clks,
4123 .dump_hw_state = icl_dump_hw_state,
4127 * intel_shared_dpll_init - Initialize shared DPLLs
4128 * @dev_priv: i915 device
4130 * Initialize shared DPLLs for @dev_priv.
4132 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4134 const struct intel_dpll_mgr *dpll_mgr = NULL;
4135 const struct dpll_info *dpll_info;
4138 mutex_init(&dev_priv->display.dpll.lock);
4140 if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
4141 /* No shared DPLLs on DG2; port PLLs are part of the PHY */
4143 else if (IS_ALDERLAKE_P(dev_priv))
4144 dpll_mgr = &adlp_pll_mgr;
4145 else if (IS_ALDERLAKE_S(dev_priv))
4146 dpll_mgr = &adls_pll_mgr;
4147 else if (IS_DG1(dev_priv))
4148 dpll_mgr = &dg1_pll_mgr;
4149 else if (IS_ROCKETLAKE(dev_priv))
4150 dpll_mgr = &rkl_pll_mgr;
4151 else if (DISPLAY_VER(dev_priv) >= 12)
4152 dpll_mgr = &tgl_pll_mgr;
4153 else if (IS_JSL_EHL(dev_priv))
4154 dpll_mgr = &ehl_pll_mgr;
4155 else if (DISPLAY_VER(dev_priv) >= 11)
4156 dpll_mgr = &icl_pll_mgr;
4157 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4158 dpll_mgr = &bxt_pll_mgr;
4159 else if (DISPLAY_VER(dev_priv) == 9)
4160 dpll_mgr = &skl_pll_mgr;
4161 else if (HAS_DDI(dev_priv))
4162 dpll_mgr = &hsw_pll_mgr;
4163 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4164 dpll_mgr = &pch_pll_mgr;
4167 dev_priv->display.dpll.num_shared_dpll = 0;
4171 dpll_info = dpll_mgr->dpll_info;
4173 for (i = 0; dpll_info[i].name; i++) {
4174 if (drm_WARN_ON(&dev_priv->drm,
4175 i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4178 drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4179 dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4182 dev_priv->display.dpll.mgr = dpll_mgr;
4183 dev_priv->display.dpll.num_shared_dpll = i;
4187 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4188 * @state: atomic state
4189 * @crtc: CRTC to compute DPLLs for
4192 * This function computes the DPLL state for the given CRTC and encoder.
4194 * The new configuration in the atomic commit @state is made effective by
4195 * calling intel_shared_dpll_swap_state().
4198 * 0 on success, negative error code on failure.
4200 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4201 struct intel_crtc *crtc,
4202 struct intel_encoder *encoder)
4204 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4205 const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4207 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4210 return dpll_mgr->compute_dplls(state, crtc, encoder);
4214 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4215 * @state: atomic state
4216 * @crtc: CRTC to reserve DPLLs for
4219 * This function reserves all required DPLLs for the given CRTC and encoder
4220 * combination in the current atomic commit @state and the new @crtc atomic
4223 * The new configuration in the atomic commit @state is made effective by
4224 * calling intel_shared_dpll_swap_state().
4226 * The reserved DPLLs should be released by calling
4227 * intel_release_shared_dplls().
4230 * 0 if all required DPLLs were successfully reserved,
4231 * negative error code otherwise.
4233 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4234 struct intel_crtc *crtc,
4235 struct intel_encoder *encoder)
4237 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4238 const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4240 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4243 return dpll_mgr->get_dplls(state, crtc, encoder);

/**
 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
 * @state: atomic state
 * @crtc: crtc from which the DPLLs are to be released
 *
 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
 * from the current atomic commit @state and the old @crtc atomic state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 */
void intel_release_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;

	/*
	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even though the platform doesn't yet support
	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
	 * called on those.
	 */
	if (!dpll_mgr)
		return;

	dpll_mgr->put_dplls(state, crtc);
}

/**
 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
 * @state: atomic state
 * @crtc: the CRTC for which to update the active DPLL
 * @encoder: encoder determining the type of port DPLL
 *
 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
 * DPLL selected will be based on the current mode of the encoder's port.
 */
void intel_update_active_dpll(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;

	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return;

	dpll_mgr->update_active_dpll(state, crtc, encoder);
}
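
/*
 * Usage sketch (illustrative): on Type-C capable ports both a TBT PLL and an
 * MG/DKL port PLL may have been reserved for the same CRTC. Once the port's
 * mode is known, the encoder code picks the PLL that will actually drive the
 * pipe, e.g.:
 *
 *	intel_reserve_shared_dplls(state, crtc, encoder);
 *	...
 *	intel_update_active_dpll(state, crtc, encoder);
 *
 * Which of the reserved port DPLLs ends up as crtc_state->shared_dpll depends
 * on the current mode of the encoder's TC port.
 */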

/**
 * intel_dpll_get_freq - calculate the DPLL's output frequency
 * @i915: i915 device
 * @pll: DPLL for which to calculate the output frequency
 * @pll_state: DPLL state from which to calculate the output frequency
 *
 * Return the output frequency corresponding to @pll's passed in @pll_state.
 */
int intel_dpll_get_freq(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state)
{
	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
		return 0;

	return pll->info->funcs->get_freq(i915, pll, pll_state);
}
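
/*
 * Usage sketch (illustrative): during state readout the port clock can be
 * recovered from the PLL's programmed dividers, assuming the PLL provides a
 * get_freq() hook:
 *
 *	crtc_state->port_clock =
 *		intel_dpll_get_freq(i915, crtc_state->shared_dpll,
 *				    &crtc_state->dpll_hw_state);
 *
 * If the PLL has no get_freq() implementation this WARNs and returns 0.
 */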

/**
 * intel_dpll_get_hw_state - read out the DPLL's hardware state
 * @i915: i915 device
 * @pll: DPLL for which to read out the hardware state
 * @hw_state: DPLL's hardware state
 *
 * Read out @pll's hardware state into @hw_state.
 */
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state)
{
	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
}

static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}

void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
		i915->display.dpll.mgr->update_ref_clks(i915);
}

void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
}
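
/*
 * Usage sketch (illustrative): during driver load and resume the hardware
 * takeover path is expected to run roughly this sequence, so that software
 * state matches whatever the BIOS or the previous state left enabled:
 *
 *	intel_dpll_update_ref_clks(i915);
 *	intel_dpll_readout_hw_state(i915);
 *	...				(CRTC/encoder state readout)
 *	intel_dpll_sanitize_state(i915);
 *
 * readout_dpll_hw_state() above fills in pll->on, pll->state.hw_state and the
 * pipe reference masks that the sanitize and verify helpers below rely on.
 */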

static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}

void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
}

/**
 * intel_dpll_dump_hw_state - write hw_state to dmesg
 * @dev_priv: i915 drm device
 * @hw_state: hw state to be written to the log
 *
 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
 */
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	if (dev_priv->display.dpll.mgr) {
		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
	} else {
		/* fallback for platforms that don't use the shared dpll
		 * framework
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			    "fp0: 0x%x, fp1: 0x%x\n",
			    hw_state->dpll, hw_state->dpll_md,
			    hw_state->fp0, hw_state->fp1);
	}
}

static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(dev_priv, !pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(dev_priv, pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(dev_priv, pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(dev_priv,
				pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(dev_priv, !(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(dev_priv, !(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(dev_priv,
			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}

void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
				    struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->active_mask);
		I915_STATE_WARN(dev_priv, pll->state.pipe_mask & pipe_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
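
/*
 * Usage sketch (illustrative): the two verify entry points are meant to be
 * run from modeset state verification, roughly:
 *
 *	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 *					    new_crtc_state, i)
 *		intel_shared_dpll_state_verify(crtc, old_crtc_state,
 *					       new_crtc_state);
 *	intel_shared_dpll_verify_disabled(i915);
 *
 * i.e. every CRTC in the commit is checked against its old and new PLL, and
 * each PLL's software tracking is then checked against its hardware state.
 */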

void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
{
	int i;
	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],