2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
29 #include "intel_display_types.h"
30 #include "intel_dkl_phy.h"
31 #include "intel_dkl_phy_regs.h"
32 #include "intel_dpio_phy.h"
33 #include "intel_dpll.h"
34 #include "intel_dpll_mgr.h"
35 #include "intel_hti.h"
36 #include "intel_mg_phy_regs.h"
37 #include "intel_pch_refclk.h"
43 * Display PLLs used for driving outputs vary by platform. While some have
44 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
45 * from a pool. In the latter scenario, it is possible that multiple pipes
46 * share a PLL if their configurations match.
48 * This file provides an abstraction over display PLLs. The function
49 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
50 * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
52 * for a given CRTC and encoder configuration by calling
53 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
54 * with intel_release_shared_dplls().
55 * Changes to the users are first staged in the atomic state, and then made
56 * effective by calling intel_shared_dpll_swap_state() during the atomic
/* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
/*
 * Per-platform DPLL manager: a table of available PLLs plus the hooks the
 * generic code uses to compute, reserve, release and describe them.
 */
struct intel_dpll_mgr {
	/* NULL-terminated table of PLLs available on this platform */
	const struct dpll_info *dpll_info;

	/* compute the PLL state for @crtc/@encoder into the atomic state */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the PLL(s) needed by @crtc/@encoder in the atomic state */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* NOTE(review): presumably retargets the active PLL tracking for
	 * @encoder (Type-C); confirm against implementations not in view */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* refresh the cached reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log a decoded @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *i915,
			      const struct intel_dpll_hw_state *hw_state);
};
/*
 * Snapshot the current state of every shared DPLL into @shared_dpll,
 * the per-atomic-state copy array indexed by pll->index.
 */
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll_state *shared_dpll)
{
	struct intel_shared_dpll *pll;
	int i;

	/* Copy shared dpll state */
	for_each_shared_dpll(i915, pll, i)
		shared_dpll[pll->index] = pll->state;
}
/*
 * Return the atomic-state copy of the shared DPLL state array, lazily
 * duplicating the current state on first use within this atomic state.
 * Requires connection_mutex to be held (asserted below).
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
/**
 * intel_get_shared_dpll_by_id - get a DPLL given its id
 * @i915: i915 device instance
 * @id: pll id
 *
 * Returns:
 * A pointer to the DPLL with @id
 */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
			    enum intel_dpll_id id)
{
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(i915, pll, i) {
		if (pll->info->id == id)
			return pll;
	}

	/* NOTE(review): tail reconstructed — no PLL with @id is a driver bug */
	MISSING_CASE(id);
	return NULL;
}
/* Warn if @pll's hardware enable state does not match the expected @state. */
void assert_shared_dpll(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&i915->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(cur_state));
}
186 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
188 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
191 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
193 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
/* Select the enable register for a combo PHY PLL on this platform. */
static i915_reg_t
intel_combo_pll_enable_reg(struct drm_i915_private *i915,
			   struct intel_shared_dpll *pll)
{
	if (IS_DG1(i915))
		return DG1_DPLL_ENABLE(pll->info->id);
	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
		 (pll->info->id == DPLL_ID_EHL_DPLL4))
		/* JSL/EHL DPLL4 reuses the MG PLL0 enable register */
		return MG_PLL_ENABLE(0);

	return ICL_DPLL_ENABLE(pll->info->id);
}
210 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
211 struct intel_shared_dpll *pll)
213 const enum intel_dpll_id id = pll->info->id;
214 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
216 if (IS_ALDERLAKE_P(i915))
217 return ADLP_PORTTC_PLL_ENABLE(tc_port);
219 return MG_PLL_ENABLE(tc_port);
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);
	unsigned int old_mask;

	if (drm_WARN_ON(&i915->drm, pll == NULL))
		return;

	mutex_lock(&i915->display.dpll.lock);
	old_mask = pll->active_mask;

	/* the pipe must have been reserved on the pll, and not yet active */
	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	if (old_mask) {
		/* already enabled by another pipe; just sanity-check */
		drm_WARN_ON(&i915->drm, !pll->on);
		assert_shared_dpll_enabled(i915, pll);
		goto out;
	}
	drm_WARN_ON(&i915->drm, pll->on);

	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(i915, pll);
	pll->on = true;

out:
	mutex_unlock(&i915->display.dpll.lock);
}
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(i915) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&i915->display.dpll.lock);
	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(&i915->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_shared_dpll_enabled(i915, pll);
	drm_WARN_ON(&i915->drm, !pll->on);

	pll->active_mask &= ~pipe_mask;
	/* keep the pll running while any other pipe still uses it */
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(i915, pll);
	pll->on = false;

out:
	mutex_unlock(&i915->display.dpll.lock);
}
/* Build a bitmask of all PLL ids present on this platform. */
static unsigned long
intel_dpll_mask_all(struct drm_i915_private *i915)
{
	struct intel_shared_dpll *pll;
	unsigned long dpll_mask = 0;
	int i;

	for_each_shared_dpll(i915, pll, i) {
		/* each id must appear at most once */
		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));

		dpll_mask |= BIT(pll->info->id);
	}

	return dpll_mask;
}
/*
 * Find a PLL within @dpll_mask for @crtc: prefer an already-used PLL whose
 * programmed state matches @pll_state exactly (sharing), otherwise fall back
 * to the first unused PLL. Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_shared_dpll *unused_pll = NULL;
	enum intel_dpll_id id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);

	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
		struct intel_shared_dpll *pll;

		pll = intel_get_shared_dpll_by_id(i915, id);
		if (!pll)
			continue;

		/* Only want to check enabled timings first */
		if (shared_dpll[pll->index].pipe_mask == 0) {
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		if (memcmp(pll_state,
			   &shared_dpll[pll->index].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[pll->index].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
/**
 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is taken
 * @pll: DPLL for which the reference is taken
 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Take a reference for @pll tracking the use of it by @crtc.
 */
static void
intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
				 const struct intel_shared_dpll *pll,
				 struct intel_shared_dpll_state *shared_dpll_state)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/* taking the same reference twice is a bug */
	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);

	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
/*
 * Reserve @pll for @crtc in the atomic state; the first user also latches
 * @pll_state as the PLL's programmed hw state.
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct intel_shared_dpll_state *shared_dpll;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	if (shared_dpll[pll->index].pipe_mask == 0)
		shared_dpll[pll->index].hw_state = *pll_state;

	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
}
/**
 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is dropped
 * @pll: DPLL for which the reference is dropped
 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Drop a reference for @pll tracking the end of use of it by @crtc.
 */
static void
intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
				   const struct intel_shared_dpll *pll,
				   struct intel_shared_dpll_state *shared_dpll_state)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/* dropping a reference that was never taken is a bug */
	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);

	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
446 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
447 const struct intel_crtc *crtc,
448 const struct intel_shared_dpll *pll)
450 struct intel_shared_dpll_state *shared_dpll;
452 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
454 intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
457 static void intel_put_dpll(struct intel_atomic_state *state,
458 struct intel_crtc *crtc)
460 const struct intel_crtc_state *old_crtc_state =
461 intel_atomic_get_old_crtc_state(state, crtc);
462 struct intel_crtc_state *new_crtc_state =
463 intel_atomic_get_new_crtc_state(state, crtc);
465 new_crtc_state->shared_dpll = NULL;
467 if (!old_crtc_state->shared_dpll)
470 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
/**
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
	struct intel_shared_dpll *pll;
	int i;

	/* nothing staged — the state array was never duplicated */
	if (!state->dpll_set)
		return;

	for_each_shared_dpll(i915, pll, i)
		swap(pll->state, shared_dpll[pll->index]);
}
/* Read back IBX PCH DPLL registers; returns true if the PLL is enabled. */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(i915, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));

	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
/* Warn if none of the PCH reference clock sources is enabled. */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
{
	u32 val;
	bool enabled;

	val = intel_de_read(i915, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(i915, !enabled,
			"PCH refclk assertion failure, should be active but is disabled\n");
}
/* Program and enable an IBX PCH DPLL from the cached hw state. */
static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(i915);

	intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(i915, PCH_DPLL(id));
	udelay(150); /* NOTE(review): delay reconstructed per upstream i915; confirm */

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(i915, PCH_DPLL(id));
	udelay(200); /* NOTE(review): delay reconstructed per upstream i915; confirm */
}
/* Disable an IBX PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(i915, PCH_DPLL(id), 0);
	intel_de_posting_read(i915, PCH_DPLL(id));
	udelay(200); /* NOTE(review): delay reconstructed per upstream i915; confirm */
}
/*
 * No PLL state computation needed here: the IBX PCH DPLL hw state is
 * filled in by the ILK clock computation code elsewhere.
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
/*
 * Reserve a PCH DPLL for @crtc: fixed pipe->PLL mapping on Ironlake (IBX),
 * shared-pool allocation on CPT.
 */
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (HAS_PCH_IBX(i915)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		id = (enum intel_dpll_id) crtc->pipe;
		pll = intel_get_shared_dpll_by_id(i915, id);

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return -EINVAL;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
/* Log the IBX PCH DPLL hw state fields for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *i915,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&i915->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
/* vtable + PLL table + manager for the ILK/SNB/IVB PCH DPLLs */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ }, /* sentinel */
};

static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
/* Enable a HSW/BDW WRPLL from the cached hw state. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(i915, WRPLL_CTL(id));
	udelay(20); /* NOTE(review): delay reconstructed per upstream i915; confirm */
}
/* Enable the HSW/BDW SPLL from the cached hw state. */
static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(i915, SPLL_CTL);
	udelay(20); /* NOTE(review): delay reconstructed per upstream i915; confirm */
}
/* Disable a HSW/BDW WRPLL; may re-init the PCH refclk when no longer used. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(i915, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (i915->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(i915);
}
/* Disable the HSW/BDW SPLL; may re-init the PCH refclk when no longer used. */
static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
	intel_de_posting_read(i915, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (i915->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(i915);
}
/* Read back a WRPLL control register; returns true if the PLL is enabled. */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(i915, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
/* Read back the SPLL control register; returns true if the PLL is enabled. */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(i915,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(i915, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
/* LC PLL frequency in kHz*2 fixed point (LC_FREQ is in MHz) */
#define LC_FREQ_2K		U64_C(LC_FREQ * 2000)

/* Constraints for PLL good behavior */
/* candidate WRPLL divider triple: post (p), feedback*2 (n2), ref*2 (r2) —
 * NOTE(review): member list reconstructed; confirm against upstream */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
755 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
/*
 * Compare candidate (r2, n2, p) against the current best and update @best
 * if the candidate is preferable under the PPM @budget rules below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution. However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
/*
 * Exhaustively search WRPLL dividers (r2, n2, p) for @clock, keeping the
 * best candidate per hsw_wrpll_update_rnp(). Results via out-parameters.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* target frequency in kHz*2 fixed point */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 *  VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
/* Decode a WRPLL ctl value back into its output frequency (kHz). */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
			refclk = i915->display.dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = i915->display.dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000; /* NOTE(review): LCPLL ref reconstructed; confirm */
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
/*
 * Compute the WRPLL hw state for @crtc (HDMI) and round-trip port_clock
 * through the decoder so state checking matches what hw will produce.
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int p, n2, r2;

	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	crtc_state->dpll_hw_state.wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	/* get_freq ignores its pll argument here, so NULL is fine */
	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1006 static struct intel_shared_dpll *
1007 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1008 struct intel_crtc *crtc)
1010 struct intel_crtc_state *crtc_state =
1011 intel_atomic_get_new_crtc_state(state, crtc);
1013 return intel_find_shared_dpll(state, crtc,
1014 &crtc_state->dpll_hw_state,
1015 BIT(DPLL_ID_WRPLL2) |
1016 BIT(DPLL_ID_WRPLL1));
/* Validate that the DP link clock maps onto one of the fixed LCPLLs. */
static int
hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int clock = crtc_state->port_clock;

	/* NOTE(review): case labels reconstructed from the get_dpll variant below */
	switch (clock / 2) {
	case 81000:
	case 135000:
	case 270000:
		return 0;
	default:
		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
			    clock);
		return -EINVAL;
	}
}
/* Map the DP link clock to the corresponding always-on LCPLL. */
static struct intel_shared_dpll *
hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id pll_id;
	int clock = crtc_state->port_clock;

	switch (clock / 2) {
	case 81000:
		pll_id = DPLL_ID_LCPLL_810;
		break;
	case 135000:
		pll_id = DPLL_ID_LCPLL_1350;
		break;
	case 270000:
		pll_id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(clock / 2);
		return NULL;
	}

	pll = intel_get_shared_dpll_by_id(i915, pll_id);

	if (!pll)
		return NULL;

	return pll;
}
/* Report the fixed output frequency (kHz) of an LCPLL by its id. */
static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int link_clock = 0;

	switch (pll->info->id) {
	case DPLL_ID_LCPLL_810:
		link_clock = 81000;
		break;
	case DPLL_ID_LCPLL_1350:
		link_clock = 135000;
		break;
	case DPLL_ID_LCPLL_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
		break;
	}

	/* symbol clock -> bit clock style doubling, as in the SPLL variant */
	return link_clock * 2;
}
/* Compute the SPLL hw state for CRT output (only 1350 MHz is supported). */
static int
hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return -EINVAL;

	crtc_state->dpll_hw_state.spll =
		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;

	return 0;
}
/* Reserve the single SPLL for @crtc. */
static struct intel_shared_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_SPLL));
}
/* Decode the SPLL frequency select field into the output frequency (kHz). */
static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
				 const struct intel_shared_dpll *pll,
				 const struct intel_dpll_hw_state *pll_state)
{
	int link_clock = 0;

	switch (pll_state->spll & SPLL_FREQ_MASK) {
	case SPLL_FREQ_810MHz:
		link_clock = 81000;
		break;
	case SPLL_FREQ_1350MHz:
		link_clock = 135000;
		break;
	case SPLL_FREQ_2700MHz:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "bad spll freq\n");
		break;
	}

	return link_clock * 2;
}
/* Dispatch PLL state computation by output type: WRPLL/LCPLL/SPLL. */
static int hsw_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		return hsw_ddi_wrpll_compute_dpll(state, crtc);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		return hsw_ddi_lcpll_compute_dpll(crtc_state);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		return hsw_ddi_spll_compute_dpll(state, crtc);
	else
		return -EINVAL;
}
/* Reserve the PLL matching the output type computed above. */
static int hsw_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll = NULL;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		pll = hsw_ddi_spll_get_dpll(state, crtc);

	if (!pll)
		return -EINVAL;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
1186 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1188 i915->display.dpll.ref_clks.ssc = 135000;
1189 /* Non-SSC is only used on non-ULT HSW. */
1190 if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1191 i915->display.dpll.ref_clks.nssc = 24000;
1193 i915->display.dpll.ref_clks.nssc = 135000;
/* Log the HSW WRPLL/SPLL hw state for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *i915,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
/* vtables, always-on LCPLL stubs, PLL table and manager for HSW/BDW */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};

static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};

/* The LCPLLs are always on; these hooks are intentionally no-ops. */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}

static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};

static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
	{ "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
	{ "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
	{ "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ }, /* sentinel */
};

static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
/* Register set for one SKL DPLL: control plus the two HDMI config registers. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 — NOTE(review): .ctl entry reconstructed; confirm */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 — NOTE(review): .ctl entry reconstructed; confirm */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1291 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1292 struct intel_shared_dpll *pll)
1294 const enum intel_dpll_id id = pll->info->id;
1296 intel_de_rmw(i915, DPLL_CTRL1,
1297 DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1298 pll->state.hw_state.ctrl1 << (id * 6));
1299 intel_de_posting_read(i915, DPLL_CTRL1);
/* Program cfgcr1/cfgcr2, enable the PLL and wait for lock. */
static void skl_ddi_pll_enable(struct drm_i915_private *i915,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(i915, pll);

	intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(i915, regs[id].cfgcr1);
	intel_de_posting_read(i915, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&i915->drm, "DPLL %d not locked\n", id);
}
/*
 * "Enable" for DPLL0 only programs its CTRL1 field: DPLL0 itself is
 * always running because it drives CDCLK (see the WARN in the DPLL0
 * get_hw_state below), so no enable-bit handling is needed here.
 */
1322 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1323 struct intel_shared_dpll *pll)
1325 skl_ddi_pll_write_ctrl1(i915, pll);
/* Disable a SKL shared DPLL by clearing the enable bit (always bit 31)
 * in its control register, with a posting read to flush the write. */
1328 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1329 struct intel_shared_dpll *pll)
1331 const struct skl_dpll_regs *regs = skl_dpll_regs;
1332 const enum intel_dpll_id id = pll->info->id;
1334 /* the enable bit is always bit 31 */
1335 intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1336 intel_de_posting_read(i915, regs[id].ctl);
/* Intentionally a no-op: DPLL0 must stay enabled since it drives CDCLK
 * (body elided in this listing; presumably empty — TODO confirm). */
1339 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1340 struct intel_shared_dpll *pll)
/*
 * Read back the hardware state of a SKL shared DPLL. Takes a
 * display-core power reference only if the power well is already enabled
 * (bails out otherwise — return paths are elided in this listing),
 * extracts this PLL's 6-bit slice of DPLL_CTRL1, and reads CFGCR1/2 only
 * when HDMI mode is active to avoid capturing stale values.
 */
1344 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1345 struct intel_shared_dpll *pll,
1346 struct intel_dpll_hw_state *hw_state)
1349 const struct skl_dpll_regs *regs = skl_dpll_regs;
1350 const enum intel_dpll_id id = pll->info->id;
1351 intel_wakeref_t wakeref;
1354 wakeref = intel_display_power_get_if_enabled(i915,
1355 POWER_DOMAIN_DISPLAY_CORE);
1361 val = intel_de_read(i915, regs[id].ctl);
1362 if (!(val & LCPLL_PLL_ENABLE))
1365 val = intel_de_read(i915, DPLL_CTRL1);
1366 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1368 /* avoid reading back stale values if HDMI mode is not enabled */
1369 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1370 hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1371 hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1376 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * DPLL0 variant of get_hw_state: DPLL0 must always be enabled because it
 * drives CDCLK, so finding its enable bit clear triggers a WARN. Only the
 * CTRL1 slice is captured — DPLL0 has no CFGCR registers (no HDMI mode).
 */
1385 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1386 struct intel_shared_dpll *pll,
1387 struct intel_dpll_hw_state *hw_state)
1389 const struct skl_dpll_regs *regs = skl_dpll_regs;
1390 const enum intel_dpll_id id = pll->info->id;
1391 intel_wakeref_t wakeref;
1395 wakeref = intel_display_power_get_if_enabled(i915,
1396 POWER_DOMAIN_DISPLAY_CORE);
1398 /* DPLL0 is always enabled since it drives CDCLK */
1399 val = intel_de_read(i915, regs[id].ctl);
1400 if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1403 val = intel_de_read(i915, DPLL_CTRL1);
1404 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1409 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/* Search state for the SKL WRPLL divider hunt: tracks the best (lowest
 * deviation) central-freq/DCO-freq/divider combination found so far. */
1414 struct skl_wrpll_context {
1415 u64 min_deviation; /* current minimal deviation */
1416 u64 central_freq; /* chosen central freq */
1417 u64 dco_freq; /* chosen dco freq */
1418 unsigned int p; /* chosen divider */
1421 /* DCO freq must be within +1%/-6% of the DCO central freq */
/* Deviation limits in units of 0.01% (values are compared against a
 * deviation scaled by 10000 in skl_wrpll_try_divider below). */
1422 #define SKL_DCO_MAX_PDEVIATION 100
1423 #define SKL_DCO_MAX_NDEVIATION 600
/*
 * Evaluate one candidate divider: compute the DCO frequency's deviation
 * from the central frequency in 0.01% units (the divisor line is elided
 * in this listing — presumably central_freq; TODO confirm) and record the
 * candidate if it beats the current best while staying within the
 * asymmetric +1%/-6% window defined above.
 */
1425 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1428 unsigned int divider)
1432 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1435 /* positive deviation */
1436 if (dco_freq >= central_freq) {
1437 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1438 deviation < ctx->min_deviation) {
1439 ctx->min_deviation = deviation;
1440 ctx->central_freq = central_freq;
1441 ctx->dco_freq = dco_freq;
1444 /* negative deviation */
1445 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1446 deviation < ctx->min_deviation) {
1447 ctx->min_deviation = deviation;
1448 ctx->central_freq = central_freq;
1449 ctx->dco_freq = dco_freq;
/*
 * Decompose the overall divider p into the hardware's three-stage
 * p0 * p1 * p2 factorization. Even dividers are factored via p/2; odd
 * dividers are handled by the explicit table of legal values
 * (3, 5, 7, 9, 15, 21, 35). The assignment lines inside each branch are
 * elided in this listing.
 */
1454 static void skl_wrpll_get_multipliers(unsigned int p,
1455 unsigned int *p0 /* out */,
1456 unsigned int *p1 /* out */,
1457 unsigned int *p2 /* out */)
1461 unsigned int half = p / 2;
1463 if (half == 1 || half == 2 || half == 3 || half == 5) {
1467 } else if (half % 2 == 0) {
1471 } else if (half % 3 == 0) {
1475 } else if (half % 7 == 0) {
1480 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1484 } else if (p == 5 || p == 7) {
1488 } else if (p == 15) {
1492 } else if (p == 21) {
1496 } else if (p == 35) {
/* Computed WRPLL register parameters (dco_integer/dco_fraction and the
 * p/q/k divider encodings — member lines elided in this listing). */
1503 struct skl_wrpll_params {
/*
 * Translate the chosen central frequency and p0/p1/p2 multipliers into
 * the register encodings (central_freq selector, pdiv/kdiv — those
 * switch bodies are elided here — qdiv, and the 15.15-style DCO
 * integer/fraction split derived from dco_freq / ref_clock).
 */
1513 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1517 u32 p0, u32 p1, u32 p2)
1521 switch (central_freq) {
1523 params->central_freq = 0;
1526 params->central_freq = 1;
1529 params->central_freq = 3;
1546 WARN(1, "Incorrect PDiv\n");
1563 WARN(1, "Incorrect KDiv\n");
1566 params->qdiv_ratio = p1;
1567 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1569 dco_freq = p0 * p1 * p2 * afe_clock;
1572 * Intermediate values are in Hz.
1573 * Divide by MHz to match bspec
1575 params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1576 params->dco_fraction =
1577 div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1578 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
/*
 * Find WRPLL dividers for a pixel clock: iterate even dividers first,
 * then odd, against the three candidate DCO central frequencies, keeping
 * the lowest-deviation combination (tracked in skl_wrpll_context). A
 * zero-deviation hit short-circuits the scan, and an even-divider
 * solution is preferred over odd ones (the inner-loop argument list and
 * the no-solution error path are elided in this listing).
 */
1582 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1584 struct skl_wrpll_params *wrpll_params)
1586 static const u64 dco_central_freq[3] = { 8400000000ULL,
1589 static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1590 24, 28, 30, 32, 36, 40, 42, 44,
1591 48, 52, 54, 56, 60, 64, 66, 68,
1592 70, 72, 76, 78, 80, 84, 88, 90,
1594 static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1595 static const struct {
1599 { even_dividers, ARRAY_SIZE(even_dividers) },
1600 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1602 struct skl_wrpll_context ctx = {
1603 .min_deviation = U64_MAX,
1605 unsigned int dco, d, i;
1606 unsigned int p0, p1, p2;
1607 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1609 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1610 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1611 for (i = 0; i < dividers[d].n_dividers; i++) {
1612 unsigned int p = dividers[d].list[i];
1613 u64 dco_freq = p * afe_clock;
1615 skl_wrpll_try_divider(&ctx,
1616 dco_central_freq[dco],
1620 * Skip the remaining dividers if we're sure to
1621 * have found the definitive divider, we can't
1622 * improve a 0 deviation.
1624 if (ctx.min_deviation == 0)
1625 goto skip_remaining_dividers;
1629 skip_remaining_dividers:
1631 * If a solution is found with an even divider, prefer
1634 if (d == 0 && ctx.p)
1642 * gcc incorrectly analyses that these can be used without being
1643 * initialized. To be fair, it's hard to guess.
1646 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1647 skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1648 ctx.central_freq, p0, p1, p2);
/*
 * Recover the pixel clock from WRPLL register state: decode p0/p1/p2
 * from CFGCR2 (case bodies elided here), rebuild the DCO frequency from
 * the CFGCR1 integer + 15-bit fraction, then divide by p0*p1*p2*5
 * (5x AFE clock). Includes a quirk: PDIV_7_INVALID, as programmed by a
 * buggy ASUS Z170M BIOS, is treated the same as PDIV_7 since hardware
 * ignores bit 0. Division by a zero divider is guarded with a WARN.
 */
1653 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1654 const struct intel_shared_dpll *pll,
1655 const struct intel_dpll_hw_state *pll_state)
1657 int ref_clock = i915->display.dpll.ref_clks.nssc;
1658 u32 p0, p1, p2, dco_freq;
1660 p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1661 p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1663 if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1664 p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1670 case DPLL_CFGCR2_PDIV_1:
1673 case DPLL_CFGCR2_PDIV_2:
1676 case DPLL_CFGCR2_PDIV_3:
1679 case DPLL_CFGCR2_PDIV_7_INVALID:
1681 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1682 * handling it the same way as PDIV_7.
1684 drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1686 case DPLL_CFGCR2_PDIV_7:
1695 case DPLL_CFGCR2_KDIV_5:
1698 case DPLL_CFGCR2_KDIV_2:
1701 case DPLL_CFGCR2_KDIV_3:
1704 case DPLL_CFGCR2_KDIV_1:
1712 dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1715 dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1718 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1721 return dco_freq / (p0 * p1 * p2 * 5);
/*
 * Compute the HDMI-mode DPLL state for a crtc: run the WRPLL divider
 * search on port_clock (kHz -> Hz), encode the result into the
 * ctrl1/cfgcr1/cfgcr2 fields of dpll_hw_state, then round-trip
 * port_clock through skl_ddi_wrpll_get_freq so the state and the clock
 * agree exactly. DPLL id 0 is used for the ctrl1 encoding per the
 * comment below.
 */
1724 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1726 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1727 struct skl_wrpll_params wrpll_params = {};
1728 u32 ctrl1, cfgcr1, cfgcr2;
1732 * See comment in intel_dpll_hw_state to understand why we always use 0
1733 * as the DPLL id in this function.
1735 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1737 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1739 ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1740 i915->display.dpll.ref_clks.nssc, &wrpll_params);
1744 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1745 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1746 wrpll_params.dco_integer;
1748 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1749 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1750 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1751 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1752 wrpll_params.central_freq;
1754 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1755 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1756 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1758 crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1759 &crtc_state->dpll_hw_state);
/*
 * Build the DP-mode ctrl1 value: select the fixed link-rate encoding
 * matching port_clock/2 (switch cases' numeric labels are elided in this
 * listing) and store it in dpll_hw_state. DPLL id 0 is used for the
 * encoding per the comment below.
 */
1765 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1770 * See comment in intel_dpll_hw_state to understand why we always use 0
1771 * as the DPLL id in this function.
1773 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1774 switch (crtc_state->port_clock / 2) {
1776 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1779 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1782 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1786 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1789 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1792 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1796 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
/*
 * Decode the DP link rate from the PLL's ctrl1 slice and return the port
 * clock (link_clock * 2, in kHz). Unknown encodings WARN; link_clock
 * assignments for the 810/2700 cases are elided in this listing.
 */
1801 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1802 const struct intel_shared_dpll *pll,
1803 const struct intel_dpll_hw_state *pll_state)
1807 switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1808 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1809 case DPLL_CTRL1_LINK_RATE_810:
1812 case DPLL_CTRL1_LINK_RATE_1080:
1813 link_clock = 108000;
1815 case DPLL_CTRL1_LINK_RATE_1350:
1816 link_clock = 135000;
1818 case DPLL_CTRL1_LINK_RATE_1620:
1819 link_clock = 162000;
1821 case DPLL_CTRL1_LINK_RATE_2160:
1822 link_clock = 216000;
1824 case DPLL_CTRL1_LINK_RATE_2700:
1825 link_clock = 270000;
1828 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1832 return link_clock * 2;
/* Dispatch DPLL state computation by output type: HDMI uses the WRPLL
 * divider search, DP uses the fixed link-rate table. */
1835 static int skl_compute_dpll(struct intel_atomic_state *state,
1836 struct intel_crtc *crtc,
1837 struct intel_encoder *encoder)
1839 struct intel_crtc_state *crtc_state =
1840 intel_atomic_get_new_crtc_state(state, crtc);
1842 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1843 return skl_ddi_hdmi_pll_dividers(crtc_state);
1844 else if (intel_crtc_has_dp_encoder(crtc_state))
1845 return skl_ddi_dp_set_dpll_hw_state(crtc_state);
/*
 * Pick a shared DPLL for the crtc: eDP must use DPLL0 (shared with
 * CDCLK), everything else may use DPLL1..3. The chosen PLL is referenced
 * into the atomic state and recorded in crtc_state->shared_dpll. The
 * failure path for a NULL pll is elided in this listing.
 */
1850 static int skl_get_dpll(struct intel_atomic_state *state,
1851 struct intel_crtc *crtc,
1852 struct intel_encoder *encoder)
1854 struct intel_crtc_state *crtc_state =
1855 intel_atomic_get_new_crtc_state(state, crtc);
1856 struct intel_shared_dpll *pll;
1858 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1859 pll = intel_find_shared_dpll(state, crtc,
1860 &crtc_state->dpll_hw_state,
1861 BIT(DPLL_ID_SKL_DPLL0));
1863 pll = intel_find_shared_dpll(state, crtc,
1864 &crtc_state->dpll_hw_state,
1865 BIT(DPLL_ID_SKL_DPLL3) |
1866 BIT(DPLL_ID_SKL_DPLL2) |
1867 BIT(DPLL_ID_SKL_DPLL1));
1871 intel_reference_shared_dpll(state, crtc,
1872 pll, &crtc_state->dpll_hw_state);
1874 crtc_state->shared_dpll = pll;
/* Route frequency readout by mode bit: HDMI state decodes through the
 * WRPLL path, otherwise the fixed DP link-rate table is used. */
1879 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1880 const struct intel_shared_dpll *pll,
1881 const struct intel_dpll_hw_state *pll_state)
1884 * ctrl1 register is already shifted for each pll, just use 0 to get
1885 * the internal shift for each field
1887 if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1888 return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1890 return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
/* The non-SSC DPLL reference on SKL is the CDCLK reference clock. */
1893 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1896 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
/* Debug dump of the SKL DPLL state (ctrl1/cfgcr1/cfgcr2 arguments are
 * elided in this listing). */
1899 static void skl_dump_hw_state(struct drm_i915_private *i915,
1900 const struct intel_dpll_hw_state *hw_state)
1902 drm_dbg_kms(&i915->drm, "dpll_hw_state: "
1903 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
/* Callbacks for SKL DPLL1..3 (full enable/disable sequencing). */
1909 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1910 .enable = skl_ddi_pll_enable,
1911 .disable = skl_ddi_pll_disable,
1912 .get_hw_state = skl_ddi_pll_get_hw_state,
1913 .get_freq = skl_ddi_pll_get_freq,
/* Callbacks for DPLL0, which drives CDCLK: enable only touches CTRL1
 * and disable is effectively a no-op. */
1916 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1917 .enable = skl_ddi_dpll0_enable,
1918 .disable = skl_ddi_dpll0_disable,
1919 .get_hw_state = skl_ddi_dpll0_get_hw_state,
1920 .get_freq = skl_ddi_pll_get_freq,
/* SKL DPLL table: DPLL0 is always-on (CDCLK source), DPLL1..3 are
 * allocated on demand. */
1923 static const struct dpll_info skl_plls[] = {
1924 { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1925 { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1926 { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1927 { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
/* Skylake shared-DPLL manager wiring the SKL table and callbacks. */
1931 static const struct intel_dpll_mgr skl_pll_mgr = {
1932 .dpll_info = skl_plls,
1933 .compute_dplls = skl_compute_dpll,
1934 .get_dplls = skl_get_dpll,
1935 .put_dplls = intel_put_dpll,
1936 .update_ref_clks = skl_update_dpll_ref_clks,
1937 .dump_hw_state = skl_dump_hw_state,
/*
 * Enable a Broxton/Geminilake port PLL. The PLL id maps 1:1 to a port;
 * the sequence is: select the non-SSC reference, (GLK only) power up the
 * PLL and poll its power state, disable the 10-bit clock, program the
 * P/N/M2 dividers, loop-filter coefficients, calibration target, lock
 * threshold and DCO amplitude from the cached hw_state, kick a
 * recalibration through EBB_4, set PORT_PLL_ENABLE and wait for lock,
 * then (GLK only) apply the DCC delay-range tweak and finally program
 * lane staggering via the PCS group register.
 */
1940 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
1941 struct intel_shared_dpll *pll)
1944 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1946 enum dpio_channel ch;
1948 bxt_port_to_phy_channel(i915, port, &phy, &ch);
1950 /* Non-SSC reference */
1951 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
1953 if (IS_GEMINILAKE(i915)) {
1954 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
1955 0, PORT_PLL_POWER_ENABLE);
1957 if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
1958 PORT_PLL_POWER_STATE), 200))
1960 "Power state not set for PLL:%d\n", port);
1963 /* Disable 10 bit clock */
1964 intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
1965 PORT_PLL_10BIT_CLK_ENABLE, 0);
/* Write P1/P2 dividers */
1968 intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
1969 PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
1971 /* Write M2 integer */
1972 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
1973 PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
/* Write N divider */
1976 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
1977 PORT_PLL_N_MASK, pll->state.hw_state.pll1);
1979 /* Write M2 fraction */
1980 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
1981 PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
1983 /* Write M2 fraction enable */
1984 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
1985 PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
/* Loop filter: proportional/integral coefficients and gain control */
1988 temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
1989 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1990 temp &= ~PORT_PLL_INT_COEFF_MASK;
1991 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1992 temp |= pll->state.hw_state.pll6;
1993 intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
1995 /* Write calibration val */
1996 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
1997 PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
/* Lock threshold */
1999 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2000 PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
/* DCO amplitude / override */
2002 temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2003 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2004 temp &= ~PORT_PLL_DCO_AMP_MASK;
2005 temp |= pll->state.hw_state.pll10;
2006 intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2008 /* Recalibrate with new settings */
2009 temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2010 temp |= PORT_PLL_RECALIBRATE;
2011 intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2012 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2013 temp |= pll->state.hw_state.ebb4;
2014 intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable the PLL and wait for lock (timeout value line elided). */
2017 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2018 intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2020 if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2022 drm_err(&i915->drm, "PLL %d not locked\n", port);
2024 if (IS_GEMINILAKE(i915)) {
2025 temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
2026 temp |= DCC_DELAY_RANGE_2;
2027 intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2031 * While we write to the group register to program all lanes at once we
2032 * can read only lane registers and we pick lanes 0/1 for that.
2034 temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2035 temp &= ~LANE_STAGGER_MASK;
2036 temp &= ~LANESTAGGER_STRAP_OVRD;
2037 temp |= pll->state.hw_state.pcsdw12;
2038 intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
/*
 * Disable a BXT/GLK port PLL: clear PORT_PLL_ENABLE (with posting read),
 * then on Geminilake additionally power the PLL down and poll for the
 * power state to clear.
 */
2041 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2042 struct intel_shared_dpll *pll)
2044 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2046 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2047 intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2049 if (IS_GEMINILAKE(i915)) {
2050 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2051 PORT_PLL_POWER_ENABLE, 0);
2053 if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2054 PORT_PLL_POWER_STATE), 200))
2056 "Power state not reset for PLL:%d\n", port);
/*
 * Read back a BXT/GLK port PLL's state, masking each register down to
 * the fields this driver programs. Lane staggering can only be read per
 * lane, so lanes 0/1 are captured and a mismatch against lanes 2/3 is
 * logged. Returns false without touching hw_state if the PLL is off or
 * the display power well is down (early-return lines elided here).
 */
2060 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2061 struct intel_shared_dpll *pll,
2062 struct intel_dpll_hw_state *hw_state)
2064 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2065 intel_wakeref_t wakeref;
2067 enum dpio_channel ch;
2071 bxt_port_to_phy_channel(i915, port, &phy, &ch);
2073 wakeref = intel_display_power_get_if_enabled(i915,
2074 POWER_DOMAIN_DISPLAY_CORE);
2080 val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2081 if (!(val & PORT_PLL_ENABLE))
2084 hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2085 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2087 hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2088 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2090 hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2091 hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2093 hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2094 hw_state->pll1 &= PORT_PLL_N_MASK;
2096 hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2097 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2099 hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2100 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2102 hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2103 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2104 PORT_PLL_INT_COEFF_MASK |
2105 PORT_PLL_GAIN_CTL_MASK;
2107 hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2108 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2110 hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2111 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2113 hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2114 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2115 PORT_PLL_DCO_AMP_MASK;
2118 * While we write to the group register to program all lanes at once we
2119 * can read only lane registers. We configure all lanes the same way, so
2120 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2122 hw_state->pcsdw12 = intel_de_read(i915,
2123 BXT_PORT_PCS_DW12_LN01(phy, ch));
2124 if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2126 "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2129 BXT_PORT_PCS_DW12_LN23(phy, ch)));
2130 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2135 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2140 /* pre-calculated values for DP linkrates */
/* Divider presets per DP link rate (dot clock in kHz); m2 is a .22
 * binary fixed-point value, e.g. 0x819999a == 32.4. */
2141 static const struct dpll bxt_dp_clk_val[] = {
2142 /* m2 is .22 binary fixed point */
2143 { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2144 { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2145 { .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2146 { .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2147 { .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2148 { .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2149 { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
/*
 * Compute HDMI dividers via the generic bxt_find_best_dpll search
 * (failure path elided here) and sanity-check that m1 came out as the
 * fixed value 2.
 */
2153 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2154 struct dpll *clk_div)
2156 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2158 /* Calculate HDMI div */
2160 * FIXME: tie the following calculation into
2161 * i9xx_crtc_compute_clock
2163 if (!bxt_find_best_dpll(crtc_state, clk_div))
2166 drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
/*
 * Select the pre-calculated divider set matching port_clock (defaulting
 * to the first table entry), then derive vco/dot via
 * chv_calc_dpll_params and warn if the result doesn't reproduce the
 * requested port clock.
 */
2171 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2172 struct dpll *clk_div)
2174 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2177 *clk_div = bxt_dp_clk_val[0];
2178 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2179 if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2180 *clk_div = bxt_dp_clk_val[i];
2185 chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2187 drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2188 clk_div->dot != crtc_state->port_clock);
/*
 * Encode the chosen dividers into BXT port-PLL register values: pick
 * loop-filter coefficients and calibration target from the VCO range
 * (the per-range assignment lines are elided in this listing; an
 * out-of-range VCO is an error), pick a lane-stagger value from the
 * clock rate, then fill ebb0/ebb4, pll0..pll3, pll6, pll8..pll10 and
 * pcsdw12 in crtc_state->dpll_hw_state. M2's top bits go to PLL0
 * (integer) and the low 22 bits to PLL2 (fraction), with the fraction
 * enable set only when a fractional part exists.
 */
2191 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2192 const struct dpll *clk_div)
2194 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2195 struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2196 int clock = crtc_state->port_clock;
2197 int vco = clk_div->vco;
2198 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2201 if (vco >= 6200000 && vco <= 6700000) {
2206 } else if ((vco > 5400000 && vco < 6200000) ||
2207 (vco >= 4800000 && vco < 5400000)) {
2212 } else if (vco == 5400000) {
2218 drm_err(&i915->drm, "Invalid VCO\n");
/* Lane staggering selection by clock band (assignments elided). */
2224 else if (clock > 135000)
2226 else if (clock > 67000)
2228 else if (clock > 33000)
2233 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2234 dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2235 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2236 dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2238 if (clk_div->m2 & 0x3fffff)
2239 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2241 dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2242 PORT_PLL_INT_COEFF(int_coef) |
2243 PORT_PLL_GAIN_CTL(gain_ctl);
2245 dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2247 dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2249 dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2250 PORT_PLL_DCO_AMP_OVR_EN_H;
2252 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2254 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
/*
 * Reverse the register encoding into a struct dpll (m2 recombined from
 * integer<<22 plus optional fraction; m1 assignment elided here) and let
 * chv_calc_dpll_params compute the resulting clock.
 */
2259 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2260 const struct intel_shared_dpll *pll,
2261 const struct intel_dpll_hw_state *pll_state)
2266 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2267 if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2268 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2269 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2270 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2271 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2273 return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
/* DP path: look up pre-calculated dividers, then encode them into
 * register state. */
2277 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2279 struct dpll clk_div = {};
2281 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2283 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/*
 * HDMI path: search for dividers, encode the register state, then
 * round-trip port_clock through bxt_ddi_pll_get_freq so state and clock
 * agree (error-check lines elided here).
 */
2287 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2289 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2290 struct dpll clk_div = {};
2293 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2295 ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2299 crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2300 &crtc_state->dpll_hw_state);
/* Dispatch BXT DPLL state computation by output type (HDMI vs DP). */
2305 static int bxt_compute_dpll(struct intel_atomic_state *state,
2306 struct intel_crtc *crtc,
2307 struct intel_encoder *encoder)
2309 struct intel_crtc_state *crtc_state =
2310 intel_atomic_get_new_crtc_state(state, crtc);
2312 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2313 return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2314 else if (intel_crtc_has_dp_encoder(crtc_state))
2315 return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
/*
 * On BXT every port has its own dedicated PLL (id == port number), so
 * "allocation" is just a lookup by id followed by taking a reference in
 * the atomic state.
 */
2320 static int bxt_get_dpll(struct intel_atomic_state *state,
2321 struct intel_crtc *crtc,
2322 struct intel_encoder *encoder)
2324 struct intel_crtc_state *crtc_state =
2325 intel_atomic_get_new_crtc_state(state, crtc);
2326 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2327 struct intel_shared_dpll *pll;
2328 enum intel_dpll_id id;
2330 /* 1:1 mapping between ports and PLLs */
2331 id = (enum intel_dpll_id) encoder->port;
2332 pll = intel_get_shared_dpll_by_id(i915, id);
2334 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2335 crtc->base.base.id, crtc->base.name, pll->info->name);
2337 intel_reference_shared_dpll(state, crtc,
2338 pll, &crtc_state->dpll_hw_state);
2340 crtc_state->shared_dpll = pll;
/* BXT uses a fixed 100 MHz reference for both SSC and non-SSC. */
2345 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2347 i915->display.dpll.ref_clks.ssc = 100000;
2348 i915->display.dpll.ref_clks.nssc = 100000;
2349 /* DSI non-SSC ref 19.2MHz */
/* Debug dump of the full BXT port-PLL register set (argument list
 * elided in this listing). */
2352 static void bxt_dump_hw_state(struct drm_i915_private *i915,
2353 const struct intel_dpll_hw_state *hw_state)
2355 drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2356 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2357 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
/* BXT/GLK per-port PLL callbacks. */
2371 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2372 .enable = bxt_ddi_pll_enable,
2373 .disable = bxt_ddi_pll_disable,
2374 .get_hw_state = bxt_ddi_pll_get_hw_state,
2375 .get_freq = bxt_ddi_pll_get_freq,
/* One PLL per port (A/B/C); ids reuse the SKL DPLL id space. */
2378 static const struct dpll_info bxt_plls[] = {
2379 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2380 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2381 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
/* Broxton/Geminilake shared-DPLL manager. */
2385 static const struct intel_dpll_mgr bxt_pll_mgr = {
2386 .dpll_info = bxt_plls,
2387 .compute_dplls = bxt_compute_dpll,
2388 .get_dplls = bxt_get_dpll,
2389 .put_dplls = intel_put_dpll,
2390 .update_ref_clks = bxt_update_dpll_ref_clks,
2391 .dump_hw_state = bxt_dump_hw_state,
/*
 * Split an ICL WRPLL divider into pdiv/qdiv/kdiv. Even dividers are
 * factored preferring kdiv=2 with a qdiv chosen by the first matching
 * modulus (assignment lines for pdiv/kdiv are elided in this listing);
 * odd dividers 3/5/7 map to qdiv=1, and 9/15/21 to pdiv = bestdiv/3.
 */
2394 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2395 int *qdiv, int *kdiv)
2398 if (bestdiv % 2 == 0) {
2403 } else if (bestdiv % 4 == 0) {
2405 *qdiv = bestdiv / 4;
2407 } else if (bestdiv % 6 == 0) {
2409 *qdiv = bestdiv / 6;
2411 } else if (bestdiv % 5 == 0) {
2413 *qdiv = bestdiv / 10;
2415 } else if (bestdiv % 14 == 0) {
2417 *qdiv = bestdiv / 14;
2421 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2425 } else { /* 9, 15, 21 */
2426 *pdiv = bestdiv / 3;
/*
 * Encode pdiv/qdiv/kdiv into ICL register form (the switch bodies are
 * elided in this listing; unexpected values WARN) and split the DCO
 * ratio into a 15-bit fixed-point integer/fraction pair:
 * dco = (dco_freq << 15) / ref_freq.
 */
2433 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2434 u32 dco_freq, u32 ref_freq,
2435 int pdiv, int qdiv, int kdiv)
2450 WARN(1, "Incorrect KDiv\n")2467 WARN(1, "Incorrect PDiv\n");
2470 WARN_ON(kdiv != 2 && qdiv != 1);
2472 params->qdiv_ratio = qdiv;
2473 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2475 dco = div_u64((u64)dco_freq << 15, ref_freq);
2477 params->dco_integer = dco >> 15;
2478 params->dco_fraction = dco & 0x7fff;
2482 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2483 * Program half of the nominal DCO divider fraction value.
/* Workaround applies on EHL/JSL display step B0+ and on TGL/ADL-S/ADL-P,
 * but only with a 38.4 MHz non-SSC reference. */
2486 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2488 return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
2489 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2490 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2491 i915->display.dpll.ref_clks.nssc == 38400;
/* Table entry pairing a port clock with its precomputed WRPLL params
 * (the clock member line is elided in this listing). */
2494 struct icl_combo_pll_params {
2496 struct skl_wrpll_params wrpll;
2500 * These values are already adjusted: they're the bits we write to the
2501 * registers, not the logical values.
/* Precomputed combo-PHY DP PLL parameters for a 24 MHz reference; the
 * bracketed comments give the link rate in Gbps per entry. */
2503 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2505 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2506 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2508 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2509 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2511 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2512 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2514 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2515 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2517 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2518 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2520 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2521 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2523 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2524 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2526 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2527 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2531 /* Also used for 38.4 MHz values. */
/* Same table for a 19.2 MHz reference (and 38.4 MHz, which the PLL
 * internally divides by 2 — see icl_wrpll_ref_clock below). */
2532 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2534 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2535 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2537 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2538 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2540 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2541 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2543 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2544 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2546 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2547 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2549 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2550 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2552 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2553 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2555 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2556 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
/* Fixed TBT PLL parameters, ICL, 24 MHz reference. */
2559 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2560 .dco_integer = 0x151, .dco_fraction = 0x4000,
2561 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/* Fixed TBT PLL parameters, ICL, 19.2 MHz reference. */
2564 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2565 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2566 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/* Fixed TBT PLL parameters, TGL+, 19.2 MHz reference; only the DCO
 * values matter on TGL. */
2569 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2570 .dco_integer = 0x54, .dco_fraction = 0x3000,
2571 /* the following params are unused */
2572 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
/* Fixed TBT PLL parameters, TGL+, 24 MHz reference (divider fields are
 * unused, as in the 19.2 MHz variant; the initializer tail is elided). */
2575 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2576 .dco_integer = 0x43, .dco_fraction = 0x4000,
2577 /* the following params are unused */
/*
 * Look up the precomputed combo-PLL parameters for the crtc's port
 * clock, selecting the 24 MHz or 19.2/38.4 MHz table by the non-SSC
 * reference. Both tables have the same length, so iterating with the
 * 24 MHz table's ARRAY_SIZE is valid for either. Unknown clocks hit
 * MISSING_CASE (the return-value lines are elided in this listing).
 */
2580 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2581 struct skl_wrpll_params *pll_params)
2583 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2584 const struct icl_combo_pll_params *params =
2585 i915->display.dpll.ref_clks.nssc == 24000 ?
2586 icl_dp_combo_pll_24MHz_values :
2587 icl_dp_combo_pll_19_2MHz_values;
2588 int clock = crtc_state->port_clock;
2591 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2592 if (clock == params[i].clock) {
2593 *pll_params = params[i].wrpll;
2598 MISSING_CASE(clock);
/*
 * Select the fixed Thunderbolt PLL parameter set for the current platform
 * (TGL+ vs. ICL tables) based on the non-SSC reference clock frequency.
 * NOTE(review): the case labels (19200/38400/24000) are elided in this dump —
 * confirm the switch arms against the full source.
 */
2602 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2603 struct skl_wrpll_params *pll_params)
2605 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2607 if (DISPLAY_VER(i915) >= 12) {
2608 switch (i915->display.dpll.ref_clks.nssc) {
2610 MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2614 *pll_params = tgl_tbt_pll_19_2MHz_values;
2617 *pll_params = tgl_tbt_pll_24MHz_values;
2621 switch (i915->display.dpll.ref_clks.nssc) {
2623 MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2627 *pll_params = icl_tbt_pll_19_2MHz_values;
2630 *pll_params = icl_tbt_pll_24MHz_values;
/*
 * The TBT PLL has no single output frequency to report: it outputs several
 * rates simultaneously and the DDI clock mux picks one, so querying the PLL
 * itself is a driver bug — hence the unconditional WARN.
 */
2638 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2639 const struct intel_shared_dpll *pll,
2640 const struct intel_dpll_hw_state *pll_state)
2643 * The PLL outputs multiple frequencies at the same time, selection is
2644 * made at DDI clock mux level.
2646 drm_WARN_ON(&i915->drm, 1);
/*
 * Return the effective WRPLL reference clock in kHz. A 38.4 MHz refclk is
 * treated as 19.2 MHz because the DPLL hardware divides it by 2 internally.
 */
2651 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2653 int ref_clock = i915->display.dpll.ref_clks.nssc;
2656 * For ICL+, the spec states: if reference frequency is 38.4,
2657 * use 19.2 because the DPLL automatically divides that by 2.
2659 if (ref_clock == 38400)
/*
 * Compute WRPLL (HDMI/DSI) parameters: scan the spec's divider list for the
 * candidate whose DCO frequency lands inside [7998, 10000] MHz and is closest
 * to the band's midpoint ("best centrality"), then split the winning divider
 * into pdiv/qdiv/kdiv and populate the parameter struct.
 */
2666 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2667 struct skl_wrpll_params *wrpll_params)
2669 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2670 int ref_clock = icl_wrpll_ref_clock(i915);
/* AFE clock = 5x symbol/port clock (kHz). */
2671 u32 afe_clock = crtc_state->port_clock * 5;
2672 u32 dco_min = 7998000;
2673 u32 dco_max = 10000000;
2674 u32 dco_mid = (dco_min + dco_max) / 2;
/* Divider candidates per spec; even values first, odd fallbacks last. */
2675 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2676 18, 20, 24, 28, 30, 32, 36, 40,
2677 42, 44, 48, 50, 52, 54, 56, 60,
2678 64, 66, 68, 70, 72, 76, 78, 80,
2679 84, 88, 90, 92, 96, 98, 100, 102,
2680 3, 5, 7, 9, 15, 21 };
2681 u32 dco, best_dco = 0, dco_centrality = 0;
2682 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2683 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2685 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2686 dco = afe_clock * dividers[d];
2688 if (dco <= dco_max && dco >= dco_min) {
2689 dco_centrality = abs(dco - dco_mid);
2691 if (dco_centrality < best_dco_centrality) {
2692 best_dco_centrality = dco_centrality;
2693 best_div = dividers[d];
2702 icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2703 icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
/*
 * Decode the combo-PHY PLL output frequency from the cfgcr0/cfgcr1 register
 * state: extract p0 (pdiv), p1 (qdiv) and p2 (kdiv), reconstruct the DCO
 * frequency from the integer+fractional feedback divider, then divide by the
 * total post-divider and the x5 AFE factor.
 * NOTE(review): the pdiv/kdiv decode case bodies are elided in this dump.
 */
2709 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2710 const struct intel_shared_dpll *pll,
2711 const struct intel_dpll_hw_state *pll_state)
2713 int ref_clock = icl_wrpll_ref_clock(i915);
2715 u32 p0, p1, p2, dco_freq;
2717 p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2718 p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
/* qdiv ratio is only meaningful when qdiv mode is enabled. */
2720 if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2721 p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2722 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2727 case DPLL_CFGCR1_PDIV_2:
2730 case DPLL_CFGCR1_PDIV_3:
2733 case DPLL_CFGCR1_PDIV_5:
2736 case DPLL_CFGCR1_PDIV_7:
2742 case DPLL_CFGCR1_KDIV_1:
2745 case DPLL_CFGCR1_KDIV_2:
2748 case DPLL_CFGCR1_KDIV_3:
2753 dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2756 dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2757 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
/* EHL/JSL workaround halved the fraction on write; undo it on read. */
2759 if (ehl_combo_pll_div_frac_wa_needed(i915))
/* Fraction is in 1/2^15 units of the reference clock. */
2762 dco_freq += (dco_fraction * ref_clock) / 0x8000;
/* Guard against division by zero from undecoded/invalid divider fields. */
2764 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2767 return dco_freq / (p0 * p1 * p2 * 5);
/*
 * Pack computed WRPLL parameters into the cfgcr0/cfgcr1 (and, with the VBT
 * AFC-startup override, div0) register values for a combo-PHY DPLL.
 */
2770 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2771 const struct skl_wrpll_params *pll_params,
2772 struct intel_dpll_hw_state *pll_state)
2774 u32 dco_fraction = pll_params->dco_fraction;
/* EHL/JSL hardware expects the DCO fraction at half value (workaround). */
2776 if (ehl_combo_pll_div_frac_wa_needed(i915))
2777 dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2779 pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2780 pll_params->dco_integer;
2782 pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2783 DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2784 DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2785 DPLL_CFGCR1_PDIV(pll_params->pdiv);
/* TGL+ uses CFSELOVRD; older platforms set the 8400 central frequency. */
2787 if (DISPLAY_VER(i915) >= 12)
2788 pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2790 pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2792 if (i915->display.vbt.override_afc_startup)
2793 pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
/*
 * Search for MG/DKL PHY PLL divisors: try each div1 candidate against
 * div2 = 10..1 until div1 * div2 * clock * 5 yields a DCO inside the valid
 * range (which is a single fixed point, 8.1 GHz, for DP). On success fill
 * in the clktop2 register state and return the chosen DCO via
 * *target_dco_khz.
 * NOTE(review): the hsdiv case labels and return paths are elided in this
 * dump — verify against the full source.
 */
2796 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2797 u32 *target_dco_khz,
2798 struct intel_dpll_hw_state *state,
2801 static const u8 div1_vals[] = { 7, 5, 3, 2 };
2802 u32 dco_min_freq, dco_max_freq;
/* DP pins the DCO at exactly 8.1 GHz; otherwise the SSC flag widens/narrows the floor. */
2806 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2807 dco_max_freq = is_dp ? 8100000 : 10000000;
2809 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2810 int div1 = div1_vals[i];
2812 for (div2 = 10; div2 > 0; div2--) {
2813 int dco = div1 * div2 * clock_khz * 5;
2814 int a_divratio, tlinedrv, inputsel;
2817 if (dco < dco_min_freq || dco > dco_max_freq)
2822 * Note: a_divratio not matching TGL BSpec
2823 * algorithm but matching hardcoded values and
2824 * working on HW for DP alt-mode at least
2826 a_divratio = is_dp ? 10 : 5;
2827 tlinedrv = is_dkl ? 1 : 2;
2832 inputsel = is_dp ? 0 : 1;
2839 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2842 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2845 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2848 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2852 *target_dco_khz = dco;
2854 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2856 state->mg_clktop2_coreclkctl1 =
2857 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2859 state->mg_clktop2_hsclkctl =
2860 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2861 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2863 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2873 * The specification for this function uses real numbers, so the math had to be
2874 * adapted to integer-only calculation, that's why it looks so different.
/*
 * Compute the full MG (ICL) or DKL (TGL+) PHY PLL register state for the
 * CRTC's port clock: find the divisors, derive the integer/fractional
 * feedback divider, TDC target count, feed-forward gain and SSC step
 * parameters, then pack them into either the DKL_* or MG_* register layout.
 * All spec formulas are rearranged to integer-only arithmetic (see the
 * comment block above this function).
 * NOTE(review): several interior lines (error returns, the refclk switch
 * arms, the is_dkl branch header) are elided in this dump.
 */
2876 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2877 struct intel_dpll_hw_state *pll_state)
2879 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2880 int refclk_khz = i915->display.dpll.ref_clks.nssc;
2881 int clock = crtc_state->port_clock;
2882 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2883 u32 iref_ndiv, iref_trim, iref_pulse_w;
2884 u32 prop_coeff, int_coeff;
2885 u32 tdc_targetcnt, feedfwgain;
2886 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2888 bool use_ssc = false;
2889 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2890 bool is_dkl = DISPLAY_VER(i915) >= 12;
2893 ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
/* m2 integer part must fit in 8 bits; retry (with a different m1div) once. */
2899 m2div_int = dco_khz / (refclk_khz * m1div);
2900 if (m2div_int > 255) {
2903 m2div_int = dco_khz / (refclk_khz * m1div);
2906 if (m2div_int > 255)
2909 m2div_rem = dco_khz % (refclk_khz * m1div);
/* Fractional part in 1/2^22 units of refclk * m1div. */
2911 tmp = (u64)m2div_rem * (1 << 22);
2912 do_div(tmp, refclk_khz * m1div);
2915 switch (refclk_khz) {
2932 MISSING_CASE(refclk_khz);
2937 * tdc_res = 0.000003
2938 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2940 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2941 * was supposed to be a division, but we rearranged the operations of
2942 * the formula to avoid early divisions so we don't multiply the
2945 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2946 * we also rearrange to work with integers.
2948 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2949 * last division by 10.
2951 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2954 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2955 * 32 bits. That's not a problem since we round the division down
2958 feedfwgain = (use_ssc || m2div_rem > 0) ?
2959 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2961 if (dco_khz >= 9000000) {
/* SSC step size/length/log, computed only when use_ssc (elided branch). */
2970 tmp = mul_u32_u32(dco_khz, 47 * 32);
2971 do_div(tmp, refclk_khz * m1div * 10000);
2974 tmp = mul_u32_u32(dco_khz, 1000);
2975 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2982 /* write pll_state calculations */
/* DKL (TGL+) register layout. */
2984 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2985 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2986 DKL_PLL_DIV0_FBPREDIV(m1div) |
2987 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2988 if (i915->display.vbt.override_afc_startup) {
2989 u8 val = i915->display.vbt.override_afc_startup_val;
2991 pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2994 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2995 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2997 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2998 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2999 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3000 (use_ssc ? DKL_PLL_SSC_EN : 0);
3002 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3003 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3005 pll_state->mg_pll_tdc_coldst_bias =
3006 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3007 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
/* MG (ICL) register layout. */
3010 pll_state->mg_pll_div0 =
3011 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3012 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3013 MG_PLL_DIV0_FBDIV_INT(m2div_int);
3015 pll_state->mg_pll_div1 =
3016 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3017 MG_PLL_DIV1_DITHER_DIV_2 |
3018 MG_PLL_DIV1_NDIVRATIO(1) |
3019 MG_PLL_DIV1_FBPREDIV(m1div);
3021 pll_state->mg_pll_lf =
3022 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3023 MG_PLL_LF_AFCCNTSEL_512 |
3024 MG_PLL_LF_GAINCTRL(1) |
3025 MG_PLL_LF_INT_COEFF(int_coeff) |
3026 MG_PLL_LF_PROP_COEFF(prop_coeff);
3028 pll_state->mg_pll_frac_lock =
3029 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3030 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3031 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3032 MG_PLL_FRAC_LOCK_DCODITHEREN |
3033 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3034 if (use_ssc || m2div_rem > 0)
3035 pll_state->mg_pll_frac_lock |=
3036 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3038 pll_state->mg_pll_ssc =
3039 (use_ssc ? MG_PLL_SSC_EN : 0) |
3040 MG_PLL_SSC_TYPE(2) |
3041 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3042 MG_PLL_SSC_STEPNUM(ssc_steplog) |
3044 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3046 pll_state->mg_pll_tdc_coldst_bias =
3047 MG_PLL_TDC_COLDST_COLDSTART |
3048 MG_PLL_TDC_COLDST_IREFINT_EN |
3049 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3050 MG_PLL_TDC_TDCOVCCORR_EN |
3051 MG_PLL_TDC_TDCSEL(3);
3053 pll_state->mg_pll_bias =
3054 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3055 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3056 MG_PLL_BIAS_BIAS_BONUS(10) |
3057 MG_PLL_BIAS_BIASCAL_EN |
3058 MG_PLL_BIAS_CTRIM(12) |
3059 MG_PLL_BIAS_VREF_RDAC(4) |
3060 MG_PLL_BIAS_IREFTRIM(iref_trim);
/*
 * With a 38.4 MHz refclk only the coldstart bit is driver-owned; the
 * masks below are used by the RMW writes and the readout comparison.
 */
3062 if (refclk_khz == 38400) {
3063 pll_state->mg_pll_tdc_coldst_bias_mask =
3064 MG_PLL_TDC_COLDST_COLDSTART;
3065 pll_state->mg_pll_bias_mask = 0;
3067 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3068 pll_state->mg_pll_bias_mask = -1U;
3071 pll_state->mg_pll_tdc_coldst_bias &=
3072 pll_state->mg_pll_tdc_coldst_bias_mask;
3073 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
/*
 * Decode the MG/DKL PHY PLL output frequency from register state: extract
 * m1 (pre-divider), m2 integer/fraction (feedback divider) per register
 * layout, then divide the reconstructed DCO by the hsdiv (div1) and dsdiv
 * (div2) post-dividers and the x5 AFE factor.
 */
3079 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3080 const struct intel_shared_dpll *pll,
3081 const struct intel_dpll_hw_state *pll_state)
3083 u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3086 ref_clock = i915->display.dpll.ref_clks.nssc;
/* DKL (TGL+) layout vs. MG (ICL) layout differ in where each field lives. */
3088 if (DISPLAY_VER(i915) >= 12) {
3089 m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3090 m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3091 m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3093 if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3094 m2_frac = pll_state->mg_pll_bias &
3095 DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3096 m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3101 m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3102 m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3104 if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3105 m2_frac = pll_state->mg_pll_div0 &
3106 MG_PLL_DIV0_FBDIV_FRAC_MASK;
3107 m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3113 switch (pll_state->mg_clktop2_hsclkctl &
3114 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3115 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3118 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3121 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3124 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3128 MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3132 div2 = (pll_state->mg_clktop2_hsclkctl &
3133 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3134 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3136 /* div2 value of 0 is same as 1 means no div */
3141 * Adjust the original formula to delay the division by 2^22 in order to
3142 * minimize possible rounding errors.
3144 tmp = (u64)m1 * m2_int * ref_clock +
3145 (((u64)m1 * m2_frac * ref_clock) >> 22);
3146 tmp = div_u64(tmp, 5 * div1 * div2);
3152 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3153 * @crtc_state: state for the CRTC to select the DPLL for
3154 * @port_dpll_id: the active @port_dpll_id to select
3156 * Select the given @port_dpll_id instance from the DPLLs reserved for the
/* Copy the reserved per-port DPLL instance into the CRTC's active slots. */
3159 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3160 enum icl_port_dpll_id port_dpll_id)
3162 struct icl_port_dpll *port_dpll =
3163 &crtc_state->icl_port_dplls[port_dpll_id];
3165 crtc_state->shared_dpll = port_dpll->pll;
3166 crtc_state->dpll_hw_state = port_dpll->hw_state;
/*
 * Choose which of the reserved port DPLLs is active for the encoder: the
 * MG PHY PLL when the (primary) TC port is in DP-alt or legacy mode,
 * otherwise the default (TBT) PLL. For MST, the decision is made on the
 * primary port of the MST stream.
 */
3169 static void icl_update_active_dpll(struct intel_atomic_state *state,
3170 struct intel_crtc *crtc,
3171 struct intel_encoder *encoder)
3173 struct intel_crtc_state *crtc_state =
3174 intel_atomic_get_new_crtc_state(state, crtc);
3175 struct intel_digital_port *primary_port;
3176 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3178 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3179 enc_to_mst(encoder)->primary :
3180 enc_to_dig_port(encoder);
3183 (intel_tc_port_in_dp_alt_mode(primary_port) ||
3184 intel_tc_port_in_legacy_mode(primary_port)))
3185 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3187 icl_set_active_port_dpll(crtc_state, port_dpll_id);
/*
 * Compute the combo-PHY DPLL state for the CRTC: WRPLL params for HDMI/DSI,
 * table lookup for DP, then pack the register state and recompute
 * port_clock from it so the fastset check compares like with like.
 */
3190 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3191 struct intel_crtc *crtc)
3193 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3194 struct intel_crtc_state *crtc_state =
3195 intel_atomic_get_new_crtc_state(state, crtc);
3196 struct icl_port_dpll *port_dpll =
3197 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3198 struct skl_wrpll_params pll_params = {};
3201 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3202 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3203 ret = icl_calc_wrpll(crtc_state, &pll_params);
3205 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3210 icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3212 /* this is mainly for the fastset check */
3213 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
/* Round-trip port_clock through the register decode for consistency. */
3215 crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3216 &port_dpll->hw_state);
/*
 * Reserve a shared combo-PHY DPLL for the CRTC. The candidate PLL mask is
 * platform- (and for DG1/JSL/EHL, port-)specific; PLLs claimed by HTI are
 * excluded. On success the PLL is referenced and made the active port DPLL.
 */
3221 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3222 struct intel_crtc *crtc,
3223 struct intel_encoder *encoder)
3225 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3226 struct intel_crtc_state *crtc_state =
3227 intel_atomic_get_new_crtc_state(state, crtc);
3228 struct icl_port_dpll *port_dpll =
3229 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3230 enum port port = encoder->port;
3231 unsigned long dpll_mask;
3233 if (IS_ALDERLAKE_S(i915)) {
3235 BIT(DPLL_ID_DG1_DPLL3) |
3236 BIT(DPLL_ID_DG1_DPLL2) |
3237 BIT(DPLL_ID_ICL_DPLL1) |
3238 BIT(DPLL_ID_ICL_DPLL0);
3239 } else if (IS_DG1(i915)) {
/* DG1: ports D/E use DPLL2/3, ports A/B/C use DPLL0/1. */
3240 if (port == PORT_D || port == PORT_E) {
3242 BIT(DPLL_ID_DG1_DPLL2) |
3243 BIT(DPLL_ID_DG1_DPLL3);
3246 BIT(DPLL_ID_DG1_DPLL0) |
3247 BIT(DPLL_ID_DG1_DPLL1);
3249 } else if (IS_ROCKETLAKE(i915)) {
3251 BIT(DPLL_ID_EHL_DPLL4) |
3252 BIT(DPLL_ID_ICL_DPLL1) |
3253 BIT(DPLL_ID_ICL_DPLL0);
3254 } else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3257 BIT(DPLL_ID_EHL_DPLL4) |
3258 BIT(DPLL_ID_ICL_DPLL1) |
3259 BIT(DPLL_ID_ICL_DPLL0);
3261 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3264 /* Eliminate DPLLs from consideration if reserved by HTI */
3265 dpll_mask &= ~intel_hti_dpll_mask(i915);
3267 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3268 &port_dpll->hw_state,
3270 if (!port_dpll->pll)
3273 intel_reference_shared_dpll(state, crtc,
3274 port_dpll->pll, &port_dpll->hw_state);
3276 icl_update_active_dpll(state, crtc, encoder);
/*
 * Compute both PLL states a Type-C port may need: the TBT PLL (default
 * slot) and the MG/DKL PHY PLL. port_clock is recomputed from the MG PLL
 * state, mirroring what the fastset check will read back.
 */
3281 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3282 struct intel_crtc *crtc)
3284 struct drm_i915_private *i915 = to_i915(state->base.dev);
3285 struct intel_crtc_state *crtc_state =
3286 intel_atomic_get_new_crtc_state(state, crtc);
3287 struct icl_port_dpll *port_dpll =
3288 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3289 struct skl_wrpll_params pll_params = {};
3292 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3293 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3297 icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3299 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3300 ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3304 /* this is mainly for the fastset check */
3305 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3307 crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3308 &port_dpll->hw_state);
/*
 * Reserve both PLLs for a Type-C port: the shared TBT PLL and the
 * per-TC-port MG/DKL PHY PLL. If the MG PLL can't be found, the TBT PLL
 * reference taken earlier is dropped on the error path.
 */
3313 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3314 struct intel_crtc *crtc,
3315 struct intel_encoder *encoder)
3317 struct drm_i915_private *i915 = to_i915(state->base.dev);
3318 struct intel_crtc_state *crtc_state =
3319 intel_atomic_get_new_crtc_state(state, crtc);
3320 struct icl_port_dpll *port_dpll =
3321 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3322 enum intel_dpll_id dpll_id;
3325 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3326 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3327 &port_dpll->hw_state,
3328 BIT(DPLL_ID_ICL_TBTPLL));
3329 if (!port_dpll->pll)
3331 intel_reference_shared_dpll(state, crtc,
3332 port_dpll->pll, &port_dpll->hw_state);
3335 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
/* Each TC port has a dedicated MG PHY PLL, mapped from the port's TC lane. */
3336 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
3338 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3339 &port_dpll->hw_state,
3341 if (!port_dpll->pll) {
3343 goto err_unreference_tbt_pll;
3345 intel_reference_shared_dpll(state, crtc,
3346 port_dpll->pll, &port_dpll->hw_state);
3348 icl_update_active_dpll(state, crtc, encoder);
3352 err_unreference_tbt_pll:
3353 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3354 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
/* Dispatch DPLL state computation by PHY type (combo vs. Type-C). */
3359 static int icl_compute_dplls(struct intel_atomic_state *state,
3360 struct intel_crtc *crtc,
3361 struct intel_encoder *encoder)
3363 struct drm_i915_private *i915 = to_i915(state->base.dev);
3364 enum phy phy = intel_port_to_phy(i915, encoder->port);
3366 if (intel_phy_is_combo(i915, phy))
3367 return icl_compute_combo_phy_dpll(state, crtc);
3368 else if (intel_phy_is_tc(i915, phy))
3369 return icl_compute_tc_phy_dplls(state, crtc);
/* Dispatch DPLL reservation by PHY type (combo vs. Type-C). */
3376 static int icl_get_dplls(struct intel_atomic_state *state,
3377 struct intel_crtc *crtc,
3378 struct intel_encoder *encoder)
3380 struct drm_i915_private *i915 = to_i915(state->base.dev);
3381 enum phy phy = intel_port_to_phy(i915, encoder->port);
3383 if (intel_phy_is_combo(i915, phy))
3384 return icl_get_combo_phy_dpll(state, crtc, encoder);
3385 else if (intel_phy_is_tc(i915, phy))
3386 return icl_get_tc_phy_dplls(state, crtc, encoder);
/*
 * Release every port DPLL reference held by the old CRTC state and clear
 * the PLL pointers in the new state.
 */
3393 static void icl_put_dplls(struct intel_atomic_state *state,
3394 struct intel_crtc *crtc)
3396 const struct intel_crtc_state *old_crtc_state =
3397 intel_atomic_get_old_crtc_state(state, crtc);
3398 struct intel_crtc_state *new_crtc_state =
3399 intel_atomic_get_new_crtc_state(state, crtc);
3400 enum icl_port_dpll_id id;
3402 new_crtc_state->shared_dpll = NULL;
3404 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3405 const struct icl_port_dpll *old_port_dpll =
3406 &old_crtc_state->icl_port_dplls[id];
3407 struct icl_port_dpll *new_port_dpll =
3408 &new_crtc_state->icl_port_dplls[id];
3410 new_port_dpll->pll = NULL;
/* Skip slots that were never reserved. */
3412 if (!old_port_dpll->pll)
3415 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
/*
 * Read back the hardware state of an ICL MG PHY PLL. Returns early (PLL
 * disabled) unless PLL_ENABLE is set; each register value is masked down to
 * the driver-owned bits so state comparison ignores reserved fields. The
 * display-core power wakeref guards all register reads.
 */
3419 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3420 struct intel_shared_dpll *pll,
3421 struct intel_dpll_hw_state *hw_state)
3423 const enum intel_dpll_id id = pll->info->id;
3424 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3425 intel_wakeref_t wakeref;
3429 i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3431 wakeref = intel_display_power_get_if_enabled(i915,
3432 POWER_DOMAIN_DISPLAY_CORE);
3436 val = intel_de_read(i915, enable_reg);
3437 if (!(val & PLL_ENABLE))
3440 hw_state->mg_refclkin_ctl = intel_de_read(i915,
3441 MG_REFCLKIN_CTL(tc_port));
3442 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3444 hw_state->mg_clktop2_coreclkctl1 =
3445 intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3446 hw_state->mg_clktop2_coreclkctl1 &=
3447 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3449 hw_state->mg_clktop2_hsclkctl =
3450 intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3451 hw_state->mg_clktop2_hsclkctl &=
3452 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3453 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3454 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3455 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3457 hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3458 hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3459 hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3460 hw_state->mg_pll_frac_lock = intel_de_read(i915,
3461 MG_PLL_FRAC_LOCK(tc_port));
3462 hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3464 hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3465 hw_state->mg_pll_tdc_coldst_bias =
3466 intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
/* Same 38.4 MHz masking policy as icl_calc_mg_pll_state(). */
3468 if (i915->display.dpll.ref_clks.nssc == 38400) {
3469 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3470 hw_state->mg_pll_bias_mask = 0;
3472 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3473 hw_state->mg_pll_bias_mask = -1U;
3476 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3477 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3481 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Read back the hardware state of a TGL+ Dekel (DKL) PHY PLL via the DKL
 * PHY accessors. As with the MG variant, each value is masked to the
 * driver-owned bits; the AFC startup field is only kept when the VBT
 * override is in effect.
 */
3485 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3486 struct intel_shared_dpll *pll,
3487 struct intel_dpll_hw_state *hw_state)
3489 const enum intel_dpll_id id = pll->info->id;
3490 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3491 intel_wakeref_t wakeref;
3495 wakeref = intel_display_power_get_if_enabled(i915,
3496 POWER_DOMAIN_DISPLAY_CORE);
3500 val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3501 if (!(val & PLL_ENABLE))
3505 * All registers read here have the same HIP_INDEX_REG even though
3506 * they are on different building blocks
3508 hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3509 DKL_REFCLKIN_CTL(tc_port));
3510 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3512 hw_state->mg_clktop2_hsclkctl =
3513 intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3514 hw_state->mg_clktop2_hsclkctl &=
3515 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3516 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3517 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3518 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3520 hw_state->mg_clktop2_coreclkctl1 =
3521 intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3522 hw_state->mg_clktop2_coreclkctl1 &=
3523 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3525 hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3526 val = DKL_PLL_DIV0_MASK;
3527 if (i915->display.vbt.override_afc_startup)
3528 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3529 hw_state->mg_pll_div0 &= val;
3531 hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3532 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3533 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3535 hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3536 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3537 DKL_PLL_SSC_STEP_LEN_MASK |
3538 DKL_PLL_SSC_STEP_NUM_MASK |
3541 hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3542 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3543 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3545 hw_state->mg_pll_tdc_coldst_bias =
3546 intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3547 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3548 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3552 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Shared combo/TBT PLL readout: bail if the PLL is disabled, then read
 * cfgcr0/cfgcr1 (and, with the VBT AFC override on TGL+, div0) from the
 * per-platform register variant. On JSL/EHL the DPLL4 registers live at
 * a different index (4) than the DPLL id.
 */
3556 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3557 struct intel_shared_dpll *pll,
3558 struct intel_dpll_hw_state *hw_state,
3559 i915_reg_t enable_reg)
3561 const enum intel_dpll_id id = pll->info->id;
3562 intel_wakeref_t wakeref;
3566 wakeref = intel_display_power_get_if_enabled(i915,
3567 POWER_DOMAIN_DISPLAY_CORE);
3571 val = intel_de_read(i915, enable_reg);
3572 if (!(val & PLL_ENABLE))
3575 if (IS_ALDERLAKE_S(i915)) {
3576 hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3577 hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3578 } else if (IS_DG1(i915)) {
3579 hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3580 hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3581 } else if (IS_ROCKETLAKE(i915)) {
3582 hw_state->cfgcr0 = intel_de_read(i915,
3583 RKL_DPLL_CFGCR0(id));
3584 hw_state->cfgcr1 = intel_de_read(i915,
3585 RKL_DPLL_CFGCR1(id));
3586 } else if (DISPLAY_VER(i915) >= 12) {
3587 hw_state->cfgcr0 = intel_de_read(i915,
3588 TGL_DPLL_CFGCR0(id));
3589 hw_state->cfgcr1 = intel_de_read(i915,
3590 TGL_DPLL_CFGCR1(id));
3591 if (i915->display.vbt.override_afc_startup) {
3592 hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3593 hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3596 if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3597 id == DPLL_ID_EHL_DPLL4) {
3598 hw_state->cfgcr0 = intel_de_read(i915,
3599 ICL_DPLL_CFGCR0(4));
3600 hw_state->cfgcr1 = intel_de_read(i915,
3601 ICL_DPLL_CFGCR1(4));
3603 hw_state->cfgcr0 = intel_de_read(i915,
3604 ICL_DPLL_CFGCR0(id));
3605 hw_state->cfgcr1 = intel_de_read(i915,
3606 ICL_DPLL_CFGCR1(id));
3612 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/* Combo PLL readout: resolve the per-PLL enable register, then delegate. */
3616 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3617 struct intel_shared_dpll *pll,
3618 struct intel_dpll_hw_state *hw_state)
3620 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3622 return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
/* TBT PLL readout: fixed enable register, shared readout path. */
3625 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3626 struct intel_shared_dpll *pll,
3627 struct intel_dpll_hw_state *hw_state)
3629 return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
/*
 * Program a combo/TBT DPLL's cfgcr0/cfgcr1 (and div0 AFC-startup bits when
 * the VBT override applies) using the per-platform register variant. The
 * WARN catches an AFC override on a platform without a div0 register.
 */
3632 static void icl_dpll_write(struct drm_i915_private *i915,
3633 struct intel_shared_dpll *pll)
3635 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3636 const enum intel_dpll_id id = pll->info->id;
3637 i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3639 if (IS_ALDERLAKE_S(i915)) {
3640 cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3641 cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3642 } else if (IS_DG1(i915)) {
3643 cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3644 cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3645 } else if (IS_ROCKETLAKE(i915)) {
3646 cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3647 cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3648 } else if (DISPLAY_VER(i915) >= 12) {
3649 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3650 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3651 div0_reg = TGL_DPLL0_DIV0(id);
/* JSL/EHL DPLL4 registers live at index 4, not at the DPLL id. */
3653 if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3654 id == DPLL_ID_EHL_DPLL4) {
3655 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3656 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3658 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3659 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3663 intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3664 intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3665 drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3666 !i915_mmio_reg_valid(div0_reg));
3667 if (i915->display.vbt.override_afc_startup &&
3668 i915_mmio_reg_valid(div0_reg))
3669 intel_de_rmw(i915, div0_reg,
3670 TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
/* Posting read flushes the writes before the PLL is enabled. */
3671 intel_de_posting_read(i915, cfgcr1_reg);
/*
 * Program an ICL MG PHY PLL from the computed hw_state. Registers with
 * reserved fields are written via RMW using fixed masks, or the
 * refclk-dependent bias/coldst masks produced by icl_calc_mg_pll_state().
 */
3674 static void icl_mg_pll_write(struct drm_i915_private *i915,
3675 struct intel_shared_dpll *pll)
3677 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3678 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3681 * Some of the following registers have reserved fields, so program
3682 * these with RMW based on a mask. The mask can be fixed or generated
3683 * during the calc/readout phase if the mask depends on some other HW
3684 * state like refclk, see icl_calc_mg_pll_state().
3686 intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3687 MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3689 intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3690 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3691 hw_state->mg_clktop2_coreclkctl1);
3693 intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3694 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3695 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3696 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3697 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3698 hw_state->mg_clktop2_hsclkctl);
/* These registers are fully driver-owned: plain writes. */
3700 intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3701 intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3702 intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3703 intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3704 hw_state->mg_pll_frac_lock);
3705 intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3707 intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3708 hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3710 intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3711 hw_state->mg_pll_tdc_coldst_bias_mask,
3712 hw_state->mg_pll_tdc_coldst_bias);
3714 intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
/*
 * Program a TGL+ Dekel (DKL) PHY PLL from the computed hw_state. Every
 * register is updated read-modify-write through the DKL PHY accessors,
 * clearing only the driver-owned field masks before OR-ing in new values.
 */
3717 static void dkl_pll_write(struct drm_i915_private *i915,
3718 struct intel_shared_dpll *pll)
3720 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3721 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3725 * All registers programmed here have the same HIP_INDEX_REG even
3726 * though on different building block
3728 /* All the registers are RMW */
3729 val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3730 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3731 val |= hw_state->mg_refclkin_ctl;
3732 intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3734 val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3735 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3736 val |= hw_state->mg_clktop2_coreclkctl1;
3737 intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3739 val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3740 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3741 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3742 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3743 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3744 val |= hw_state->mg_clktop2_hsclkctl;
3745 intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
/* DIV0: the AFC startup field is only driver-owned with the VBT override. */
3747 val = DKL_PLL_DIV0_MASK;
3748 if (i915->display.vbt.override_afc_startup)
3749 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3750 intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3751 hw_state->mg_pll_div0);
3753 val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3754 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3755 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3756 val |= hw_state->mg_pll_div1;
3757 intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3759 val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3760 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3761 DKL_PLL_SSC_STEP_LEN_MASK |
3762 DKL_PLL_SSC_STEP_NUM_MASK |
3764 val |= hw_state->mg_pll_ssc;
3765 intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3767 val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3768 val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3769 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3770 val |= hw_state->mg_pll_bias;
3771 intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3773 val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3774 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3775 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3776 val |= hw_state->mg_pll_tdc_coldst_bias;
3777 intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3779 intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3782 static void icl_pll_power_enable(struct drm_i915_private *i915,
3783 struct intel_shared_dpll *pll,
3784 i915_reg_t enable_reg)
3786 intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3789 * The spec says we need to "wait" but it also says it should be
3792 if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3793 drm_err(&i915->drm, "PLL %d Power not enabled\n",
3797 static void icl_pll_enable(struct drm_i915_private *i915,
3798 struct intel_shared_dpll *pll,
3799 i915_reg_t enable_reg)
3801 intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3803 /* Timeout is actually 600us. */
3804 if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3805 drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3808 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3812 if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3813 pll->info->id != DPLL_ID_ICL_DPLL0)
3816 * Wa_16011069516:adl-p[a0]
3818 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3819 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3820 * sanity check this assumption with a double read, which presumably
3821 * returns the correct value even with clock gating on.
3823 * Instead of the usual place for workarounds we apply this one here,
3824 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3826 val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3827 val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3828 if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3829 drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3832 static void combo_pll_enable(struct drm_i915_private *i915,
3833 struct intel_shared_dpll *pll)
3835 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3837 if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3838 pll->info->id == DPLL_ID_EHL_DPLL4) {
3841 * We need to disable DC states when this DPLL is enabled.
3842 * This can be done by taking a reference on DPLL4 power
3845 pll->wakeref = intel_display_power_get(i915,
3846 POWER_DOMAIN_DC_OFF);
3849 icl_pll_power_enable(i915, pll, enable_reg);
3851 icl_dpll_write(i915, pll);
3854 * DVFS pre sequence would be here, but in our driver the cdclk code
3855 * paths should already be setting the appropriate voltage, hence we do
3859 icl_pll_enable(i915, pll, enable_reg);
3861 adlp_cmtg_clock_gating_wa(i915, pll);
3863 /* DVFS post sequence would be here. See the comment above. */
3866 static void tbt_pll_enable(struct drm_i915_private *i915,
3867 struct intel_shared_dpll *pll)
3869 icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3871 icl_dpll_write(i915, pll);
3874 * DVFS pre sequence would be here, but in our driver the cdclk code
3875 * paths should already be setting the appropriate voltage, hence we do
3879 icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3881 /* DVFS post sequence would be here. See the comment above. */
3884 static void mg_pll_enable(struct drm_i915_private *i915,
3885 struct intel_shared_dpll *pll)
3887 i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3889 icl_pll_power_enable(i915, pll, enable_reg);
3891 if (DISPLAY_VER(i915) >= 12)
3892 dkl_pll_write(i915, pll);
3894 icl_mg_pll_write(i915, pll);
3897 * DVFS pre sequence would be here, but in our driver the cdclk code
3898 * paths should already be setting the appropriate voltage, hence we do
3902 icl_pll_enable(i915, pll, enable_reg);
3904 /* DVFS post sequence would be here. See the comment above. */
3907 static void icl_pll_disable(struct drm_i915_private *i915,
3908 struct intel_shared_dpll *pll,
3909 i915_reg_t enable_reg)
3911 /* The first steps are done by intel_ddi_post_disable(). */
3914 * DVFS pre sequence would be here, but in our driver the cdclk code
3915 * paths should already be setting the appropriate voltage, hence we do
3919 intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
3921 /* Timeout is actually 1us. */
3922 if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
3923 drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
3925 /* DVFS post sequence would be here. See the comment above. */
3927 intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
3930 * The spec says we need to "wait" but it also says it should be
3933 if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
3934 drm_err(&i915->drm, "PLL %d Power not disabled\n",
3938 static void combo_pll_disable(struct drm_i915_private *i915,
3939 struct intel_shared_dpll *pll)
3941 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3943 icl_pll_disable(i915, pll, enable_reg);
3945 if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3946 pll->info->id == DPLL_ID_EHL_DPLL4)
3947 intel_display_power_put(i915, POWER_DOMAIN_DC_OFF,
3951 static void tbt_pll_disable(struct drm_i915_private *i915,
3952 struct intel_shared_dpll *pll)
3954 icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
3957 static void mg_pll_disable(struct drm_i915_private *i915,
3958 struct intel_shared_dpll *pll)
3960 i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3962 icl_pll_disable(i915, pll, enable_reg);
3965 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
3968 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3971 static void icl_dump_hw_state(struct drm_i915_private *i915,
3972 const struct intel_dpll_hw_state *hw_state)
3974 drm_dbg_kms(&i915->drm,
3975 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3976 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3977 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3978 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3979 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3980 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3981 hw_state->cfgcr0, hw_state->cfgcr1,
3983 hw_state->mg_refclkin_ctl,
3984 hw_state->mg_clktop2_coreclkctl1,
3985 hw_state->mg_clktop2_hsclkctl,
3986 hw_state->mg_pll_div0,
3987 hw_state->mg_pll_div1,
3988 hw_state->mg_pll_lf,
3989 hw_state->mg_pll_frac_lock,
3990 hw_state->mg_pll_ssc,
3991 hw_state->mg_pll_bias,
3992 hw_state->mg_pll_tdc_coldst_bias);
3995 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3996 .enable = combo_pll_enable,
3997 .disable = combo_pll_disable,
3998 .get_hw_state = combo_pll_get_hw_state,
3999 .get_freq = icl_ddi_combo_pll_get_freq,
4002 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4003 .enable = tbt_pll_enable,
4004 .disable = tbt_pll_disable,
4005 .get_hw_state = tbt_pll_get_hw_state,
4006 .get_freq = icl_ddi_tbt_pll_get_freq,
4009 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4010 .enable = mg_pll_enable,
4011 .disable = mg_pll_disable,
4012 .get_hw_state = mg_pll_get_hw_state,
4013 .get_freq = icl_ddi_mg_pll_get_freq,
4016 static const struct dpll_info icl_plls[] = {
4017 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4018 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4019 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4020 { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4021 { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4022 { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4023 { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4027 static const struct intel_dpll_mgr icl_pll_mgr = {
4028 .dpll_info = icl_plls,
4029 .compute_dplls = icl_compute_dplls,
4030 .get_dplls = icl_get_dplls,
4031 .put_dplls = icl_put_dplls,
4032 .update_active_dpll = icl_update_active_dpll,
4033 .update_ref_clks = icl_update_dpll_ref_clks,
4034 .dump_hw_state = icl_dump_hw_state,
4037 static const struct dpll_info ehl_plls[] = {
4038 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4039 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4040 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4044 static const struct intel_dpll_mgr ehl_pll_mgr = {
4045 .dpll_info = ehl_plls,
4046 .compute_dplls = icl_compute_dplls,
4047 .get_dplls = icl_get_dplls,
4048 .put_dplls = icl_put_dplls,
4049 .update_ref_clks = icl_update_dpll_ref_clks,
4050 .dump_hw_state = icl_dump_hw_state,
4053 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4054 .enable = mg_pll_enable,
4055 .disable = mg_pll_disable,
4056 .get_hw_state = dkl_pll_get_hw_state,
4057 .get_freq = icl_ddi_mg_pll_get_freq,
4060 static const struct dpll_info tgl_plls[] = {
4061 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4062 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4063 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4064 { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4065 { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4066 { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4067 { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4068 { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4069 { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4073 static const struct intel_dpll_mgr tgl_pll_mgr = {
4074 .dpll_info = tgl_plls,
4075 .compute_dplls = icl_compute_dplls,
4076 .get_dplls = icl_get_dplls,
4077 .put_dplls = icl_put_dplls,
4078 .update_active_dpll = icl_update_active_dpll,
4079 .update_ref_clks = icl_update_dpll_ref_clks,
4080 .dump_hw_state = icl_dump_hw_state,
4083 static const struct dpll_info rkl_plls[] = {
4084 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4085 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4086 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4090 static const struct intel_dpll_mgr rkl_pll_mgr = {
4091 .dpll_info = rkl_plls,
4092 .compute_dplls = icl_compute_dplls,
4093 .get_dplls = icl_get_dplls,
4094 .put_dplls = icl_put_dplls,
4095 .update_ref_clks = icl_update_dpll_ref_clks,
4096 .dump_hw_state = icl_dump_hw_state,
4099 static const struct dpll_info dg1_plls[] = {
4100 { "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
4101 { "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
4102 { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4103 { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4107 static const struct intel_dpll_mgr dg1_pll_mgr = {
4108 .dpll_info = dg1_plls,
4109 .compute_dplls = icl_compute_dplls,
4110 .get_dplls = icl_get_dplls,
4111 .put_dplls = icl_put_dplls,
4112 .update_ref_clks = icl_update_dpll_ref_clks,
4113 .dump_hw_state = icl_dump_hw_state,
4116 static const struct dpll_info adls_plls[] = {
4117 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4118 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4119 { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4120 { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4124 static const struct intel_dpll_mgr adls_pll_mgr = {
4125 .dpll_info = adls_plls,
4126 .compute_dplls = icl_compute_dplls,
4127 .get_dplls = icl_get_dplls,
4128 .put_dplls = icl_put_dplls,
4129 .update_ref_clks = icl_update_dpll_ref_clks,
4130 .dump_hw_state = icl_dump_hw_state,
4133 static const struct dpll_info adlp_plls[] = {
4134 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4135 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4136 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4137 { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4138 { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4139 { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4140 { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4144 static const struct intel_dpll_mgr adlp_pll_mgr = {
4145 .dpll_info = adlp_plls,
4146 .compute_dplls = icl_compute_dplls,
4147 .get_dplls = icl_get_dplls,
4148 .put_dplls = icl_put_dplls,
4149 .update_active_dpll = icl_update_active_dpll,
4150 .update_ref_clks = icl_update_dpll_ref_clks,
4151 .dump_hw_state = icl_dump_hw_state,
4155 * intel_shared_dpll_init - Initialize shared DPLLs
4156 * @i915: i915 device
4158 * Initialize shared DPLLs for @i915.
4160 void intel_shared_dpll_init(struct drm_i915_private *i915)
4162 const struct intel_dpll_mgr *dpll_mgr = NULL;
4163 const struct dpll_info *dpll_info;
4166 mutex_init(&i915->display.dpll.lock);
4168 if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4169 /* No shared DPLLs on DG2; port PLLs are part of the PHY */
4171 else if (IS_ALDERLAKE_P(i915))
4172 dpll_mgr = &adlp_pll_mgr;
4173 else if (IS_ALDERLAKE_S(i915))
4174 dpll_mgr = &adls_pll_mgr;
4175 else if (IS_DG1(i915))
4176 dpll_mgr = &dg1_pll_mgr;
4177 else if (IS_ROCKETLAKE(i915))
4178 dpll_mgr = &rkl_pll_mgr;
4179 else if (DISPLAY_VER(i915) >= 12)
4180 dpll_mgr = &tgl_pll_mgr;
4181 else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4182 dpll_mgr = &ehl_pll_mgr;
4183 else if (DISPLAY_VER(i915) >= 11)
4184 dpll_mgr = &icl_pll_mgr;
4185 else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4186 dpll_mgr = &bxt_pll_mgr;
4187 else if (DISPLAY_VER(i915) == 9)
4188 dpll_mgr = &skl_pll_mgr;
4189 else if (HAS_DDI(i915))
4190 dpll_mgr = &hsw_pll_mgr;
4191 else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4192 dpll_mgr = &pch_pll_mgr;
4197 dpll_info = dpll_mgr->dpll_info;
4199 for (i = 0; dpll_info[i].name; i++) {
4200 if (drm_WARN_ON(&i915->drm,
4201 i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4204 /* must fit into unsigned long bitmask on 32bit */
4205 if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4208 i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4209 i915->display.dpll.shared_dplls[i].index = i;
4212 i915->display.dpll.mgr = dpll_mgr;
4213 i915->display.dpll.num_shared_dpll = i;
4217 * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4218 * @state: atomic state
4219 * @crtc: CRTC to compute DPLLs for
4222 * This function computes the DPLL state for the given CRTC and encoder.
4224 * The new configuration in the atomic commit @state is made effective by
4225 * calling intel_shared_dpll_swap_state().
4228 * 0 on success, negative error code on falure.
4230 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4231 struct intel_crtc *crtc,
4232 struct intel_encoder *encoder)
4234 struct drm_i915_private *i915 = to_i915(state->base.dev);
4235 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4237 if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4240 return dpll_mgr->compute_dplls(state, crtc, encoder);
4244 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4245 * @state: atomic state
4246 * @crtc: CRTC to reserve DPLLs for
4249 * This function reserves all required DPLLs for the given CRTC and encoder
4250 * combination in the current atomic commit @state and the new @crtc atomic
4253 * The new configuration in the atomic commit @state is made effective by
4254 * calling intel_shared_dpll_swap_state().
4256 * The reserved DPLLs should be released by calling
4257 * intel_release_shared_dplls().
4260 * 0 if all required DPLLs were successfully reserved,
4261 * negative error code otherwise.
4263 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4264 struct intel_crtc *crtc,
4265 struct intel_encoder *encoder)
4267 struct drm_i915_private *i915 = to_i915(state->base.dev);
4268 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4270 if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4273 return dpll_mgr->get_dplls(state, crtc, encoder);
4277 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4278 * @state: atomic state
4279 * @crtc: crtc from which the DPLLs are to be released
4281 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4282 * from the current atomic commit @state and the old @crtc atomic state.
4284 * The new configuration in the atomic commit @state is made effective by
4285 * calling intel_shared_dpll_swap_state().
4287 void intel_release_shared_dplls(struct intel_atomic_state *state,
4288 struct intel_crtc *crtc)
4290 struct drm_i915_private *i915 = to_i915(state->base.dev);
4291 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4294 * FIXME: this function is called for every platform having a
4295 * compute_clock hook, even though the platform doesn't yet support
4296 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4302 dpll_mgr->put_dplls(state, crtc);
4306 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4307 * @state: atomic state
4308 * @crtc: the CRTC for which to update the active DPLL
4309 * @encoder: encoder determining the type of port DPLL
4311 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4312 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4313 * DPLL selected will be based on the current mode of the encoder's port.
4315 void intel_update_active_dpll(struct intel_atomic_state *state,
4316 struct intel_crtc *crtc,
4317 struct intel_encoder *encoder)
4319 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4320 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4322 if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4325 dpll_mgr->update_active_dpll(state, crtc, encoder);
4329 * intel_dpll_get_freq - calculate the DPLL's output frequency
4330 * @i915: i915 device
4331 * @pll: DPLL for which to calculate the output frequency
4332 * @pll_state: DPLL state from which to calculate the output frequency
4334 * Return the output frequency corresponding to @pll's passed in @pll_state.
4336 int intel_dpll_get_freq(struct drm_i915_private *i915,
4337 const struct intel_shared_dpll *pll,
4338 const struct intel_dpll_hw_state *pll_state)
4340 if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4343 return pll->info->funcs->get_freq(i915, pll, pll_state);
4347 * intel_dpll_get_hw_state - readout the DPLL's hardware state
4348 * @i915: i915 device
4349 * @pll: DPLL for which to calculate the output frequency
4350 * @hw_state: DPLL's hardware state
4352 * Read out @pll's hardware state into @hw_state.
4354 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4355 struct intel_shared_dpll *pll,
4356 struct intel_dpll_hw_state *hw_state)
4358 return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4361 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4362 struct intel_shared_dpll *pll)
4364 struct intel_crtc *crtc;
4366 pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4368 if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
4370 pll->info->id == DPLL_ID_EHL_DPLL4) {
4371 pll->wakeref = intel_display_power_get(i915,
4372 POWER_DOMAIN_DC_OFF);
4375 pll->state.pipe_mask = 0;
4376 for_each_intel_crtc(&i915->drm, crtc) {
4377 struct intel_crtc_state *crtc_state =
4378 to_intel_crtc_state(crtc->base.state);
4380 if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4381 intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4383 pll->active_mask = pll->state.pipe_mask;
4385 drm_dbg_kms(&i915->drm,
4386 "%s hw state readout: pipe_mask 0x%x, on %i\n",
4387 pll->info->name, pll->state.pipe_mask, pll->on);
4390 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4392 if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4393 i915->display.dpll.mgr->update_ref_clks(i915);
4396 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4398 struct intel_shared_dpll *pll;
4401 for_each_shared_dpll(i915, pll, i)
4402 readout_dpll_hw_state(i915, pll);
4405 static void sanitize_dpll_state(struct drm_i915_private *i915,
4406 struct intel_shared_dpll *pll)
4411 adlp_cmtg_clock_gating_wa(i915, pll);
4413 if (pll->active_mask)
4416 drm_dbg_kms(&i915->drm,
4417 "%s enabled but not in use, disabling\n",
4420 pll->info->funcs->disable(i915, pll);
4424 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4426 struct intel_shared_dpll *pll;
4429 for_each_shared_dpll(i915, pll, i)
4430 sanitize_dpll_state(i915, pll);
4434 * intel_dpll_dump_hw_state - write hw_state to dmesg
4435 * @i915: i915 drm device
4436 * @hw_state: hw state to be written to the log
4438 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4440 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4441 const struct intel_dpll_hw_state *hw_state)
4443 if (i915->display.dpll.mgr) {
4444 i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
4446 /* fallback for platforms that don't use the shared dpll
4449 drm_dbg_kms(&i915->drm,
4450 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4451 "fp0: 0x%x, fp1: 0x%x\n",
4460 verify_single_dpll_state(struct drm_i915_private *i915,
4461 struct intel_shared_dpll *pll,
4462 struct intel_crtc *crtc,
4463 const struct intel_crtc_state *new_crtc_state)
4465 struct intel_dpll_hw_state dpll_hw_state;
4469 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
4471 drm_dbg_kms(&i915->drm, "%s\n", pll->info->name);
4473 active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4475 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
4476 I915_STATE_WARN(i915, !pll->on && pll->active_mask,
4477 "pll in active use but not on in sw tracking\n");
4478 I915_STATE_WARN(i915, pll->on && !pll->active_mask,
4479 "pll is on but not used by any active pipe\n");
4480 I915_STATE_WARN(i915, pll->on != active,
4481 "pll on state mismatch (expected %i, found %i)\n",
4486 I915_STATE_WARN(i915,
4487 pll->active_mask & ~pll->state.pipe_mask,
4488 "more active pll users than references: 0x%x vs 0x%x\n",
4489 pll->active_mask, pll->state.pipe_mask);
4494 pipe_mask = BIT(crtc->pipe);
4496 if (new_crtc_state->hw.active)
4497 I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
4498 "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4499 pipe_name(crtc->pipe), pll->active_mask);
4501 I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4502 "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4503 pipe_name(crtc->pipe), pll->active_mask);
4505 I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
4506 "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4507 pipe_mask, pll->state.pipe_mask);
4509 I915_STATE_WARN(i915,
4510 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4511 sizeof(dpll_hw_state)),
4512 "pll hw state mismatch\n");
4515 void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4516 struct intel_crtc *crtc)
4518 struct drm_i915_private *i915 = to_i915(state->base.dev);
4519 const struct intel_crtc_state *old_crtc_state =
4520 intel_atomic_get_old_crtc_state(state, crtc);
4521 const struct intel_crtc_state *new_crtc_state =
4522 intel_atomic_get_new_crtc_state(state, crtc);
4524 if (new_crtc_state->shared_dpll)
4525 verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4526 crtc, new_crtc_state);
4528 if (old_crtc_state->shared_dpll &&
4529 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4530 u8 pipe_mask = BIT(crtc->pipe);
4531 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4533 I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4534 "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4535 pipe_name(crtc->pipe), pll->active_mask);
4536 I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
4537 "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4538 pipe_name(crtc->pipe), pll->state.pipe_mask);
4542 void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4544 struct drm_i915_private *i915 = to_i915(state->base.dev);
4545 struct intel_shared_dpll *pll;
4548 for_each_shared_dpll(i915, pll, i)
4549 verify_single_dpll_state(i915, pll, NULL, NULL);