2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
27 #include "bxt_dpio_phy_regs.h"
30 #include "intel_display_types.h"
31 #include "intel_dkl_phy.h"
32 #include "intel_dkl_phy_regs.h"
33 #include "intel_dpio_phy.h"
34 #include "intel_dpll.h"
35 #include "intel_dpll_mgr.h"
36 #include "intel_hti.h"
37 #include "intel_mg_phy_regs.h"
38 #include "intel_pch_refclk.h"
44 * Display PLLs used for driving outputs vary by platform. While some have
45 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
46 * from a pool. In the latter scenario, it is possible that multiple pipes
47 * share a PLL if their configurations match.
49 * This file provides an abstraction over display PLLs. The function
50 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
51 * users of a PLL are tracked and that tracking is integrated with the atomic
52 * modeset interface. During an atomic operation, required PLLs can be reserved
53 * for a given CRTC and encoder configuration by calling
54 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
55 * with intel_release_shared_dplls().
56 * Changes to the users are first staged in the atomic state, and then made
57 * effective by calling intel_shared_dpll_swap_state() during the atomic
58 * commit phase.
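 *
 * A minimal sketch (illustrative only, not part of the driver) of how a
 * platform's .get_dplls() hook ties these helpers together during the
 * atomic check phase; it mirrors the IBX implementation further below:
 */
#if 0
static int example_get_dplls(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	/* Find a PLL whose programmed state matches, or an unused one. */
	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     BIT(DPLL_ID_PCH_PLL_B) |
				     BIT(DPLL_ID_PCH_PLL_A));
	if (!pll)
		return -EINVAL;

	/* Track the new user in the atomic state; applied at commit time. */
	intel_reference_shared_dpll(state, crtc, pll,
				    &crtc_state->dpll_hw_state);
	crtc_state->shared_dpll = pll;

	return 0;
}
#endif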
61 /* platform-specific hooks for managing DPLLs */
62 struct intel_shared_dpll_funcs {
64 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
65 * the pll is not already enabled.
67 void (*enable)(struct drm_i915_private *i915,
68 struct intel_shared_dpll *pll,
69 const struct intel_dpll_hw_state *dpll_hw_state);
72 * Hook for disabling the pll, called from intel_disable_shared_dpll()
73 * only when it is safe to disable the pll, i.e., there are no more
74 * tracked users for it.
76 void (*disable)(struct drm_i915_private *i915,
77 struct intel_shared_dpll *pll);
80 * Hook for reading the values currently programmed to the DPLL
81 * registers. This is used for initial hw state readout and state
82 * verification after a mode set.
84 bool (*get_hw_state)(struct drm_i915_private *i915,
85 struct intel_shared_dpll *pll,
86 struct intel_dpll_hw_state *dpll_hw_state);
89 * Hook for calculating the pll's output frequency based on its passed-in
90 * hardware state.
92 int (*get_freq)(struct drm_i915_private *i915,
93 const struct intel_shared_dpll *pll,
94 const struct intel_dpll_hw_state *dpll_hw_state);
97 struct intel_dpll_mgr {
98 const struct dpll_info *dpll_info;
100 int (*compute_dplls)(struct intel_atomic_state *state,
101 struct intel_crtc *crtc,
102 struct intel_encoder *encoder);
103 int (*get_dplls)(struct intel_atomic_state *state,
104 struct intel_crtc *crtc,
105 struct intel_encoder *encoder);
106 void (*put_dplls)(struct intel_atomic_state *state,
107 struct intel_crtc *crtc);
108 void (*update_active_dpll)(struct intel_atomic_state *state,
109 struct intel_crtc *crtc,
110 struct intel_encoder *encoder);
111 void (*update_ref_clks)(struct drm_i915_private *i915);
112 void (*dump_hw_state)(struct drm_printer *p,
113 const struct intel_dpll_hw_state *dpll_hw_state);
114 bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
115 const struct intel_dpll_hw_state *b);
119 intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
120 struct intel_shared_dpll_state *shared_dpll)
122 struct intel_shared_dpll *pll;
125 /* Copy shared dpll state */
126 for_each_shared_dpll(i915, pll, i)
127 shared_dpll[pll->index] = pll->state;
130 static struct intel_shared_dpll_state *
131 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
133 struct intel_atomic_state *state = to_intel_atomic_state(s);
135 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
137 if (!state->dpll_set) {
138 state->dpll_set = true;
140 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
144 return state->shared_dpll;
148 * intel_get_shared_dpll_by_id - get a DPLL given its id
149 * @i915: i915 device instance
152 * Returns:
153 * A pointer to the DPLL with @id
155 struct intel_shared_dpll *
156 intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
157 enum intel_dpll_id id)
159 struct intel_shared_dpll *pll;
162 for_each_shared_dpll(i915, pll, i) {
163 if (pll->info->id == id)
172 void assert_shared_dpll(struct drm_i915_private *i915,
173 struct intel_shared_dpll *pll,
177 struct intel_dpll_hw_state hw_state;
179 if (drm_WARN(&i915->drm, !pll,
180 "asserting DPLL %s with no DPLL\n", str_on_off(state)))
183 cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
184 I915_STATE_WARN(i915, cur_state != state,
185 "%s assertion failure (expected %s, current %s)\n",
186 pll->info->name, str_on_off(state),
187 str_on_off(cur_state));
190 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
192 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
195 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
197 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
201 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
202 struct intel_shared_dpll *pll)
205 return DG1_DPLL_ENABLE(pll->info->id);
206 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
207 (pll->info->id == DPLL_ID_EHL_DPLL4))
208 return MG_PLL_ENABLE(0);
210 return ICL_DPLL_ENABLE(pll->info->id);
214 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
215 struct intel_shared_dpll *pll)
217 const enum intel_dpll_id id = pll->info->id;
218 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
220 if (IS_ALDERLAKE_P(i915))
221 return ADLP_PORTTC_PLL_ENABLE(tc_port);
223 return MG_PLL_ENABLE(tc_port);
226 static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
227 struct intel_shared_dpll *pll)
229 if (pll->info->power_domain)
230 pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
232 pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
236 static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
237 struct intel_shared_dpll *pll)
239 pll->info->funcs->disable(i915, pll);
242 if (pll->info->power_domain)
243 intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
247 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
248 * @crtc_state: CRTC, and its state, which has a shared DPLL
250 * Enable the shared DPLL used by the CRTC in @crtc_state.
252 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
254 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
255 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
256 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
257 unsigned int pipe_mask = BIT(crtc->pipe);
258 unsigned int old_mask;
260 if (drm_WARN_ON(&i915->drm, pll == NULL))
263 mutex_lock(&i915->display.dpll.lock);
264 old_mask = pll->active_mask;
266 if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
267 drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
270 pll->active_mask |= pipe_mask;
272 drm_dbg_kms(&i915->drm,
273 "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
274 pll->info->name, pll->active_mask, pll->on,
275 crtc->base.base.id, crtc->base.name);
278 drm_WARN_ON(&i915->drm, !pll->on);
279 assert_shared_dpll_enabled(i915, pll);
282 drm_WARN_ON(&i915->drm, pll->on);
284 drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
286 _intel_enable_shared_dpll(i915, pll);
289 mutex_unlock(&i915->display.dpll.lock);
293 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
294 * @crtc_state: CRTC, and its state, which has a shared DPLL
296 * Disable the shared DPLL used by the CRTC in @crtc_state.
298 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
300 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
301 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
302 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
303 unsigned int pipe_mask = BIT(crtc->pipe);
305 /* PCH only available on ILK+ */
306 if (DISPLAY_VER(i915) < 5)
312 mutex_lock(&i915->display.dpll.lock);
313 if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
314 "%s not used by [CRTC:%d:%s]\n", pll->info->name,
315 crtc->base.base.id, crtc->base.name))
318 drm_dbg_kms(&i915->drm,
319 "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
320 pll->info->name, pll->active_mask, pll->on,
321 crtc->base.base.id, crtc->base.name);
323 assert_shared_dpll_enabled(i915, pll);
324 drm_WARN_ON(&i915->drm, !pll->on);
326 pll->active_mask &= ~pipe_mask;
327 if (pll->active_mask)
330 drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
332 _intel_disable_shared_dpll(i915, pll);
335 mutex_unlock(&i915->display.dpll.lock);
339 intel_dpll_mask_all(struct drm_i915_private *i915)
341 struct intel_shared_dpll *pll;
342 unsigned long dpll_mask = 0;
345 for_each_shared_dpll(i915, pll, i) {
346 drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
348 dpll_mask |= BIT(pll->info->id);
354 static struct intel_shared_dpll *
355 intel_find_shared_dpll(struct intel_atomic_state *state,
356 const struct intel_crtc *crtc,
357 const struct intel_dpll_hw_state *dpll_hw_state,
358 unsigned long dpll_mask)
360 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
361 unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
362 struct intel_shared_dpll_state *shared_dpll;
363 struct intel_shared_dpll *unused_pll = NULL;
364 enum intel_dpll_id id;
366 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
368 drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
370 for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
371 struct intel_shared_dpll *pll;
373 pll = intel_get_shared_dpll_by_id(i915, id);
377 /* Only want to check enabled timings first */
378 if (shared_dpll[pll->index].pipe_mask == 0) {
384 if (memcmp(dpll_hw_state,
385 &shared_dpll[pll->index].hw_state,
386 sizeof(*dpll_hw_state)) == 0) {
387 drm_dbg_kms(&i915->drm,
388 "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
389 crtc->base.base.id, crtc->base.name,
391 shared_dpll[pll->index].pipe_mask,
397 /* Ok no matching timings, maybe there's a free one? */
399 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
400 crtc->base.base.id, crtc->base.name,
401 unused_pll->info->name);
409 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
410 * @crtc: CRTC on whose behalf the reference is taken
411 * @pll: DPLL for which the reference is taken
412 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
414 * Take a reference on @pll to track its use by @crtc.
417 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
418 const struct intel_shared_dpll *pll,
419 struct intel_shared_dpll_state *shared_dpll_state)
421 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
423 drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
425 shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
427 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
428 crtc->base.base.id, crtc->base.name, pll->info->name);
432 intel_reference_shared_dpll(struct intel_atomic_state *state,
433 const struct intel_crtc *crtc,
434 const struct intel_shared_dpll *pll,
435 const struct intel_dpll_hw_state *dpll_hw_state)
437 struct intel_shared_dpll_state *shared_dpll;
439 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
441 if (shared_dpll[pll->index].pipe_mask == 0)
442 shared_dpll[pll->index].hw_state = *dpll_hw_state;
444 intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
448 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
449 * @crtc: CRTC on whose behalf the reference is dropped
450 * @pll: DPLL for which the reference is dropped
451 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
453 * Drop the reference on @pll that tracked its use by @crtc.
456 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
457 const struct intel_shared_dpll *pll,
458 struct intel_shared_dpll_state *shared_dpll_state)
460 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
462 drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
464 shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
466 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
467 crtc->base.base.id, crtc->base.name, pll->info->name);
470 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
471 const struct intel_crtc *crtc,
472 const struct intel_shared_dpll *pll)
474 struct intel_shared_dpll_state *shared_dpll;
476 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
478 intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
481 static void intel_put_dpll(struct intel_atomic_state *state,
482 struct intel_crtc *crtc)
484 const struct intel_crtc_state *old_crtc_state =
485 intel_atomic_get_old_crtc_state(state, crtc);
486 struct intel_crtc_state *new_crtc_state =
487 intel_atomic_get_new_crtc_state(state, crtc);
489 new_crtc_state->shared_dpll = NULL;
491 if (!old_crtc_state->shared_dpll)
494 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
498 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
499 * @state: atomic state
501 * This is the dpll version of drm_atomic_helper_swap_state() since the
502 * helper does not handle driver-specific global state.
504 * For consistency with atomic helpers this function does a complete swap,
505 * i.e. it also puts the current state into @state, even though there is no
506 * need for that at this moment.
508 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
510 struct drm_i915_private *i915 = to_i915(state->base.dev);
511 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
512 struct intel_shared_dpll *pll;
515 if (!state->dpll_set)
518 for_each_shared_dpll(i915, pll, i)
519 swap(pll->state, shared_dpll[pll->index]);
522 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
523 struct intel_shared_dpll *pll,
524 struct intel_dpll_hw_state *dpll_hw_state)
526 struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
527 const enum intel_dpll_id id = pll->info->id;
528 intel_wakeref_t wakeref;
531 wakeref = intel_display_power_get_if_enabled(i915,
532 POWER_DOMAIN_DISPLAY_CORE);
536 val = intel_de_read(i915, PCH_DPLL(id));
537 hw_state->dpll = val;
538 hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
539 hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
541 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
543 return val & DPLL_VCO_ENABLE;
546 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
551 val = intel_de_read(i915, PCH_DREF_CONTROL);
552 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
553 DREF_SUPERSPREAD_SOURCE_MASK));
554 I915_STATE_WARN(i915, !enabled,
555 "PCH refclk assertion failure, should be active but is disabled\n");
558 static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
559 struct intel_shared_dpll *pll,
560 const struct intel_dpll_hw_state *dpll_hw_state)
562 const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
563 const enum intel_dpll_id id = pll->info->id;
565 /* PCH refclock must be enabled first */
566 ibx_assert_pch_refclk_enabled(i915);
568 intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
569 intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
571 intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
573 /* Wait for the clocks to stabilize. */
574 intel_de_posting_read(i915, PCH_DPLL(id));
577 /* The pixel multiplier can only be updated once the
578 * DPLL is enabled and the clocks are stable.
582 intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
583 intel_de_posting_read(i915, PCH_DPLL(id));
587 static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
588 struct intel_shared_dpll *pll)
590 const enum intel_dpll_id id = pll->info->id;
592 intel_de_write(i915, PCH_DPLL(id), 0);
593 intel_de_posting_read(i915, PCH_DPLL(id));
597 static int ibx_compute_dpll(struct intel_atomic_state *state,
598 struct intel_crtc *crtc,
599 struct intel_encoder *encoder)
604 static int ibx_get_dpll(struct intel_atomic_state *state,
605 struct intel_crtc *crtc,
606 struct intel_encoder *encoder)
608 struct intel_crtc_state *crtc_state =
609 intel_atomic_get_new_crtc_state(state, crtc);
610 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
611 struct intel_shared_dpll *pll;
612 enum intel_dpll_id id;
614 if (HAS_PCH_IBX(i915)) {
615 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
616 id = (enum intel_dpll_id) crtc->pipe;
617 pll = intel_get_shared_dpll_by_id(i915, id);
619 drm_dbg_kms(&i915->drm,
620 "[CRTC:%d:%s] using pre-allocated %s\n",
621 crtc->base.base.id, crtc->base.name,
624 pll = intel_find_shared_dpll(state, crtc,
625 &crtc_state->dpll_hw_state,
626 BIT(DPLL_ID_PCH_PLL_B) |
627 BIT(DPLL_ID_PCH_PLL_A));
633 /* reference the pll */
634 intel_reference_shared_dpll(state, crtc,
635 pll, &crtc_state->dpll_hw_state);
637 crtc_state->shared_dpll = pll;
642 static void ibx_dump_hw_state(struct drm_printer *p,
643 const struct intel_dpll_hw_state *dpll_hw_state)
645 const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
647 drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
648 "fp0: 0x%x, fp1: 0x%x\n",
655 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
656 const struct intel_dpll_hw_state *_b)
658 const struct i9xx_dpll_hw_state *a = &_a->i9xx;
659 const struct i9xx_dpll_hw_state *b = &_b->i9xx;
661 return a->dpll == b->dpll &&
662 a->dpll_md == b->dpll_md &&
667 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
668 .enable = ibx_pch_dpll_enable,
669 .disable = ibx_pch_dpll_disable,
670 .get_hw_state = ibx_pch_dpll_get_hw_state,
673 static const struct dpll_info pch_plls[] = {
674 { .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
675 { .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
679 static const struct intel_dpll_mgr pch_pll_mgr = {
680 .dpll_info = pch_plls,
681 .compute_dplls = ibx_compute_dpll,
682 .get_dplls = ibx_get_dpll,
683 .put_dplls = intel_put_dpll,
684 .dump_hw_state = ibx_dump_hw_state,
685 .compare_hw_state = ibx_compare_hw_state,
688 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
689 struct intel_shared_dpll *pll,
690 const struct intel_dpll_hw_state *dpll_hw_state)
692 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
693 const enum intel_dpll_id id = pll->info->id;
695 intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
696 intel_de_posting_read(i915, WRPLL_CTL(id));
700 static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
701 struct intel_shared_dpll *pll,
702 const struct intel_dpll_hw_state *dpll_hw_state)
704 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
706 intel_de_write(i915, SPLL_CTL, hw_state->spll);
707 intel_de_posting_read(i915, SPLL_CTL);
711 static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
712 struct intel_shared_dpll *pll)
714 const enum intel_dpll_id id = pll->info->id;
716 intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
717 intel_de_posting_read(i915, WRPLL_CTL(id));
720 * Try to set up the PCH reference clock once all DPLLs
721 * that depend on it have been shut down.
723 if (i915->display.dpll.pch_ssc_use & BIT(id))
724 intel_init_pch_refclk(i915);
727 static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
728 struct intel_shared_dpll *pll)
730 enum intel_dpll_id id = pll->info->id;
732 intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
733 intel_de_posting_read(i915, SPLL_CTL);
736 * Try to set up the PCH reference clock once all DPLLs
737 * that depend on it have been shut down.
739 if (i915->display.dpll.pch_ssc_use & BIT(id))
740 intel_init_pch_refclk(i915);
743 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
744 struct intel_shared_dpll *pll,
745 struct intel_dpll_hw_state *dpll_hw_state)
747 struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
748 const enum intel_dpll_id id = pll->info->id;
749 intel_wakeref_t wakeref;
752 wakeref = intel_display_power_get_if_enabled(i915,
753 POWER_DOMAIN_DISPLAY_CORE);
757 val = intel_de_read(i915, WRPLL_CTL(id));
758 hw_state->wrpll = val;
760 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
762 return val & WRPLL_PLL_ENABLE;
765 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
766 struct intel_shared_dpll *pll,
767 struct intel_dpll_hw_state *dpll_hw_state)
769 struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
770 intel_wakeref_t wakeref;
773 wakeref = intel_display_power_get_if_enabled(i915,
774 POWER_DOMAIN_DISPLAY_CORE);
778 val = intel_de_read(i915, SPLL_CTL);
779 hw_state->spll = val;
781 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
783 return val & SPLL_PLL_ENABLE;
787 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
793 /* Constraints for PLL good behavior */
799 struct hsw_wrpll_rnp {
803 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
867 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
868 unsigned int r2, unsigned int n2,
870 struct hsw_wrpll_rnp *best)
872 u64 a, b, c, d, diff, diff_best;
874 /* No best (r,n,p) yet */
883 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
884 * freq2k.
885 *
886 * delta = 1e6 *
887 *         abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
888 *         freq2k;
889 *
890 * and we would like delta <= budget.
892 * If the discrepancy is above the PPM-based budget, always prefer to
893 * improve upon the previous solution. However, if you're within the
894 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
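 *
 * To keep everything in integer math the code below cross-multiplies:
 * with diff = abs(freq2k * p * r2 - LC_FREQ_2K * n2), the candidate is
 * within budget iff 1e6 * diff <= freq2k * budget * p * r2, i.e. c <= a,
 * and likewise d <= b for the best solution found so far.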
896 a = freq2k * budget * p * r2;
897 b = freq2k * budget * best->p * best->r2;
898 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
899 diff_best = abs_diff(freq2k * best->p * best->r2,
900 LC_FREQ_2K * best->n2);
902 d = 1000000 * diff_best;
904 if (a < c && b < d) {
905 /* If both are above the budget, pick the closer */
906 if (best->p * best->r2 * diff < p * r2 * diff_best) {
911 } else if (a >= c && b < d) {
912 /* The new candidate is within the budget but the best so far is not: update. */
916 } else if (a >= c && b >= d) {
917 /* Both are below the limit, so pick the higher n2/(r2*r2) */
918 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
924 /* Otherwise a < c && b >= d, do nothing */
928 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
929 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
933 struct hsw_wrpll_rnp best = {};
936 freq2k = clock / 100;
938 budget = hsw_wrpll_get_budget_for_freq(clock);
940 /* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
941 * entirely and pass the LC PLL clock through directly. */
942 if (freq2k == 5400000) {
950 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
951 * the WR PLL.
953 * We want R so that REF_MIN <= Ref <= REF_MAX.
954 * Injecting R2 = 2 * R gives:
955 * REF_MAX * r2 > LC_FREQ * 2 and
956 * REF_MIN * r2 < LC_FREQ * 2
958 * Which means the desired boundaries for r2 are:
959 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
962 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
963 r2 <= LC_FREQ * 2 / REF_MIN;
967 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
969 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
970 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
971 * VCO_MAX * r2 > n2 * LC_FREQ and
972 * VCO_MIN * r2 < n2 * LC_FREQ)
974 * Which means the desired boundaries for n2 are:
975 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
977 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
978 n2 <= VCO_MAX * r2 / LC_FREQ;
981 for (p = P_MIN; p <= P_MAX; p += P_INC)
982 hsw_wrpll_update_rnp(freq2k, budget,
992 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
993 const struct intel_shared_dpll *pll,
994 const struct intel_dpll_hw_state *dpll_hw_state)
996 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
999 u32 wrpll = hw_state->wrpll;
1001 switch (wrpll & WRPLL_REF_MASK) {
1002 case WRPLL_REF_SPECIAL_HSW:
1003 /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
1004 if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
1005 refclk = i915->display.dpll.ref_clks.nssc;
1009 case WRPLL_REF_PCH_SSC:
1011 * We could calculate spread here, but our checking
1012 * code only cares about 5% accuracy, and spread is a max of
1013 * 2.25%.
1015 refclk = i915->display.dpll.ref_clks.ssc;
1017 case WRPLL_REF_LCPLL:
1021 MISSING_CASE(wrpll);
1025 r = wrpll & WRPLL_DIVIDER_REF_MASK;
1026 p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1027 n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1029 /* Convert to KHz, p & r have a fixed point portion */
1030 return (refclk * n / 10) / (p * r) * 2;
1034 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1035 struct intel_crtc *crtc)
1037 struct drm_i915_private *i915 = to_i915(state->base.dev);
1038 struct intel_crtc_state *crtc_state =
1039 intel_atomic_get_new_crtc_state(state, crtc);
1040 struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1041 unsigned int p, n2, r2;
1043 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1046 WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1047 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1048 WRPLL_DIVIDER_POST(p);
1050 crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1051 &crtc_state->dpll_hw_state);
1056 static struct intel_shared_dpll *
1057 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1058 struct intel_crtc *crtc)
1060 struct intel_crtc_state *crtc_state =
1061 intel_atomic_get_new_crtc_state(state, crtc);
1063 return intel_find_shared_dpll(state, crtc,
1064 &crtc_state->dpll_hw_state,
1065 BIT(DPLL_ID_WRPLL2) |
1066 BIT(DPLL_ID_WRPLL1));
1070 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1072 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1073 int clock = crtc_state->port_clock;
1075 switch (clock / 2) {
1081 drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1087 static struct intel_shared_dpll *
1088 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1090 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1091 struct intel_shared_dpll *pll;
1092 enum intel_dpll_id pll_id;
1093 int clock = crtc_state->port_clock;
1095 switch (clock / 2) {
1097 pll_id = DPLL_ID_LCPLL_810;
1100 pll_id = DPLL_ID_LCPLL_1350;
1103 pll_id = DPLL_ID_LCPLL_2700;
1106 MISSING_CASE(clock / 2);
1110 pll = intel_get_shared_dpll_by_id(i915, pll_id);
1118 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1119 const struct intel_shared_dpll *pll,
1120 const struct intel_dpll_hw_state *dpll_hw_state)
1124 switch (pll->info->id) {
1125 case DPLL_ID_LCPLL_810:
1128 case DPLL_ID_LCPLL_1350:
1129 link_clock = 135000;
1131 case DPLL_ID_LCPLL_2700:
1132 link_clock = 270000;
1135 drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1139 return link_clock * 2;
1143 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1144 struct intel_crtc *crtc)
1146 struct intel_crtc_state *crtc_state =
1147 intel_atomic_get_new_crtc_state(state, crtc);
1148 struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1150 if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1154 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1159 static struct intel_shared_dpll *
1160 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1161 struct intel_crtc *crtc)
1163 struct intel_crtc_state *crtc_state =
1164 intel_atomic_get_new_crtc_state(state, crtc);
1166 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1170 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1171 const struct intel_shared_dpll *pll,
1172 const struct intel_dpll_hw_state *dpll_hw_state)
1174 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1177 switch (hw_state->spll & SPLL_FREQ_MASK) {
1178 case SPLL_FREQ_810MHz:
1181 case SPLL_FREQ_1350MHz:
1182 link_clock = 135000;
1184 case SPLL_FREQ_2700MHz:
1185 link_clock = 270000;
1188 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1192 return link_clock * 2;
1195 static int hsw_compute_dpll(struct intel_atomic_state *state,
1196 struct intel_crtc *crtc,
1197 struct intel_encoder *encoder)
1199 struct intel_crtc_state *crtc_state =
1200 intel_atomic_get_new_crtc_state(state, crtc);
1202 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1203 return hsw_ddi_wrpll_compute_dpll(state, crtc);
1204 else if (intel_crtc_has_dp_encoder(crtc_state))
1205 return hsw_ddi_lcpll_compute_dpll(crtc_state);
1206 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1207 return hsw_ddi_spll_compute_dpll(state, crtc);
1212 static int hsw_get_dpll(struct intel_atomic_state *state,
1213 struct intel_crtc *crtc,
1214 struct intel_encoder *encoder)
1216 struct intel_crtc_state *crtc_state =
1217 intel_atomic_get_new_crtc_state(state, crtc);
1218 struct intel_shared_dpll *pll = NULL;
1220 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1221 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1222 else if (intel_crtc_has_dp_encoder(crtc_state))
1223 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1224 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1225 pll = hsw_ddi_spll_get_dpll(state, crtc);
1230 intel_reference_shared_dpll(state, crtc,
1231 pll, &crtc_state->dpll_hw_state);
1233 crtc_state->shared_dpll = pll;
1238 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1240 i915->display.dpll.ref_clks.ssc = 135000;
1241 /* Non-SSC is only used on non-ULT HSW. */
1242 if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1243 i915->display.dpll.ref_clks.nssc = 24000;
1245 i915->display.dpll.ref_clks.nssc = 135000;
1248 static void hsw_dump_hw_state(struct drm_printer *p,
1249 const struct intel_dpll_hw_state *dpll_hw_state)
1251 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1253 drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1254 hw_state->wrpll, hw_state->spll);
1257 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1258 const struct intel_dpll_hw_state *_b)
1260 const struct hsw_dpll_hw_state *a = &_a->hsw;
1261 const struct hsw_dpll_hw_state *b = &_b->hsw;
1263 return a->wrpll == b->wrpll &&
1267 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1268 .enable = hsw_ddi_wrpll_enable,
1269 .disable = hsw_ddi_wrpll_disable,
1270 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
1271 .get_freq = hsw_ddi_wrpll_get_freq,
1274 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1275 .enable = hsw_ddi_spll_enable,
1276 .disable = hsw_ddi_spll_disable,
1277 .get_hw_state = hsw_ddi_spll_get_hw_state,
1278 .get_freq = hsw_ddi_spll_get_freq,
1281 static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1282 struct intel_shared_dpll *pll,
1283 const struct intel_dpll_hw_state *hw_state)
1287 static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1288 struct intel_shared_dpll *pll)
1292 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1293 struct intel_shared_dpll *pll,
1294 struct intel_dpll_hw_state *dpll_hw_state)
1299 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1300 .enable = hsw_ddi_lcpll_enable,
1301 .disable = hsw_ddi_lcpll_disable,
1302 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
1303 .get_freq = hsw_ddi_lcpll_get_freq,
1306 static const struct dpll_info hsw_plls[] = {
1307 { .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1308 { .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1309 { .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1310 { .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1311 .always_on = true, },
1312 { .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1313 .always_on = true, },
1314 { .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1315 .always_on = true, },
1319 static const struct intel_dpll_mgr hsw_pll_mgr = {
1320 .dpll_info = hsw_plls,
1321 .compute_dplls = hsw_compute_dpll,
1322 .get_dplls = hsw_get_dpll,
1323 .put_dplls = intel_put_dpll,
1324 .update_ref_clks = hsw_update_dpll_ref_clks,
1325 .dump_hw_state = hsw_dump_hw_state,
1326 .compare_hw_state = hsw_compare_hw_state,
1329 struct skl_dpll_regs {
1330 i915_reg_t ctl, cfgcr1, cfgcr2;
1333 /* this array is indexed by the *shared* pll id */
1334 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1338 /* DPLL 0 doesn't support HDMI mode */
1343 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1344 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1348 .ctl = WRPLL_CTL(0),
1349 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1350 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1354 .ctl = WRPLL_CTL(1),
1355 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1356 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1360 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1361 struct intel_shared_dpll *pll,
1362 const struct skl_dpll_hw_state *hw_state)
1364 const enum intel_dpll_id id = pll->info->id;
1366 intel_de_rmw(i915, DPLL_CTRL1,
1367 DPLL_CTRL1_HDMI_MODE(id) |
1368 DPLL_CTRL1_SSC(id) |
1369 DPLL_CTRL1_LINK_RATE_MASK(id),
1370 hw_state->ctrl1 << (id * 6));
1371 intel_de_posting_read(i915, DPLL_CTRL1);
1374 static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1375 struct intel_shared_dpll *pll,
1376 const struct intel_dpll_hw_state *dpll_hw_state)
1378 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1379 const struct skl_dpll_regs *regs = skl_dpll_regs;
1380 const enum intel_dpll_id id = pll->info->id;
1382 skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1384 intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
1385 intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
1386 intel_de_posting_read(i915, regs[id].cfgcr1);
1387 intel_de_posting_read(i915, regs[id].cfgcr2);
1389 /* the enable bit is always bit 31 */
1390 intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1392 if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1393 drm_err(&i915->drm, "DPLL %d not locked\n", id);
1396 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1397 struct intel_shared_dpll *pll,
1398 const struct intel_dpll_hw_state *dpll_hw_state)
1400 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1402 skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1405 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1406 struct intel_shared_dpll *pll)
1408 const struct skl_dpll_regs *regs = skl_dpll_regs;
1409 const enum intel_dpll_id id = pll->info->id;
1411 /* the enable bit is always bit 31 */
1412 intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1413 intel_de_posting_read(i915, regs[id].ctl);
1416 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1417 struct intel_shared_dpll *pll)
1421 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1422 struct intel_shared_dpll *pll,
1423 struct intel_dpll_hw_state *dpll_hw_state)
1425 struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1426 const struct skl_dpll_regs *regs = skl_dpll_regs;
1427 const enum intel_dpll_id id = pll->info->id;
1428 intel_wakeref_t wakeref;
1432 wakeref = intel_display_power_get_if_enabled(i915,
1433 POWER_DOMAIN_DISPLAY_CORE);
1439 val = intel_de_read(i915, regs[id].ctl);
1440 if (!(val & LCPLL_PLL_ENABLE))
1443 val = intel_de_read(i915, DPLL_CTRL1);
1444 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1446 /* avoid reading back stale values if HDMI mode is not enabled */
1447 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1448 hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1449 hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1454 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1459 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1460 struct intel_shared_dpll *pll,
1461 struct intel_dpll_hw_state *dpll_hw_state)
1463 struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1464 const struct skl_dpll_regs *regs = skl_dpll_regs;
1465 const enum intel_dpll_id id = pll->info->id;
1466 intel_wakeref_t wakeref;
1470 wakeref = intel_display_power_get_if_enabled(i915,
1471 POWER_DOMAIN_DISPLAY_CORE);
1477 /* DPLL0 is always enabled since it drives CDCLK */
1478 val = intel_de_read(i915, regs[id].ctl);
1479 if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1482 val = intel_de_read(i915, DPLL_CTRL1);
1483 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1488 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1493 struct skl_wrpll_context {
1494 u64 min_deviation; /* current minimal deviation */
1495 u64 central_freq; /* chosen central freq */
1496 u64 dco_freq; /* chosen dco freq */
1497 unsigned int p; /* chosen divider */
1500 /* DCO freq must be within +1%/-6% of the DCO central freq */
1501 #define SKL_DCO_MAX_PDEVIATION 100
1502 #define SKL_DCO_MAX_NDEVIATION 600
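/*
 * skl_wrpll_try_divider() computes the deviation in units of 0.01%
 * (10000 * abs_diff(dco_freq, central_freq) / central_freq), so 100 and
 * 600 correspond to the +1%/-6% window above.
 */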
1504 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1507 unsigned int divider)
1511 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1514 /* positive deviation */
1515 if (dco_freq >= central_freq) {
1516 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1517 deviation < ctx->min_deviation) {
1518 ctx->min_deviation = deviation;
1519 ctx->central_freq = central_freq;
1520 ctx->dco_freq = dco_freq;
1523 /* negative deviation */
1524 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1525 deviation < ctx->min_deviation) {
1526 ctx->min_deviation = deviation;
1527 ctx->central_freq = central_freq;
1528 ctx->dco_freq = dco_freq;
1533 static void skl_wrpll_get_multipliers(unsigned int p,
1534 unsigned int *p0 /* out */,
1535 unsigned int *p1 /* out */,
1536 unsigned int *p2 /* out */)
1540 unsigned int half = p / 2;
1542 if (half == 1 || half == 2 || half == 3 || half == 5) {
1546 } else if (half % 2 == 0) {
1550 } else if (half % 3 == 0) {
1554 } else if (half % 7 == 0) {
1559 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1563 } else if (p == 5 || p == 7) {
1567 } else if (p == 15) {
1571 } else if (p == 21) {
1575 } else if (p == 35) {
1582 struct skl_wrpll_params {
1592 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1596 u32 p0, u32 p1, u32 p2)
1600 switch (central_freq) {
1602 params->central_freq = 0;
1605 params->central_freq = 1;
1608 params->central_freq = 3;
1625 WARN(1, "Incorrect PDiv\n");
1642 WARN(1, "Incorrect KDiv\n");
1645 params->qdiv_ratio = p1;
1646 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1648 dco_freq = p0 * p1 * p2 * afe_clock;
1651 * Intermediate values are in Hz.
1652 * Divide by MHz to match bspec
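 *
 * Worked example (illustrative, assuming the typical 24 MHz non-SSC
 * reference): a 148.5 MHz port clock gives afe_clock = 742.5 MHz, the
 * divider search picks p = 12 (p0 = 2, p1 = 3, p2 = 2), so the DCO runs
 * at 8910 MHz; then dco_integer = 8910 / 24 = 371 and
 * dco_fraction = 0.25 * 0x8000 = 0x2000.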
1654 params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1655 params->dco_fraction =
1656 div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1657 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1661 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1663 struct skl_wrpll_params *wrpll_params)
1665 static const u64 dco_central_freq[3] = { 8400000000ULL,
1668 static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1669 24, 28, 30, 32, 36, 40, 42, 44,
1670 48, 52, 54, 56, 60, 64, 66, 68,
1671 70, 72, 76, 78, 80, 84, 88, 90,
1673 static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1674 static const struct {
1678 { even_dividers, ARRAY_SIZE(even_dividers) },
1679 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1681 struct skl_wrpll_context ctx = {
1682 .min_deviation = U64_MAX,
1684 unsigned int dco, d, i;
1685 unsigned int p0, p1, p2;
1686 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1688 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1689 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1690 for (i = 0; i < dividers[d].n_dividers; i++) {
1691 unsigned int p = dividers[d].list[i];
1692 u64 dco_freq = p * afe_clock;
1694 skl_wrpll_try_divider(&ctx,
1695 dco_central_freq[dco],
1699 * Skip the remaining dividers if we're sure to
1700 * have found the definitive divider, we can't
1701 * improve a 0 deviation.
1703 if (ctx.min_deviation == 0)
1704 goto skip_remaining_dividers;
1708 skip_remaining_dividers:
1710 * If a solution is found with an even divider, prefer
1711 * that one.
1713 if (d == 0 && ctx.p)
1721 * gcc incorrectly analyses that these can be used without being
1722 * initialized. To be fair, it's hard to guess.
1725 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1726 skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1727 ctx.central_freq, p0, p1, p2);
1732 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1733 const struct intel_shared_dpll *pll,
1734 const struct intel_dpll_hw_state *dpll_hw_state)
1736 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1737 int ref_clock = i915->display.dpll.ref_clks.nssc;
1738 u32 p0, p1, p2, dco_freq;
1740 p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1741 p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1743 if (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1744 p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1750 case DPLL_CFGCR2_PDIV_1:
1753 case DPLL_CFGCR2_PDIV_2:
1756 case DPLL_CFGCR2_PDIV_3:
1759 case DPLL_CFGCR2_PDIV_7_INVALID:
1761 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit#0,
1762 * handling it the same way as PDIV_7.
1764 drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1766 case DPLL_CFGCR2_PDIV_7:
1775 case DPLL_CFGCR2_KDIV_5:
1778 case DPLL_CFGCR2_KDIV_2:
1781 case DPLL_CFGCR2_KDIV_3:
1784 case DPLL_CFGCR2_KDIV_1:
1792 dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1795 dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1798 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1801 return dco_freq / (p0 * p1 * p2 * 5);
1804 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1806 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1807 struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1808 struct skl_wrpll_params wrpll_params = {};
1811 ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1812 i915->display.dpll.ref_clks.nssc, &wrpll_params);
1817 * See comment in intel_dpll_hw_state to understand why we always use 0
1818 * as the DPLL id in this function.
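 * (Each PLL owns a 6-bit field in DPLL_CTRL1, so the value is assembled
 * at bit offset 0 here and shifted into place by id * 6 when written;
 * see skl_ddi_pll_write_ctrl1() and skl_ddi_pll_get_hw_state().)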
1821 DPLL_CTRL1_OVERRIDE(0) |
1822 DPLL_CTRL1_HDMI_MODE(0);
1825 DPLL_CFGCR1_FREQ_ENABLE |
1826 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1827 wrpll_params.dco_integer;
1830 DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1831 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1832 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1833 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1834 wrpll_params.central_freq;
1836 crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1837 &crtc_state->dpll_hw_state);
1843 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1845 struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1849 * See comment in intel_dpll_hw_state to understand why we always use 0
1850 * as the DPLL id in this function.
1852 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1853 switch (crtc_state->port_clock / 2) {
1855 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1858 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1861 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1865 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1868 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1871 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1875 hw_state->ctrl1 = ctrl1;
1880 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1881 const struct intel_shared_dpll *pll,
1882 const struct intel_dpll_hw_state *dpll_hw_state)
1884 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1887 switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1888 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1889 case DPLL_CTRL1_LINK_RATE_810:
1892 case DPLL_CTRL1_LINK_RATE_1080:
1893 link_clock = 108000;
1895 case DPLL_CTRL1_LINK_RATE_1350:
1896 link_clock = 135000;
1898 case DPLL_CTRL1_LINK_RATE_1620:
1899 link_clock = 162000;
1901 case DPLL_CTRL1_LINK_RATE_2160:
1902 link_clock = 216000;
1904 case DPLL_CTRL1_LINK_RATE_2700:
1905 link_clock = 270000;
1908 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1912 return link_clock * 2;
1915 static int skl_compute_dpll(struct intel_atomic_state *state,
1916 struct intel_crtc *crtc,
1917 struct intel_encoder *encoder)
1919 struct intel_crtc_state *crtc_state =
1920 intel_atomic_get_new_crtc_state(state, crtc);
1922 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1923 return skl_ddi_hdmi_pll_dividers(crtc_state);
1924 else if (intel_crtc_has_dp_encoder(crtc_state))
1925 return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1930 static int skl_get_dpll(struct intel_atomic_state *state,
1931 struct intel_crtc *crtc,
1932 struct intel_encoder *encoder)
1934 struct intel_crtc_state *crtc_state =
1935 intel_atomic_get_new_crtc_state(state, crtc);
1936 struct intel_shared_dpll *pll;
1938 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1939 pll = intel_find_shared_dpll(state, crtc,
1940 &crtc_state->dpll_hw_state,
1941 BIT(DPLL_ID_SKL_DPLL0));
1943 pll = intel_find_shared_dpll(state, crtc,
1944 &crtc_state->dpll_hw_state,
1945 BIT(DPLL_ID_SKL_DPLL3) |
1946 BIT(DPLL_ID_SKL_DPLL2) |
1947 BIT(DPLL_ID_SKL_DPLL1));
1951 intel_reference_shared_dpll(state, crtc,
1952 pll, &crtc_state->dpll_hw_state);
1954 crtc_state->shared_dpll = pll;
1959 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1960 const struct intel_shared_dpll *pll,
1961 const struct intel_dpll_hw_state *dpll_hw_state)
1963 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1966 * ctrl1 register is already shifted for each pll, just use 0 to get
1967 * the internal shift for each field
1969 if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1970 return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
1972 return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
1975 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1978 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1981 static void skl_dump_hw_state(struct drm_printer *p,
1982 const struct intel_dpll_hw_state *dpll_hw_state)
1984 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1986 drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1987 hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1990 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1991 const struct intel_dpll_hw_state *_b)
1993 const struct skl_dpll_hw_state *a = &_a->skl;
1994 const struct skl_dpll_hw_state *b = &_b->skl;
1996 return a->ctrl1 == b->ctrl1 &&
1997 a->cfgcr1 == b->cfgcr1 &&
1998 a->cfgcr2 == b->cfgcr2;
2001 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
2002 .enable = skl_ddi_pll_enable,
2003 .disable = skl_ddi_pll_disable,
2004 .get_hw_state = skl_ddi_pll_get_hw_state,
2005 .get_freq = skl_ddi_pll_get_freq,
2008 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
2009 .enable = skl_ddi_dpll0_enable,
2010 .disable = skl_ddi_dpll0_disable,
2011 .get_hw_state = skl_ddi_dpll0_get_hw_state,
2012 .get_freq = skl_ddi_pll_get_freq,
2015 static const struct dpll_info skl_plls[] = {
2016 { .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
2017 .always_on = true, },
2018 { .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2019 { .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2020 { .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
2024 static const struct intel_dpll_mgr skl_pll_mgr = {
2025 .dpll_info = skl_plls,
2026 .compute_dplls = skl_compute_dpll,
2027 .get_dplls = skl_get_dpll,
2028 .put_dplls = intel_put_dpll,
2029 .update_ref_clks = skl_update_dpll_ref_clks,
2030 .dump_hw_state = skl_dump_hw_state,
2031 .compare_hw_state = skl_compare_hw_state,
2034 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
2035 struct intel_shared_dpll *pll,
2036 const struct intel_dpll_hw_state *dpll_hw_state)
2038 const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2039 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2041 enum dpio_channel ch;
2044 bxt_port_to_phy_channel(i915, port, &phy, &ch);
2046 /* Non-SSC reference */
2047 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2049 if (IS_GEMINILAKE(i915)) {
2050 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2051 0, PORT_PLL_POWER_ENABLE);
2053 if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2054 PORT_PLL_POWER_STATE), 200))
2056 "Power state not set for PLL:%d\n", port);
2059 /* Disable 10 bit clock */
2060 intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2061 PORT_PLL_10BIT_CLK_ENABLE, 0);
2064 intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2065 PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
2067 /* Write M2 integer */
2068 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2069 PORT_PLL_M2_INT_MASK, hw_state->pll0);
2072 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2073 PORT_PLL_N_MASK, hw_state->pll1);
2075 /* Write M2 fraction */
2076 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2077 PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
2079 /* Write M2 fraction enable */
2080 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2081 PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
2084 temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2085 temp &= ~PORT_PLL_PROP_COEFF_MASK;
2086 temp &= ~PORT_PLL_INT_COEFF_MASK;
2087 temp &= ~PORT_PLL_GAIN_CTL_MASK;
2088 temp |= hw_state->pll6;
2089 intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2091 /* Write calibration val */
2092 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2093 PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
2095 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2096 PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
2098 temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2099 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2100 temp &= ~PORT_PLL_DCO_AMP_MASK;
2101 temp |= hw_state->pll10;
2102 intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2104 /* Recalibrate with new settings */
2105 temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2106 temp |= PORT_PLL_RECALIBRATE;
2107 intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2108 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2109 temp |= hw_state->ebb4;
2110 intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2113 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2114 intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2116 if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2118 drm_err(&i915->drm, "PLL %d not locked\n", port);
2120 if (IS_GEMINILAKE(i915)) {
2121 temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
2122 temp |= DCC_DELAY_RANGE_2;
2123 intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2127 * While we write to the group register to program all lanes at once we
2128 * can read only lane registers and we pick lanes 0/1 for that.
2130 temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2131 temp &= ~LANE_STAGGER_MASK;
2132 temp &= ~LANESTAGGER_STRAP_OVRD;
2133 temp |= hw_state->pcsdw12;
2134 intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2137 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2138 struct intel_shared_dpll *pll)
2140 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2142 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2143 intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2145 if (IS_GEMINILAKE(i915)) {
2146 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2147 PORT_PLL_POWER_ENABLE, 0);
2149 if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2150 PORT_PLL_POWER_STATE), 200))
2152 "Power state not reset for PLL:%d\n", port);
2156 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2157 struct intel_shared_dpll *pll,
2158 struct intel_dpll_hw_state *dpll_hw_state)
2160 struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2161 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2162 intel_wakeref_t wakeref;
2164 enum dpio_channel ch;
2168 bxt_port_to_phy_channel(i915, port, &phy, &ch);
2170 wakeref = intel_display_power_get_if_enabled(i915,
2171 POWER_DOMAIN_DISPLAY_CORE);
2177 val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2178 if (!(val & PORT_PLL_ENABLE))
2181 hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2182 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2184 hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2185 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2187 hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2188 hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2190 hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2191 hw_state->pll1 &= PORT_PLL_N_MASK;
2193 hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2194 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2196 hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2197 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2199 hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2200 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2201 PORT_PLL_INT_COEFF_MASK |
2202 PORT_PLL_GAIN_CTL_MASK;
2204 hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2205 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2207 hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2208 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2210 hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2211 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2212 PORT_PLL_DCO_AMP_MASK;
2215 * While we write to the group register to program all lanes at once we
2216 * can read only lane registers. We configure all lanes the same way, so
2217 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2219 hw_state->pcsdw12 = intel_de_read(i915,
2220 BXT_PORT_PCS_DW12_LN01(phy, ch));
2221 if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2223 "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2226 BXT_PORT_PCS_DW12_LN23(phy, ch)));
2227 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2232 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2237 /* pre-calculated values for DP linkrates */
2238 static const struct dpll bxt_dp_clk_val[] = {
2239 /* m2 is .22 binary fixed point */
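/* e.g. 0x819999a / (1 << 22) is ~32.4, matching the inline comments below */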
2240 { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2241 { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2242 { .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2243 { .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2244 { .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2245 { .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2246 { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2250 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2251 struct dpll *clk_div)
2253 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2255 /* Calculate HDMI div */
2257 * FIXME: tie the following calculation into
2258 * i9xx_crtc_compute_clock
2260 if (!bxt_find_best_dpll(crtc_state, clk_div))
2263 drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2268 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2269 struct dpll *clk_div)
2271 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2274 *clk_div = bxt_dp_clk_val[0];
2275 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2276 if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2277 *clk_div = bxt_dp_clk_val[i];
2282 chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2284 drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2285 clk_div->dot != crtc_state->port_clock);
2288 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2289 const struct dpll *clk_div)
2291 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2292 struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2293 int clock = crtc_state->port_clock;
2294 int vco = clk_div->vco;
2295 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2298 if (vco >= 6200000 && vco <= 6700000) {
2303 } else if ((vco > 5400000 && vco < 6200000) ||
2304 (vco >= 4800000 && vco < 5400000)) {
2309 } else if (vco == 5400000) {
2315 drm_err(&i915->drm, "Invalid VCO\n");
2321 else if (clock > 135000)
2323 else if (clock > 67000)
2325 else if (clock > 33000)
2330 hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2331 hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2332 hw_state->pll1 = PORT_PLL_N(clk_div->n);
2333 hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2335 if (clk_div->m2 & 0x3fffff)
2336 hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2338 hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2339 PORT_PLL_INT_COEFF(int_coef) |
2340 PORT_PLL_GAIN_CTL(gain_ctl);
2342 hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2344 hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2346 hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2347 PORT_PLL_DCO_AMP_OVR_EN_H;
2349 hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2351 hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2356 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2357 const struct intel_shared_dpll *pll,
2358 const struct intel_dpll_hw_state *dpll_hw_state)
2360 const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2364 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2365 if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2366 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2368 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2369 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2370 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2372 return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
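/*
 * Worked example (illustrative): with the 100 MHz non-SSC reference the
 * 2.7 GHz entry from bxt_dp_clk_val decodes back as
 *
 *	m = m1 * m2 = 2 * 0x6c00000	(m2 in .22 fixed point)
 *	vco = 100000 * m / (n << 22) = 5400000 kHz
 *	dot = vco / (p1 * p2 * 5) = 5400000 / (4 * 1 * 5) = 270000 kHz
 *
 * with the extra factor of 5 accounting for the AFE clock running at 5x
 * the link symbol rate; this is what chv_calc_dpll_params() computes from
 * the divider fields read out above.
 */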
2376 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2378 struct dpll clk_div = {};
2380 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2382 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2386 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2388 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2389 struct dpll clk_div = {};
2392 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2394 ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2398 crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2399 &crtc_state->dpll_hw_state);
2404 static int bxt_compute_dpll(struct intel_atomic_state *state,
2405 struct intel_crtc *crtc,
2406 struct intel_encoder *encoder)
2408 struct intel_crtc_state *crtc_state =
2409 intel_atomic_get_new_crtc_state(state, crtc);
2411 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2412 return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2413 else if (intel_crtc_has_dp_encoder(crtc_state))
2414 return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2419 static int bxt_get_dpll(struct intel_atomic_state *state,
2420 struct intel_crtc *crtc,
2421 struct intel_encoder *encoder)
2423 struct intel_crtc_state *crtc_state =
2424 intel_atomic_get_new_crtc_state(state, crtc);
2425 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2426 struct intel_shared_dpll *pll;
2427 enum intel_dpll_id id;
2429 /* 1:1 mapping between ports and PLLs */
2430 id = (enum intel_dpll_id) encoder->port;
2431 pll = intel_get_shared_dpll_by_id(i915, id);
2433 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2434 crtc->base.base.id, crtc->base.name, pll->info->name);
2436 intel_reference_shared_dpll(state, crtc,
2437 pll, &crtc_state->dpll_hw_state);
2439 crtc_state->shared_dpll = pll;
2444 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2446 i915->display.dpll.ref_clks.ssc = 100000;
2447 i915->display.dpll.ref_clks.nssc = 100000;
2448 /* DSI non-SSC ref 19.2MHz */
2451 static void bxt_dump_hw_state(struct drm_printer *p,
2452 const struct intel_dpll_hw_state *dpll_hw_state)
2454 const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2456 drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2457 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2458 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2459 hw_state->ebb0, hw_state->ebb4,
2460 hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2461 hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2465 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2466 const struct intel_dpll_hw_state *_b)
2468 const struct bxt_dpll_hw_state *a = &_a->bxt;
2469 const struct bxt_dpll_hw_state *b = &_b->bxt;
2471 return a->ebb0 == b->ebb0 &&
2472 a->ebb4 == b->ebb4 &&
2473 a->pll0 == b->pll0 &&
2474 a->pll1 == b->pll1 &&
2475 a->pll2 == b->pll2 &&
2476 a->pll3 == b->pll3 &&
2477 a->pll6 == b->pll6 &&
2478 a->pll8 == b->pll8 &&
a->pll9 == b->pll9 &&
2479 a->pll10 == b->pll10 &&
2480 a->pcsdw12 == b->pcsdw12;
2483 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2484 .enable = bxt_ddi_pll_enable,
2485 .disable = bxt_ddi_pll_disable,
2486 .get_hw_state = bxt_ddi_pll_get_hw_state,
2487 .get_freq = bxt_ddi_pll_get_freq,
2490 static const struct dpll_info bxt_plls[] = {
2491 { .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2492 { .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2493 { .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2497 static const struct intel_dpll_mgr bxt_pll_mgr = {
2498 .dpll_info = bxt_plls,
2499 .compute_dplls = bxt_compute_dpll,
2500 .get_dplls = bxt_get_dpll,
2501 .put_dplls = intel_put_dpll,
2502 .update_ref_clks = bxt_update_dpll_ref_clks,
2503 .dump_hw_state = bxt_dump_hw_state,
2504 .compare_hw_state = bxt_compare_hw_state,
2507 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2508 int *qdiv, int *kdiv)
2511 if (bestdiv % 2 == 0) {
2516 } else if (bestdiv % 4 == 0) {
2518 *qdiv = bestdiv / 4;
2520 } else if (bestdiv % 6 == 0) {
2522 *qdiv = bestdiv / 6;
2524 } else if (bestdiv % 5 == 0) {
2526 *qdiv = bestdiv / 10;
2528 } else if (bestdiv % 14 == 0) {
2530 *qdiv = bestdiv / 14;
2534 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2538 } else { /* 9, 15, 21 */
2539 *pdiv = bestdiv / 3;
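/*
 * Illustrative examples of the split above, assuming the elided branches
 * mirror the visible pattern (pdiv * qdiv * kdiv always equals bestdiv):
 *
 *	bestdiv = 20 -> pdiv = 2, qdiv = 5, kdiv = 2
 *	bestdiv = 15 -> pdiv = 5, qdiv = 1, kdiv = 3
 *	bestdiv =  7 -> pdiv = 7, qdiv = 1, kdiv = 1
 */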
2546 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2547 u32 dco_freq, u32 ref_freq,
2548 int pdiv, int qdiv, int kdiv)
2563 WARN(1, "Incorrect KDiv\n");
2580 WARN(1, "Incorrect PDiv\n");
2583 WARN_ON(kdiv != 2 && qdiv != 1);
2585 params->qdiv_ratio = qdiv;
2586 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2588 dco = div_u64((u64)dco_freq << 15, ref_freq);
2590 params->dco_integer = dco >> 15;
2591 params->dco_fraction = dco & 0x7fff;
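/*
 * Worked example (illustrative): an 8.1 GHz DCO with a 24 MHz reference
 * splits as
 *
 *	8100000 / 24000 = 337.5 -> dco_integer = 337 = 0x151
 *	                           dco_fraction = 0.5 * 2^15 = 0x4000
 *
 * which is the same 15-bit integer/fraction split used by the
 * pre-computed DP tables below.
 */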
2595 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2596 * Program half of the nominal DCO divider fraction value.
2599 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2601 return ((IS_ELKHARTLAKE(i915) &&
2602 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2603 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2604 i915->display.dpll.ref_clks.nssc == 38400;
2607 struct icl_combo_pll_params {
2609 struct skl_wrpll_params wrpll;
2613 * These values are already adjusted: they're the bits we write to the
2614 * registers, not the logical values.
2616 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2618 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2619 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2621 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2622 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2624 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2625 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2627 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2628 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2630 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2631 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2633 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2634 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2636 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2637 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2639 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2640 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2644 /* Also used for 38.4 MHz values. */
2645 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2647 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2648 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2650 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2651 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2653 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2654 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2656 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2657 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2659 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2660 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2662 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2663 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2665 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2666 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2668 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2669 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2672 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2673 .dco_integer = 0x151, .dco_fraction = 0x4000,
2674 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2677 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2678 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2679 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2682 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2683 .dco_integer = 0x54, .dco_fraction = 0x3000,
2684 /* the following params are unused */
2685 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2688 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2689 .dco_integer = 0x43, .dco_fraction = 0x4000,
2690 /* the following params are unused */
2693 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2694 struct skl_wrpll_params *pll_params)
2696 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2697 const struct icl_combo_pll_params *params =
2698 i915->display.dpll.ref_clks.nssc == 24000 ?
2699 icl_dp_combo_pll_24MHz_values :
2700 icl_dp_combo_pll_19_2MHz_values;
2701 int clock = crtc_state->port_clock;
2704 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2705 if (clock == params[i].clock) {
2706 *pll_params = params[i].wrpll;
2711 MISSING_CASE(clock);
2715 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2716 struct skl_wrpll_params *pll_params)
2718 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2720 if (DISPLAY_VER(i915) >= 12) {
2721 switch (i915->display.dpll.ref_clks.nssc) {
2723 MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2727 *pll_params = tgl_tbt_pll_19_2MHz_values;
2730 *pll_params = tgl_tbt_pll_24MHz_values;
2734 switch (i915->display.dpll.ref_clks.nssc) {
2736 MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2740 *pll_params = icl_tbt_pll_19_2MHz_values;
2743 *pll_params = icl_tbt_pll_24MHz_values;
2751 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2752 const struct intel_shared_dpll *pll,
2753 const struct intel_dpll_hw_state *dpll_hw_state)
2756 * The PLL outputs multiple frequencies at the same time; the selection is
2757 * made at the DDI clock mux level.
2759 drm_WARN_ON(&i915->drm, 1);
2764 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2766 int ref_clock = i915->display.dpll.ref_clks.nssc;
2769 * For ICL+, the spec states: if reference frequency is 38.4,
2770 * use 19.2 because the DPLL automatically divides that by 2.
2772 if (ref_clock == 38400)
2779 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2780 struct skl_wrpll_params *wrpll_params)
2782 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2783 int ref_clock = icl_wrpll_ref_clock(i915);
2784 u32 afe_clock = crtc_state->port_clock * 5;
2785 u32 dco_min = 7998000;
2786 u32 dco_max = 10000000;
2787 u32 dco_mid = (dco_min + dco_max) / 2;
2788 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2789 18, 20, 24, 28, 30, 32, 36, 40,
2790 42, 44, 48, 50, 52, 54, 56, 60,
2791 64, 66, 68, 70, 72, 76, 78, 80,
2792 84, 88, 90, 92, 96, 98, 100, 102,
2793 3, 5, 7, 9, 15, 21 };
2794 u32 dco, best_dco = 0, dco_centrality = 0;
2795 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2796 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2798 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2799 dco = afe_clock * dividers[d];
2801 if (dco <= dco_max && dco >= dco_min) {
2802 dco_centrality = abs(dco - dco_mid);
2804 if (dco_centrality < best_dco_centrality) {
2805 best_dco_centrality = dco_centrality;
2806 best_div = dividers[d];
2815 icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2816 icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
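/*
 * A minimal standalone sketch (illustrative, not driver code) of the
 * divider search above; the helper name and parameters are made up:
 *
 *	static int pick_divider(int afe_khz, const int *divs, int n)
 *	{
 *		int i, best = 0, best_dist = INT_MAX;
 *
 *		for (i = 0; i < n; i++) {
 *			int dco = afe_khz * divs[i];
 *
 *			if (dco < 7998000 || dco > 10000000)
 *				continue;
 *			if (abs(dco - 8999000) < best_dist) {
 *				best_dist = abs(dco - 8999000);
 *				best = divs[i];
 *			}
 *		}
 *
 *		return best;	// 0 if no divider hits the DCO window
 *	}
 *
 * e.g. a 148.5 MHz HDMI port clock gives afe_clock = 742500 kHz and only
 * divider 12 lands in the window (742500 * 12 = 8910000 kHz), which
 * icl_wrpll_get_multipliers() then splits into pdiv = 2, qdiv = 3, kdiv = 2.
 */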
2822 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2823 const struct intel_shared_dpll *pll,
2824 const struct intel_dpll_hw_state *dpll_hw_state)
2826 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2827 int ref_clock = icl_wrpll_ref_clock(i915);
2829 u32 p0, p1, p2, dco_freq;
2831 p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2832 p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2834 if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2835 p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2836 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2841 case DPLL_CFGCR1_PDIV_2:
2844 case DPLL_CFGCR1_PDIV_3:
2847 case DPLL_CFGCR1_PDIV_5:
2850 case DPLL_CFGCR1_PDIV_7:
2856 case DPLL_CFGCR1_KDIV_1:
2859 case DPLL_CFGCR1_KDIV_2:
2862 case DPLL_CFGCR1_KDIV_3:
2867 dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2870 dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2871 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2873 if (ehl_combo_pll_div_frac_wa_needed(i915))
2876 dco_freq += (dco_fraction * ref_clock) / 0x8000;
2878 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2881 return dco_freq / (p0 * p1 * p2 * 5);
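/*
 * Worked example (illustrative): the 5.4 GHz entry of the 24 MHz DP table
 * above (dco_integer = 0x151 = 337, dco_fraction = 0x4000, pdiv = 3,
 * kdiv = 1, qdiv = 1) reads back as
 *
 *	dco_freq = 337 * 24000 + (0x4000 * 24000) / 0x8000 = 8100000 kHz
 *	8100000 / (3 * 1 * 1 * 5) = 540000 kHz
 *
 * i.e. the HBR2 port clock.
 */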
2884 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2885 const struct skl_wrpll_params *pll_params,
2886 struct intel_dpll_hw_state *dpll_hw_state)
2888 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2889 u32 dco_fraction = pll_params->dco_fraction;
2891 if (ehl_combo_pll_div_frac_wa_needed(i915))
2892 dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2894 hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2895 pll_params->dco_integer;
2897 hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2898 DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2899 DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2900 DPLL_CFGCR1_PDIV(pll_params->pdiv);
2902 if (DISPLAY_VER(i915) >= 12)
2903 hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2905 hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2907 if (i915->display.vbt.override_afc_startup)
2908 hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2911 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2912 u32 *target_dco_khz,
2913 struct icl_dpll_hw_state *hw_state,
2916 static const u8 div1_vals[] = { 7, 5, 3, 2 };
2917 u32 dco_min_freq, dco_max_freq;
2921 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2922 dco_max_freq = is_dp ? 8100000 : 10000000;
2924 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2925 int div1 = div1_vals[i];
2927 for (div2 = 10; div2 > 0; div2--) {
2928 int dco = div1 * div2 * clock_khz * 5;
2929 int a_divratio, tlinedrv, inputsel;
2932 if (dco < dco_min_freq || dco > dco_max_freq)
2937 * Note: a_divratio does not match the TGL BSpec algorithm, but it
2938 * matches the hardcoded values and has been verified to work on HW,
2939 * at least for DP alt-mode
2941 a_divratio = is_dp ? 10 : 5;
2942 tlinedrv = is_dkl ? 1 : 2;
2947 inputsel = is_dp ? 0 : 1;
2954 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2957 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2960 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2963 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2967 *target_dco_khz = dco;
2969 hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2971 hw_state->mg_clktop2_coreclkctl1 =
2972 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2974 hw_state->mg_clktop2_hsclkctl =
2975 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2976 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2978 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2988 * The specification for this function uses real numbers, so the math had to be
2989 * adapted to an integer-only calculation, which is why it looks so different.
2991 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2992 struct intel_dpll_hw_state *dpll_hw_state)
2994 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2995 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2996 int refclk_khz = i915->display.dpll.ref_clks.nssc;
2997 int clock = crtc_state->port_clock;
2998 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2999 u32 iref_ndiv, iref_trim, iref_pulse_w;
3000 u32 prop_coeff, int_coeff;
3001 u32 tdc_targetcnt, feedfwgain;
3002 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3004 bool use_ssc = false;
3005 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3006 bool is_dkl = DISPLAY_VER(i915) >= 12;
3009 ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3015 m2div_int = dco_khz / (refclk_khz * m1div);
3016 if (m2div_int > 255) {
3019 m2div_int = dco_khz / (refclk_khz * m1div);
3022 if (m2div_int > 255)
3025 m2div_rem = dco_khz % (refclk_khz * m1div);
3027 tmp = (u64)m2div_rem * (1 << 22);
3028 do_div(tmp, refclk_khz * m1div);
3031 switch (refclk_khz) {
3048 MISSING_CASE(refclk_khz);
3053 * tdc_res = 0.000003
3054 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3056 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3057 * was supposed to be a division, but we rearranged the operations of
3058 * the formula to avoid early divisions so we don't multiply the rounding errors.
3061 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3062 * we also rearrange to work with integers.
3064 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3065 * last division by 10.
3067 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
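	/*
	 * Worked example (illustrative): for refclk_khz = 38400 the integer form
	 * gives (2000000000 / 5068800 + 5) / 10 = (394 + 5) / 10 = 39, matching
	 * int(2 / 0.00132 / 38.4 + 0.5) = 39 from the real-number formula above.
	 */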
3070 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3071 * 32 bits. That's not a problem since we round the division down anyway.
3074 feedfwgain = (use_ssc || m2div_rem > 0) ?
3075 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3077 if (dco_khz >= 9000000) {
3086 tmp = mul_u32_u32(dco_khz, 47 * 32);
3087 do_div(tmp, refclk_khz * m1div * 10000);
3090 tmp = mul_u32_u32(dco_khz, 1000);
3091 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3098 /* write pll_state calculations */
3100 hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3101 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3102 DKL_PLL_DIV0_FBPREDIV(m1div) |
3103 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3104 if (i915->display.vbt.override_afc_startup) {
3105 u8 val = i915->display.vbt.override_afc_startup_val;
3107 hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3110 hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3111 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3113 hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3114 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3115 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3116 (use_ssc ? DKL_PLL_SSC_EN : 0);
3118 hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3119 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3121 hw_state->mg_pll_tdc_coldst_bias =
3122 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3123 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3126 hw_state->mg_pll_div0 =
3127 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3128 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3129 MG_PLL_DIV0_FBDIV_INT(m2div_int);
3131 hw_state->mg_pll_div1 =
3132 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3133 MG_PLL_DIV1_DITHER_DIV_2 |
3134 MG_PLL_DIV1_NDIVRATIO(1) |
3135 MG_PLL_DIV1_FBPREDIV(m1div);
3137 hw_state->mg_pll_lf =
3138 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3139 MG_PLL_LF_AFCCNTSEL_512 |
3140 MG_PLL_LF_GAINCTRL(1) |
3141 MG_PLL_LF_INT_COEFF(int_coeff) |
3142 MG_PLL_LF_PROP_COEFF(prop_coeff);
3144 hw_state->mg_pll_frac_lock =
3145 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3146 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3147 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3148 MG_PLL_FRAC_LOCK_DCODITHEREN |
3149 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3150 if (use_ssc || m2div_rem > 0)
3151 hw_state->mg_pll_frac_lock |=
3152 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3154 hw_state->mg_pll_ssc =
3155 (use_ssc ? MG_PLL_SSC_EN : 0) |
3156 MG_PLL_SSC_TYPE(2) |
3157 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3158 MG_PLL_SSC_STEPNUM(ssc_steplog) |
3160 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3162 hw_state->mg_pll_tdc_coldst_bias =
3163 MG_PLL_TDC_COLDST_COLDSTART |
3164 MG_PLL_TDC_COLDST_IREFINT_EN |
3165 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3166 MG_PLL_TDC_TDCOVCCORR_EN |
3167 MG_PLL_TDC_TDCSEL(3);
3169 hw_state->mg_pll_bias =
3170 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3171 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3172 MG_PLL_BIAS_BIAS_BONUS(10) |
3173 MG_PLL_BIAS_BIASCAL_EN |
3174 MG_PLL_BIAS_CTRIM(12) |
3175 MG_PLL_BIAS_VREF_RDAC(4) |
3176 MG_PLL_BIAS_IREFTRIM(iref_trim);
3178 if (refclk_khz == 38400) {
3179 hw_state->mg_pll_tdc_coldst_bias_mask =
3180 MG_PLL_TDC_COLDST_COLDSTART;
3181 hw_state->mg_pll_bias_mask = 0;
3183 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3184 hw_state->mg_pll_bias_mask = -1U;
3187 hw_state->mg_pll_tdc_coldst_bias &=
3188 hw_state->mg_pll_tdc_coldst_bias_mask;
3189 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3195 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3196 const struct intel_shared_dpll *pll,
3197 const struct intel_dpll_hw_state *dpll_hw_state)
3199 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3200 u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3203 ref_clock = i915->display.dpll.ref_clks.nssc;
3205 if (DISPLAY_VER(i915) >= 12) {
3206 m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3207 m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3208 m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3210 if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3211 m2_frac = hw_state->mg_pll_bias &
3212 DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3213 m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3218 m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3219 m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3221 if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3222 m2_frac = hw_state->mg_pll_div0 &
3223 MG_PLL_DIV0_FBDIV_FRAC_MASK;
3224 m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3230 switch (hw_state->mg_clktop2_hsclkctl &
3231 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3232 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3235 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3238 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3241 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3245 MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3249 div2 = (hw_state->mg_clktop2_hsclkctl &
3250 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3251 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3253 /* a div2 value of 0 is the same as 1, i.e. no division */
3258 * Adjust the original formula to delay the division by 2^22 in order to
3259 * minimize possible rounding errors.
3261 tmp = (u64)m1 * m2_int * ref_clock +
3262 (((u64)m1 * m2_frac * ref_clock) >> 22);
3263 tmp = div_u64(tmp, 5 * div1 * div2);
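	/*
	 * Worked example (illustrative, assuming the usual 38.4 MHz reference):
	 * a 2.7 GHz DP link ends up with m1 = 2, m2_int = 105, m2_frac = 0x1e0000,
	 * div1 = 3, div2 = 2, so
	 *
	 *	tmp = 2 * 105 * 38400 + ((2 * 0x1e0000 * 38400) >> 22)
	 *	    = 8064000 + 36000 = 8100000
	 *
	 * and 8100000 / (5 * 3 * 2) = 270000 kHz, the original port clock.
	 */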
3269 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3270 * @crtc_state: state for the CRTC to select the DPLL for
3271 * @port_dpll_id: the active @port_dpll_id to select
3273 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3276 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3277 enum icl_port_dpll_id port_dpll_id)
3279 struct icl_port_dpll *port_dpll =
3280 &crtc_state->icl_port_dplls[port_dpll_id];
3282 crtc_state->shared_dpll = port_dpll->pll;
3283 crtc_state->dpll_hw_state = port_dpll->hw_state;
3286 static void icl_update_active_dpll(struct intel_atomic_state *state,
3287 struct intel_crtc *crtc,
3288 struct intel_encoder *encoder)
3290 struct intel_crtc_state *crtc_state =
3291 intel_atomic_get_new_crtc_state(state, crtc);
3292 struct intel_digital_port *primary_port;
3293 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3295 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3296 enc_to_mst(encoder)->primary :
3297 enc_to_dig_port(encoder);
3300 (intel_tc_port_in_dp_alt_mode(primary_port) ||
3301 intel_tc_port_in_legacy_mode(primary_port)))
3302 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3304 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3307 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3308 struct intel_crtc *crtc)
3310 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3311 struct intel_crtc_state *crtc_state =
3312 intel_atomic_get_new_crtc_state(state, crtc);
3313 struct icl_port_dpll *port_dpll =
3314 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3315 struct skl_wrpll_params pll_params = {};
3318 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3319 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3320 ret = icl_calc_wrpll(crtc_state, &pll_params);
3322 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3327 icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3329 /* this is mainly for the fastset check */
3330 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3332 crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3333 &port_dpll->hw_state);
3338 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3339 struct intel_crtc *crtc,
3340 struct intel_encoder *encoder)
3342 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3343 struct intel_crtc_state *crtc_state =
3344 intel_atomic_get_new_crtc_state(state, crtc);
3345 struct icl_port_dpll *port_dpll =
3346 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3347 enum port port = encoder->port;
3348 unsigned long dpll_mask;
3350 if (IS_ALDERLAKE_S(i915)) {
3352 BIT(DPLL_ID_DG1_DPLL3) |
3353 BIT(DPLL_ID_DG1_DPLL2) |
3354 BIT(DPLL_ID_ICL_DPLL1) |
3355 BIT(DPLL_ID_ICL_DPLL0);
3356 } else if (IS_DG1(i915)) {
3357 if (port == PORT_D || port == PORT_E) {
3359 BIT(DPLL_ID_DG1_DPLL2) |
3360 BIT(DPLL_ID_DG1_DPLL3);
3363 BIT(DPLL_ID_DG1_DPLL0) |
3364 BIT(DPLL_ID_DG1_DPLL1);
3366 } else if (IS_ROCKETLAKE(i915)) {
3368 BIT(DPLL_ID_EHL_DPLL4) |
3369 BIT(DPLL_ID_ICL_DPLL1) |
3370 BIT(DPLL_ID_ICL_DPLL0);
3371 } else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3374 BIT(DPLL_ID_EHL_DPLL4) |
3375 BIT(DPLL_ID_ICL_DPLL1) |
3376 BIT(DPLL_ID_ICL_DPLL0);
3378 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3381 /* Eliminate DPLLs from consideration if reserved by HTI */
3382 dpll_mask &= ~intel_hti_dpll_mask(i915);
3384 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3385 &port_dpll->hw_state,
3387 if (!port_dpll->pll)
3390 intel_reference_shared_dpll(state, crtc,
3391 port_dpll->pll, &port_dpll->hw_state);
3393 icl_update_active_dpll(state, crtc, encoder);
3398 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3399 struct intel_crtc *crtc)
3401 struct drm_i915_private *i915 = to_i915(state->base.dev);
3402 struct intel_crtc_state *crtc_state =
3403 intel_atomic_get_new_crtc_state(state, crtc);
3404 const struct intel_crtc_state *old_crtc_state =
3405 intel_atomic_get_old_crtc_state(state, crtc);
3406 struct icl_port_dpll *port_dpll =
3407 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3408 struct skl_wrpll_params pll_params = {};
3411 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3412 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3416 icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3418 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3419 ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3423 /* this is mainly for the fastset check */
3424 if (old_crtc_state->shared_dpll &&
3425 old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3426 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3428 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3430 crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3431 &port_dpll->hw_state);
3436 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3437 struct intel_crtc *crtc,
3438 struct intel_encoder *encoder)
3440 struct intel_crtc_state *crtc_state =
3441 intel_atomic_get_new_crtc_state(state, crtc);
3442 struct icl_port_dpll *port_dpll =
3443 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3444 enum intel_dpll_id dpll_id;
3447 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3448 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3449 &port_dpll->hw_state,
3450 BIT(DPLL_ID_ICL_TBTPLL));
3451 if (!port_dpll->pll)
3453 intel_reference_shared_dpll(state, crtc,
3454 port_dpll->pll, &port_dpll->hw_state);
3457 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3458 dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3459 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3460 &port_dpll->hw_state,
3462 if (!port_dpll->pll) {
3464 goto err_unreference_tbt_pll;
3466 intel_reference_shared_dpll(state, crtc,
3467 port_dpll->pll, &port_dpll->hw_state);
3469 icl_update_active_dpll(state, crtc, encoder);
3473 err_unreference_tbt_pll:
3474 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3475 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3480 static int icl_compute_dplls(struct intel_atomic_state *state,
3481 struct intel_crtc *crtc,
3482 struct intel_encoder *encoder)
3484 if (intel_encoder_is_combo(encoder))
3485 return icl_compute_combo_phy_dpll(state, crtc);
3486 else if (intel_encoder_is_tc(encoder))
3487 return icl_compute_tc_phy_dplls(state, crtc);
3489 MISSING_CASE(encoder->port);
3494 static int icl_get_dplls(struct intel_atomic_state *state,
3495 struct intel_crtc *crtc,
3496 struct intel_encoder *encoder)
3498 if (intel_encoder_is_combo(encoder))
3499 return icl_get_combo_phy_dpll(state, crtc, encoder);
3500 else if (intel_encoder_is_tc(encoder))
3501 return icl_get_tc_phy_dplls(state, crtc, encoder);
3503 MISSING_CASE(encoder->port);
3508 static void icl_put_dplls(struct intel_atomic_state *state,
3509 struct intel_crtc *crtc)
3511 const struct intel_crtc_state *old_crtc_state =
3512 intel_atomic_get_old_crtc_state(state, crtc);
3513 struct intel_crtc_state *new_crtc_state =
3514 intel_atomic_get_new_crtc_state(state, crtc);
3515 enum icl_port_dpll_id id;
3517 new_crtc_state->shared_dpll = NULL;
3519 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3520 const struct icl_port_dpll *old_port_dpll =
3521 &old_crtc_state->icl_port_dplls[id];
3522 struct icl_port_dpll *new_port_dpll =
3523 &new_crtc_state->icl_port_dplls[id];
3525 new_port_dpll->pll = NULL;
3527 if (!old_port_dpll->pll)
3530 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3534 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3535 struct intel_shared_dpll *pll,
3536 struct intel_dpll_hw_state *dpll_hw_state)
3538 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3539 const enum intel_dpll_id id = pll->info->id;
3540 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3541 intel_wakeref_t wakeref;
3545 i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3547 wakeref = intel_display_power_get_if_enabled(i915,
3548 POWER_DOMAIN_DISPLAY_CORE);
3552 val = intel_de_read(i915, enable_reg);
3553 if (!(val & PLL_ENABLE))
3556 hw_state->mg_refclkin_ctl = intel_de_read(i915,
3557 MG_REFCLKIN_CTL(tc_port));
3558 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3560 hw_state->mg_clktop2_coreclkctl1 =
3561 intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3562 hw_state->mg_clktop2_coreclkctl1 &=
3563 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3565 hw_state->mg_clktop2_hsclkctl =
3566 intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3567 hw_state->mg_clktop2_hsclkctl &=
3568 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3569 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3570 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3571 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3573 hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3574 hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3575 hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3576 hw_state->mg_pll_frac_lock = intel_de_read(i915,
3577 MG_PLL_FRAC_LOCK(tc_port));
3578 hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3580 hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3581 hw_state->mg_pll_tdc_coldst_bias =
3582 intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3584 if (i915->display.dpll.ref_clks.nssc == 38400) {
3585 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3586 hw_state->mg_pll_bias_mask = 0;
3588 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3589 hw_state->mg_pll_bias_mask = -1U;
3592 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3593 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3597 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3601 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3602 struct intel_shared_dpll *pll,
3603 struct intel_dpll_hw_state *dpll_hw_state)
3605 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3606 const enum intel_dpll_id id = pll->info->id;
3607 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3608 intel_wakeref_t wakeref;
3612 wakeref = intel_display_power_get_if_enabled(i915,
3613 POWER_DOMAIN_DISPLAY_CORE);
3617 val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3618 if (!(val & PLL_ENABLE))
3622 * All registers read here have the same HIP_INDEX_REG even though
3623 * they are on different building blocks
3625 hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3626 DKL_REFCLKIN_CTL(tc_port));
3627 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3629 hw_state->mg_clktop2_hsclkctl =
3630 intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3631 hw_state->mg_clktop2_hsclkctl &=
3632 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3633 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3634 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3635 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3637 hw_state->mg_clktop2_coreclkctl1 =
3638 intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3639 hw_state->mg_clktop2_coreclkctl1 &=
3640 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3642 hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3643 val = DKL_PLL_DIV0_MASK;
3644 if (i915->display.vbt.override_afc_startup)
3645 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3646 hw_state->mg_pll_div0 &= val;
3648 hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3649 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3650 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3652 hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3653 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3654 DKL_PLL_SSC_STEP_LEN_MASK |
3655 DKL_PLL_SSC_STEP_NUM_MASK |
3658 hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3659 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3660 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3662 hw_state->mg_pll_tdc_coldst_bias =
3663 intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3664 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3665 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3669 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3673 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3674 struct intel_shared_dpll *pll,
3675 struct intel_dpll_hw_state *dpll_hw_state,
3676 i915_reg_t enable_reg)
3678 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3679 const enum intel_dpll_id id = pll->info->id;
3680 intel_wakeref_t wakeref;
3684 wakeref = intel_display_power_get_if_enabled(i915,
3685 POWER_DOMAIN_DISPLAY_CORE);
3689 val = intel_de_read(i915, enable_reg);
3690 if (!(val & PLL_ENABLE))
3693 if (IS_ALDERLAKE_S(i915)) {
3694 hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3695 hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3696 } else if (IS_DG1(i915)) {
3697 hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3698 hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3699 } else if (IS_ROCKETLAKE(i915)) {
3700 hw_state->cfgcr0 = intel_de_read(i915,
3701 RKL_DPLL_CFGCR0(id));
3702 hw_state->cfgcr1 = intel_de_read(i915,
3703 RKL_DPLL_CFGCR1(id));
3704 } else if (DISPLAY_VER(i915) >= 12) {
3705 hw_state->cfgcr0 = intel_de_read(i915,
3706 TGL_DPLL_CFGCR0(id));
3707 hw_state->cfgcr1 = intel_de_read(i915,
3708 TGL_DPLL_CFGCR1(id));
3709 if (i915->display.vbt.override_afc_startup) {
3710 hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3711 hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3714 if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3715 id == DPLL_ID_EHL_DPLL4) {
3716 hw_state->cfgcr0 = intel_de_read(i915,
3717 ICL_DPLL_CFGCR0(4));
3718 hw_state->cfgcr1 = intel_de_read(i915,
3719 ICL_DPLL_CFGCR1(4));
3721 hw_state->cfgcr0 = intel_de_read(i915,
3722 ICL_DPLL_CFGCR0(id));
3723 hw_state->cfgcr1 = intel_de_read(i915,
3724 ICL_DPLL_CFGCR1(id));
3730 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3734 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3735 struct intel_shared_dpll *pll,
3736 struct intel_dpll_hw_state *dpll_hw_state)
3738 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3740 return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
3743 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3744 struct intel_shared_dpll *pll,
3745 struct intel_dpll_hw_state *dpll_hw_state)
3747 return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
3750 static void icl_dpll_write(struct drm_i915_private *i915,
3751 struct intel_shared_dpll *pll,
3752 const struct icl_dpll_hw_state *hw_state)
3754 const enum intel_dpll_id id = pll->info->id;
3755 i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3757 if (IS_ALDERLAKE_S(i915)) {
3758 cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3759 cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3760 } else if (IS_DG1(i915)) {
3761 cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3762 cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3763 } else if (IS_ROCKETLAKE(i915)) {
3764 cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3765 cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3766 } else if (DISPLAY_VER(i915) >= 12) {
3767 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3768 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3769 div0_reg = TGL_DPLL0_DIV0(id);
3771 if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3772 id == DPLL_ID_EHL_DPLL4) {
3773 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3774 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3776 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3777 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3781 intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3782 intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3783 drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3784 !i915_mmio_reg_valid(div0_reg));
3785 if (i915->display.vbt.override_afc_startup &&
3786 i915_mmio_reg_valid(div0_reg))
3787 intel_de_rmw(i915, div0_reg,
3788 TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3789 intel_de_posting_read(i915, cfgcr1_reg);
3792 static void icl_mg_pll_write(struct drm_i915_private *i915,
3793 struct intel_shared_dpll *pll,
3794 const struct icl_dpll_hw_state *hw_state)
3796 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3799 * Some of the following registers have reserved fields, so program
3800 * these with RMW based on a mask. The mask can be fixed or generated
3801 * during the calc/readout phase if the mask depends on some other HW
3802 * state like refclk, see icl_calc_mg_pll_state().
3804 intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3805 MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3807 intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3808 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3809 hw_state->mg_clktop2_coreclkctl1);
3811 intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3812 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3813 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3814 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3815 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3816 hw_state->mg_clktop2_hsclkctl);
3818 intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3819 intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3820 intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3821 intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3822 hw_state->mg_pll_frac_lock);
3823 intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3825 intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3826 hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3828 intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3829 hw_state->mg_pll_tdc_coldst_bias_mask,
3830 hw_state->mg_pll_tdc_coldst_bias);
3832 intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3835 static void dkl_pll_write(struct drm_i915_private *i915,
3836 struct intel_shared_dpll *pll,
3837 const struct icl_dpll_hw_state *hw_state)
3839 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3843 * All registers programmed here have the same HIP_INDEX_REG even
3844 * though they are on different building blocks
3846 /* All the registers are RMW */
3847 val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3848 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3849 val |= hw_state->mg_refclkin_ctl;
3850 intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3852 val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3853 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3854 val |= hw_state->mg_clktop2_coreclkctl1;
3855 intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3857 val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3858 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3859 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3860 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3861 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3862 val |= hw_state->mg_clktop2_hsclkctl;
3863 intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3865 val = DKL_PLL_DIV0_MASK;
3866 if (i915->display.vbt.override_afc_startup)
3867 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3868 intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3869 hw_state->mg_pll_div0);
3871 val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3872 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3873 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3874 val |= hw_state->mg_pll_div1;
3875 intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3877 val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3878 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3879 DKL_PLL_SSC_STEP_LEN_MASK |
3880 DKL_PLL_SSC_STEP_NUM_MASK |
3882 val |= hw_state->mg_pll_ssc;
3883 intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3885 val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3886 val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3887 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3888 val |= hw_state->mg_pll_bias;
3889 intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3891 val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3892 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3893 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3894 val |= hw_state->mg_pll_tdc_coldst_bias;
3895 intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3897 intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3900 static void icl_pll_power_enable(struct drm_i915_private *i915,
3901 struct intel_shared_dpll *pll,
3902 i915_reg_t enable_reg)
3904 intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3907 * The spec says we need to "wait" but it also says it should be immediate.
3910 if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3911 drm_err(&i915->drm, "PLL %d Power not enabled\n",
3915 static void icl_pll_enable(struct drm_i915_private *i915,
3916 struct intel_shared_dpll *pll,
3917 i915_reg_t enable_reg)
3919 intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3921 /* Timeout is actually 600us. */
3922 if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3923 drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3926 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3930 if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3931 pll->info->id != DPLL_ID_ICL_DPLL0)
3934 * Wa_16011069516:adl-p[a0]
3936 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3937 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3938 * sanity check this assumption with a double read, which presumably
3939 * returns the correct value even with clock gating on.
3941 * Instead of the usual place for workarounds we apply this one here,
3942 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3944 val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3945 val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3946 if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3947 drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3950 static void combo_pll_enable(struct drm_i915_private *i915,
3951 struct intel_shared_dpll *pll,
3952 const struct intel_dpll_hw_state *dpll_hw_state)
3954 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3955 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3957 icl_pll_power_enable(i915, pll, enable_reg);
3959 icl_dpll_write(i915, pll, hw_state);
3962 * DVFS pre sequence would be here, but in our driver the cdclk code
3963 * paths should already be setting the appropriate voltage, hence we do nothing.
3967 icl_pll_enable(i915, pll, enable_reg);
3969 adlp_cmtg_clock_gating_wa(i915, pll);
3971 /* DVFS post sequence would be here. See the comment above. */
3974 static void tbt_pll_enable(struct drm_i915_private *i915,
3975 struct intel_shared_dpll *pll,
3976 const struct intel_dpll_hw_state *dpll_hw_state)
3978 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3980 icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3982 icl_dpll_write(i915, pll, hw_state);
3985 * DVFS pre sequence would be here, but in our driver the cdclk code
3986 * paths should already be setting the appropriate voltage, hence we do nothing.
3990 icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3992 /* DVFS post sequence would be here. See the comment above. */
3995 static void mg_pll_enable(struct drm_i915_private *i915,
3996 struct intel_shared_dpll *pll,
3997 const struct intel_dpll_hw_state *dpll_hw_state)
3999 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4000 i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4002 icl_pll_power_enable(i915, pll, enable_reg);
4004 if (DISPLAY_VER(i915) >= 12)
4005 dkl_pll_write(i915, pll, hw_state);
4007 icl_mg_pll_write(i915, pll, hw_state);
4010 * DVFS pre sequence would be here, but in our driver the cdclk code
4011 * paths should already be setting the appropriate voltage, hence we do nothing.
4015 icl_pll_enable(i915, pll, enable_reg);
4017 /* DVFS post sequence would be here. See the comment above. */
4020 static void icl_pll_disable(struct drm_i915_private *i915,
4021 struct intel_shared_dpll *pll,
4022 i915_reg_t enable_reg)
4024 /* The first steps are done by intel_ddi_post_disable(). */
4027 * DVFS pre sequence would be here, but in our driver the cdclk code
4028 * paths should already be setting the appropriate voltage, hence we do nothing.
4032 intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
4034 /* Timeout is actually 1us. */
4035 if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
4036 drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
4038 /* DVFS post sequence would be here. See the comment above. */
4040 intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
4043 * The spec says we need to "wait" but it also says it should be immediate.
4046 if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
4047 drm_err(&i915->drm, "PLL %d Power not disabled\n",
4051 static void combo_pll_disable(struct drm_i915_private *i915,
4052 struct intel_shared_dpll *pll)
4054 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
4056 icl_pll_disable(i915, pll, enable_reg);
4059 static void tbt_pll_disable(struct drm_i915_private *i915,
4060 struct intel_shared_dpll *pll)
4062 icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
4065 static void mg_pll_disable(struct drm_i915_private *i915,
4066 struct intel_shared_dpll *pll)
4068 i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4070 icl_pll_disable(i915, pll, enable_reg);
4073 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4076 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4079 static void icl_dump_hw_state(struct drm_printer *p,
4080 const struct intel_dpll_hw_state *dpll_hw_state)
4082 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4084 drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4085 "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
4086 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4087 "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
4088 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4089 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4090 hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4091 hw_state->mg_refclkin_ctl,
4092 hw_state->mg_clktop2_coreclkctl1,
4093 hw_state->mg_clktop2_hsclkctl,
4094 hw_state->mg_pll_div0,
4095 hw_state->mg_pll_div1,
4096 hw_state->mg_pll_lf,
4097 hw_state->mg_pll_frac_lock,
4098 hw_state->mg_pll_ssc,
4099 hw_state->mg_pll_bias,
4100 hw_state->mg_pll_tdc_coldst_bias);
4103 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4104 const struct intel_dpll_hw_state *_b)
4106 const struct icl_dpll_hw_state *a = &_a->icl;
4107 const struct icl_dpll_hw_state *b = &_b->icl;
4109 /* FIXME split combo vs. mg more thoroughly */
4110 return a->cfgcr0 == b->cfgcr0 &&
4111 a->cfgcr1 == b->cfgcr1 &&
4112 a->div0 == b->div0 &&
4113 a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4114 a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4115 a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4116 a->mg_pll_div0 == b->mg_pll_div0 &&
4117 a->mg_pll_div1 == b->mg_pll_div1 &&
4118 a->mg_pll_lf == b->mg_pll_lf &&
4119 a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4120 a->mg_pll_ssc == b->mg_pll_ssc &&
4121 a->mg_pll_bias == b->mg_pll_bias &&
4122 a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4125 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4126 .enable = combo_pll_enable,
4127 .disable = combo_pll_disable,
4128 .get_hw_state = combo_pll_get_hw_state,
4129 .get_freq = icl_ddi_combo_pll_get_freq,
4132 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4133 .enable = tbt_pll_enable,
4134 .disable = tbt_pll_disable,
4135 .get_hw_state = tbt_pll_get_hw_state,
4136 .get_freq = icl_ddi_tbt_pll_get_freq,
4139 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4140 .enable = mg_pll_enable,
4141 .disable = mg_pll_disable,
4142 .get_hw_state = mg_pll_get_hw_state,
4143 .get_freq = icl_ddi_mg_pll_get_freq,
4146 static const struct dpll_info icl_plls[] = {
4147 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4148 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4149 { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4150 .is_alt_port_dpll = true, },
4151 { .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4152 { .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4153 { .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4154 { .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4158 static const struct intel_dpll_mgr icl_pll_mgr = {
4159 .dpll_info = icl_plls,
4160 .compute_dplls = icl_compute_dplls,
4161 .get_dplls = icl_get_dplls,
4162 .put_dplls = icl_put_dplls,
4163 .update_active_dpll = icl_update_active_dpll,
4164 .update_ref_clks = icl_update_dpll_ref_clks,
4165 .dump_hw_state = icl_dump_hw_state,
4166 .compare_hw_state = icl_compare_hw_state,
4169 static const struct dpll_info ehl_plls[] = {
4170 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4171 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4172 { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4173 .power_domain = POWER_DOMAIN_DC_OFF, },
4177 static const struct intel_dpll_mgr ehl_pll_mgr = {
4178 .dpll_info = ehl_plls,
4179 .compute_dplls = icl_compute_dplls,
4180 .get_dplls = icl_get_dplls,
4181 .put_dplls = icl_put_dplls,
4182 .update_ref_clks = icl_update_dpll_ref_clks,
4183 .dump_hw_state = icl_dump_hw_state,
4184 .compare_hw_state = icl_compare_hw_state,
4187 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4188 .enable = mg_pll_enable,
4189 .disable = mg_pll_disable,
4190 .get_hw_state = dkl_pll_get_hw_state,
4191 .get_freq = icl_ddi_mg_pll_get_freq,
4194 static const struct dpll_info tgl_plls[] = {
4195 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4196 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4197 { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4198 .is_alt_port_dpll = true, },
4199 { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4200 { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4201 { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4202 { .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4203 { .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4204 { .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4208 static const struct intel_dpll_mgr tgl_pll_mgr = {
4209 .dpll_info = tgl_plls,
4210 .compute_dplls = icl_compute_dplls,
4211 .get_dplls = icl_get_dplls,
4212 .put_dplls = icl_put_dplls,
4213 .update_active_dpll = icl_update_active_dpll,
4214 .update_ref_clks = icl_update_dpll_ref_clks,
4215 .dump_hw_state = icl_dump_hw_state,
4216 .compare_hw_state = icl_compare_hw_state,
4219 static const struct dpll_info rkl_plls[] = {
4220 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4221 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4222 { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4226 static const struct intel_dpll_mgr rkl_pll_mgr = {
4227 .dpll_info = rkl_plls,
4228 .compute_dplls = icl_compute_dplls,
4229 .get_dplls = icl_get_dplls,
4230 .put_dplls = icl_put_dplls,
4231 .update_ref_clks = icl_update_dpll_ref_clks,
4232 .dump_hw_state = icl_dump_hw_state,
4233 .compare_hw_state = icl_compare_hw_state,

static const struct dpll_info dg1_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

static const struct dpll_info adls_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
	{}
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

static const struct dpll_info adlp_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
	  .is_alt_port_dpll = true, },
	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
	{}
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
	.compare_hw_state = icl_compare_hw_state,
};

/**
 * intel_shared_dpll_init - Initialize shared DPLLs
 * @i915: i915 device
 *
 * Initialize shared DPLLs for @i915.
 */
void intel_shared_dpll_init(struct drm_i915_private *i915)
{
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	mutex_init(&i915->display.dpll.lock);

	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
		dpll_mgr = NULL;
	else if (IS_ALDERLAKE_P(i915))
		dpll_mgr = &adlp_pll_mgr;
	else if (IS_ALDERLAKE_S(i915))
		dpll_mgr = &adls_pll_mgr;
	else if (IS_DG1(i915))
		dpll_mgr = &dg1_pll_mgr;
	else if (IS_ROCKETLAKE(i915))
		dpll_mgr = &rkl_pll_mgr;
	else if (DISPLAY_VER(i915) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
		dpll_mgr = &ehl_pll_mgr;
	else if (DISPLAY_VER(i915) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		dpll_mgr = &bxt_pll_mgr;
	else if (DISPLAY_VER(i915) == 9)
		dpll_mgr = &skl_pll_mgr;
	else if (HAS_DDI(i915))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
		dpll_mgr = &pch_pll_mgr;

	if (!dpll_mgr)
		return;

	dpll_info = dpll_mgr->dpll_info;

	for (i = 0; dpll_info[i].name; i++) {
		if (drm_WARN_ON(&i915->drm,
				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
			break;

		/* must fit into unsigned long bitmask on 32bit */
		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
			break;

		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
		i915->display.dpll.shared_dplls[i].index = i;
	}

	i915->display.dpll.mgr = dpll_mgr;
	i915->display.dpll.num_shared_dpll = i;
}

/**
 * intel_compute_shared_dplls - compute DPLL state for CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to compute DPLLs for
 * @encoder: encoder
 *
 * This function computes the DPLL state for the given CRTC and encoder.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int intel_compute_shared_dplls(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;

	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
		return -EINVAL;

	return dpll_mgr->compute_dplls(state, crtc, encoder);
}
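
/*
 * Editor's note - an illustrative sketch only, not taken from this file: a
 * hypothetical atomic-check helper (example_crtc_get_dpll() is an invented
 * name) would typically compute the DPLL state first and then reserve it:
 *
 *	static int example_crtc_get_dpll(struct intel_atomic_state *state,
 *					 struct intel_crtc *crtc,
 *					 struct intel_encoder *encoder)
 *	{
 *		int ret;
 *
 *		ret = intel_compute_shared_dplls(state, crtc, encoder);
 *		if (ret)
 *			return ret;
 *
 *		return intel_reserve_shared_dplls(state, crtc, encoder);
 *	}
 *
 * The reservation staged this way only becomes effective once
 * intel_shared_dpll_swap_state() is called, and is dropped again via
 * intel_release_shared_dplls().
 */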

/**
 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to reserve DPLLs for
 * @encoder: encoder
 *
 * This function reserves all required DPLLs for the given CRTC and encoder
 * combination in the current atomic commit @state and the new @crtc atomic
 * state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * The reserved DPLLs should be released by calling
 * intel_release_shared_dplls().
 *
 * Returns:
 * 0 if all required DPLLs were successfully reserved,
 * negative error code otherwise.
 */
int intel_reserve_shared_dplls(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;

	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
		return -EINVAL;

	return dpll_mgr->get_dplls(state, crtc, encoder);
}

/**
 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
 * @state: atomic state
 * @crtc: crtc from which the DPLLs are to be released
 *
 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
 * from the current atomic commit @state and the old @crtc atomic state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 */
void intel_release_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;

	/*
	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even though the platform doesn't yet support
	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
	 * called on those.
	 */
	if (!dpll_mgr)
		return;

	dpll_mgr->put_dplls(state, crtc);
}

/**
 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
 * @state: atomic state
 * @crtc: the CRTC for which to update the active DPLL
 * @encoder: encoder determining the type of port DPLL
 *
 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
 * DPLL selected will be based on the current mode of the encoder's port.
 */
void intel_update_active_dpll(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;

	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
		return;

	dpll_mgr->update_active_dpll(state, crtc, encoder);
}

/**
 * intel_dpll_get_freq - calculate the DPLL's output frequency
 * @i915: i915 device
 * @pll: DPLL for which to calculate the output frequency
 * @dpll_hw_state: DPLL state from which to calculate the output frequency
 *
 * Return the output frequency corresponding to the @dpll_hw_state passed in
 * for @pll.
 */
int intel_dpll_get_freq(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *dpll_hw_state)
{
	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
		return 0;

	return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
}

/**
 * intel_dpll_get_hw_state - readout the DPLL's hardware state
 * @i915: i915 device
 * @pll: DPLL for which to read out the hardware state
 * @dpll_hw_state: DPLL's hardware state
 *
 * Read out @pll's hardware state into @dpll_hw_state.
 */
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *dpll_hw_state)
{
	return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
}
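
/*
 * Read out the hardware state of one PLL and rebuild its software tracking:
 * on/off state, the wakeref for its power domain (if any) and the mask of
 * active pipes referencing it.
 */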
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	if (pll->on && pll->info->power_domain)
		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);

	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
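
/**
 * intel_dpll_update_ref_clks - update the DPLL reference clocks
 * @i915: i915 device
 *
 * Let the platform specific code refresh its cached DPLL reference clock
 * values, if it provides an update_ref_clks() hook.
 */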
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
		i915->display.dpll.mgr->update_ref_clks(i915);
}
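
/**
 * intel_dpll_readout_hw_state - read out the DPLL hardware state
 * @i915: i915 device
 *
 * Read out the current hardware state of all shared DPLLs into the software
 * state tracking.
 */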
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(i915, pll, i)
		readout_dpll_hw_state(i915, pll);
}
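
/*
 * If this PLL was found enabled on readout, apply the ADL-P CMTG clock gating
 * workaround and, unless an active pipe is using the PLL, disable it.
 */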
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	_intel_disable_shared_dpll(i915, pll);
}
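
/**
 * intel_dpll_sanitize_state - sanitize the DPLL software state
 * @i915: i915 device
 *
 * Sanitize the software state of all shared DPLLs after hardware readout,
 * disabling any PLL that is enabled but not used by an active pipe.
 */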
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(i915, pll, i)
		sanitize_dpll_state(i915, pll);
}

/**
 * intel_dpll_dump_hw_state - dump hw_state
 * @i915: i915 drm device
 * @p: where to print the state to
 * @dpll_hw_state: hw state to be dumped
 *
 * Dump out the relevant values in @dpll_hw_state.
 */
void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
			      struct drm_printer *p,
			      const struct intel_dpll_hw_state *dpll_hw_state)
{
	if (i915->display.dpll.mgr) {
		i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
	} else {
		/* fallback for platforms that don't use the shared dpll
		 * infrastructure
		 */
		ibx_dump_hw_state(p, dpll_hw_state);
	}
}

/**
 * intel_dpll_compare_hw_state - compare the two states
 * @i915: i915 drm device
 * @a: first DPLL hw state
 * @b: second DPLL hw state
 *
 * Compare DPLL hw states @a and @b.
 *
 * Returns: true if the states are equal, false if they differ.
 */
bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
				 const struct intel_dpll_hw_state *a,
				 const struct intel_dpll_hw_state *b)
{
	if (i915->display.dpll.mgr) {
		return i915->display.dpll.mgr->compare_hw_state(a, b);
	} else {
		/* fallback for platforms that don't use the shared dpll
		 * infrastructure
		 */
		return ibx_compare_hw_state(a, b);
	}
}
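
/*
 * Cross-check one PLL's software tracking against the hardware: on/off state,
 * active vs. referenced pipe masks and, when a crtc is given, whether that
 * pipe is tracked consistently and the programmed hw state still matches.
 */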
static void
verify_single_dpll_state(struct drm_i915_private *i915,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 const struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state = {};
	u8 pipe_mask;
	bool active;

	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);

	if (!pll->info->always_on) {
		I915_STATE_WARN(i915, !pll->on && pll->active_mask,
				"%s: pll in active use but not on in sw tracking\n",
				pll->info->name);
		I915_STATE_WARN(i915, pll->on && !pll->active_mask,
				"%s: pll is on but not used by any active pipe\n",
				pll->info->name);
		I915_STATE_WARN(i915, pll->on != active,
				"%s: pll on state mismatch (expected %i, found %i)\n",
				pll->info->name, pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(i915,
				pll->active_mask & ~pll->state.pipe_mask,
				"%s: more active pll users than references: 0x%x vs 0x%x\n",
				pll->info->name, pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
		I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
				"%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
				"%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
			"%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pll->info->name, pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(i915,
			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"%s: pll hw state mismatch\n",
			pll->info->name);
}
static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
			      const struct intel_shared_dpll *new_pll)
{
	return old_pll && new_pll && old_pll != new_pll &&
		(old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
}
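
/**
 * intel_shared_dpll_state_verify - verify the shared DPLL state for a CRTC
 * @state: atomic state
 * @crtc: CRTC to verify
 *
 * Verify the shared DPLL tracking for @crtc against the old and new CRTC
 * states in @state: check the PLL now in use and warn if the previously used
 * PLL still references @crtc's pipe.
 */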
void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
				"%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
				pll->info->name, pipe_name(crtc->pipe), pll->active_mask);

		/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
		I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
							 new_crtc_state->shared_dpll) &&
				pll->state.pipe_mask & pipe_mask,
				"%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
				pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}
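
/**
 * intel_shared_dpll_verify_disabled - run the DPLL consistency checks on all DPLLs
 * @state: atomic state
 *
 * Run the per-PLL consistency checks on every shared DPLL, without reference
 * to any particular CRTC.
 */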
void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll *pll;
	int i;

	for_each_shared_dpll(i915, pll, i)
		verify_single_dpll_state(i915, pll, NULL, NULL);
}