// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"
#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)
#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10
#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
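
/*
 * The CFG/M/N/D bank normally sits right after CMD; cfg_off lets an RCG
 * whose register bank is laid out at an additional offset reuse these
 * accessors unchanged.
 */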
/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))
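
/*
 * Each DFS performance level has its own copy of the CFG, M and N fields;
 * the SE_PERF_*_DFSR(level) macros index those per-level register banks.
 */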
enum freq_policy {
	FLOOR,
	CEIL,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}
static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	int i;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret) {
		pr_debug("%s: Unable to read CFG register for %s\n",
			 __func__, clk_hw_get_name(hw));
		return 0;
	}

	return __clk_rcg2_get_parent(hw, cfg);
}
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}
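
/*
 * Note: CMD_UPDATE is self-clearing. update_config() succeeds only once
 * the hardware has latched the new CFG/M/N/D values and dropped the bit.
 */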
static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div)
		rate = mult_frac(rate, 2, hid_div + 1);

	if (mode)
		rate = mult_frac(rate, m, n);

	return rate;
}
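
/*
 * Worked example: with a 19.2 MHz parent, a half-integer divider field of
 * 3 (i.e. divide-by-2, since the field holds 2*div - 1) and M/N = 1/2,
 * calc_rate() yields 19.2 MHz * 2/4 * 1/2 = 4.8 MHz.
 */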
static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 hid_div, m = 0, n = 0, mode = 0, mask;

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
}
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;

			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
static const struct freq_conf *
__clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
		       unsigned long req_rate)
{
	unsigned long rate_diff, best_rate_diff = ULONG_MAX;
	const struct freq_conf *conf, *best_conf = NULL;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	unsigned long parent_rate, rate;
	struct clk_hw *p;
	int index, i;

	/* Exit early if only one config is defined */
	if (f->num_confs == 1) {
		best_conf = f->confs;
		goto exit;
	}

	/* Search in each provided config the one that is near the wanted rate */
	for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) {
		index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
		if (index < 0)
			continue;

		p = clk_hw_get_parent_by_index(hw, index);
		if (!p)
			continue;

		parent_rate = clk_hw_get_rate(p);
		rate = calc_rate(parent_rate, conf->m, conf->n, conf->n, conf->pre_div);

		if (rate == req_rate) {
			best_conf = conf;
			goto exit;
		}

		rate_diff = abs_diff(req_rate, rate);
		if (rate_diff < best_rate_diff) {
			best_rate_diff = rate_diff;
			best_conf = conf;
		}
	}

	/*
	 * Very unlikely. Warn if we couldn't find a correct config
	 * due to parent not found in every config.
	 */
	if (unlikely(!best_conf)) {
		WARN(1, "%s: can't find a configuration for rate %lu\n",
		     name, req_rate);
		return ERR_PTR(-EINVAL);
	}

exit:
	return best_conf;
}
static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f,
				       struct clk_rate_request *req)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_conf *conf;
	struct clk_hw *p;
	int index;

	f = qcom_find_freq_multi(f, rate);
	if (!f || !f->confs)
		return -EINVAL;

	conf = __clk_rcg2_select_conf(hw, f, rate);
	if (IS_ERR(conf))
		return PTR_ERR(conf);
	index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (conf->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= conf->pre_div + 1;
		}

		if (conf->n) {
			u64 tmp = rate;

			tmp = tmp * conf->n;
			do_div(tmp, conf->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}

	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}

static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
}
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}
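
	/*
	 * Note the inverted encodings above: the hardware expects N as
	 * ~(n - m) and D as ~(2 * d), which is why the raw values are
	 * complemented before being written.
	 */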
	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	if (rcg->hw_clk_ctrl)
		cfg |= CFG_HW_CLK_CTRL_MASK;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}
static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_multi_tbl *f;
	const struct freq_conf *conf;
	struct freq_tbl f_tbl = {};

	f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate);
	if (!f || !f->confs)
		return -EINVAL;

	conf = __clk_rcg2_select_conf(hw, f, rate);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	f_tbl.freq = f->freq;
	f_tbl.src = conf->src;
	f_tbl.pre_div = conf->pre_div;
	f_tbl.m = conf->m;
	f_tbl.n = conf->n;

	return clk_rcg2_configure(rcg, &f_tbl);
}
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}
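
/*
 * For MND RCGs the output duty cycle is d/n: the D register holds ~(2*d)
 * and the N register ~(n - m), so the getter below recovers d and n by
 * complementing the raw register values.
 */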
static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	/* Duty-cycle cannot be modified if MND divider is in bypass mode. */
	if (!(cfg & CFG_MODE_MASK))
		return -EINVAL;

	n = (~(notn_m) + m) & mask;

	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/*
	 * Check bit widths of 2d. If D is too big reduce duty cycle.
	 * Also make sure it is never zero.
	 */
	d = clamp_val(d, 1, mask);

	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);

const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);

const struct clk_ops clk_rcg2_fm_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_fm_determine_rate,
	.set_rate = clk_rcg2_fm_set_rate,
	.set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops);

const struct clk_ops clk_rcg2_mux_closest_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);
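
/*
 * Illustrative sketch (not part of this file): a clock-controller driver
 * typically instantiates an RCG and picks one of the ops above along these
 * lines; every name below is hypothetical.
 *
 *	static const struct freq_tbl ftbl_example_clk_src[] = {
 *		F(19200000, P_XO, 1, 0, 0),
 *		F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
 *		{ }
 *	};
 *
 *	static struct clk_rcg2 example_clk_src = {
 *		.cmd_rcgr = 0x1000,
 *		.mnd_width = 0,
 *		.hid_width = 5,
 *		.parent_map = example_parent_map,
 *		.freq_tbl = ftbl_example_clk_src,
 *		.clkr.hw.init = &(struct clk_init_data){
 *			.name = "example_clk_src",
 *			.parent_data = example_parent_data,
 *			.num_parents = ARRAY_SIZE(example_parent_data),
 *			.ops = &clk_rcg2_ops,
 *		},
 *	};
 */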
struct frac_entry {
	int num;
	int den;
};

static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ 0, 0 },
};
static const struct frac_entry frac_table_810m[] = {	/* link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ 0, 0 },
};
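
/*
 * The eDP pixel ops below accept a num/den entry only when the requested
 * rate scaled by den/num lands within +/-100 kHz (the "delta" in the
 * loops) of the fixed link-PLL rate.
 */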
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
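
/*
 * The byte clock ops below use only the half-integer divider, so the
 * divider field is DIV_ROUND_UP(2 * parent / rate) - 1; e.g. dividing a
 * 750 MHz parent down to 93.75 MHz stores 15, i.e. divide-by-8 in
 * calc_rate() terms.
 */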
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}
static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}
static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ 0, 0 },
};
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function does ping-pong the RCG between PLLs: if we don't
	 * have at least one fixed PLL and two variable ones,
	 * then it's not going to work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	clk_hw_get_rate_range(req->best_parent_hw,
			      &parent_req.min_rate, &parent_req.max_rate);

	if (req->min_rate > parent_req.min_rate)
		parent_req.min_rate = req->min_rate;

	if (req->max_rate < parent_req.max_rate)
		parent_req.max_rate = req->max_rate;

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
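
/*
 * CMD_ROOT_EN force-enables the root regardless of downstream branch
 * votes; the helpers below toggle it so the shared-RCG ops can keep the
 * clock running while switching parents or parking the RCG.
 */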
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}
static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the M, N and D registers, cache
	 * the CFG value in parked_cfg and don't hit the update bit of CMD
	 * register.
	 */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);
}
static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked use the cached cfg instead */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_get_parent(hw, rcg->parked_cfg);

	return clk_rcg2_get_parent(hw);
}
static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked only update the cached cfg */
	if (!clk_hw_is_enabled(hw)) {
		rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
		rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

		return 0;
	}

	return clk_rcg2_set_parent(hw, index);
}
static unsigned long
clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/* If the shared rcg is parked use the cached cfg instead */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);

	return clk_rcg2_recalc_rate(hw, parent_rate);
}
static int clk_rcg2_shared_init(struct clk_hw *hw)
{
	/*
	 * This does a few things:
	 *
	 *  1. Sets rcg->parked_cfg to reflect the value at probe so that the
	 *     proper parent is reported from clk_rcg2_shared_get_parent().
	 *
	 *  2. Clears the force enable bit of the RCG because we rely on child
	 *     clks (branches) to turn the RCG on/off with a hardware feedback
	 *     mechanism and only set the force enable bit in the RCG when we
	 *     want to make sure the clk stays on for parent switches or
	 *     parking.
	 *
	 *  3. Parks shared RCGs on the safe source at registration because we
	 *     can't be certain that the parent clk will stay on during boot,
	 *     especially if the parent is shared. If this RCG is enabled at
	 *     boot, and the parent is turned off, the RCG will get stuck on. A
	 *     GDSC can wedge if it is turned on and the RCG is stuck on because
	 *     the GDSC's controller will hang waiting for the clk status to
	 *     toggle on when it never does.
	 *
	 * The safest option here is to "park" the RCG at init so that the clk
	 * can never get stuck on or off. This ensures the GDSC can't get
	 * wedged.
	 */
	clk_rcg2_shared_disable(hw);

	return 0;
}
const struct clk_ops clk_rcg2_shared_ops = {
	.init = clk_rcg2_shared_init,
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}
static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}
int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	rational_best_approximation(parent_rate, rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);
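
	/*
	 * The in/out argument order is easy to misread: the call above
	 * approximates parent_rate/rate as den/num, so the target rate is
	 * parent_rate * num / den, with num and den bounded by mnd_width.
	 */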
	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	if (num != den) {
		f.m = num;
		f.n = den;
	}

	return clk_rcg2_configure(rcg, &f);
}
static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}
static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}
const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);