1 // SPDX-License-Identifier: GPL-2.0
3 * RZ/G2L Clock Pulse Generator
5 * Copyright (C) 2021 Renesas Electronics Corp.
7 * Based on renesas-cpg-mssr.c
9 * Copyright (C) 2015 Glider bvba
10 * Copyright (C) 2013 Ideas On Board SPRL
11 * Copyright (C) 2015 Renesas Electronics Corp.
14 #include <linux/clk.h>
15 #include <linux/clk-provider.h>
16 #include <linux/clk/renesas.h>
17 #include <linux/delay.h>
18 #include <linux/device.h>
19 #include <linux/init.h>
20 #include <linux/iopoll.h>
21 #include <linux/mod_devicetable.h>
22 #include <linux/module.h>
23 #include <linux/of_address.h>
24 #include <linux/of_device.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_clock.h>
27 #include <linux/pm_domain.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/units.h>
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
34 #include "rzg2l-cpg.h"
/*
 * Debug-build assertion helper: warns when debugging, compiles out otherwise.
 * NOTE(review): the #ifdef DEBUG / #else / #endif lines selecting between the
 * two definitions are not visible in this excerpt.
 */
#define WARN_DEBUG(x) WARN_ON(x)
#define WARN_DEBUG(x) do { } while (0)

/* Extract a bit-field from register value @v: right-shift @s, mask @m. */
#define DIV_RSMASK(v, s, m) ((v >> s) & m)
/* Divider/mux config word: field shift in bits [19:12], width in bits [11:8]. */
#define GET_SHIFT(val) ((val >> 12) & 0xff)
#define GET_WIDTH(val) ((val >> 8) & 0xf)

/* SAM PLL register fields: K (fractional), M (integer), P and S dividers. */
#define KDIV(val) DIV_RSMASK(val, 16, 0xffff)
#define MDIV(val) DIV_RSMASK(val, 6, 0x3ff)
#define PDIV(val) DIV_RSMASK(val, 0, 0x3f)
#define SDIV(val) DIV_RSMASK(val, 0, 0x7)

/* Clock ON / reset registers and their monitor counterparts at +0x180. */
#define CLK_ON_R(reg) (reg)
#define CLK_MON_R(reg) (0x180 + (reg))
#define CLK_RST_R(reg) (reg)
#define CLK_MRST_R(reg) (0x180 + (reg))

/* Register-offset fields packed into a core clock's ->conf word. */
#define GET_REG_OFFSET(val) ((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)

/* Maximum supported VCLK (dot clock) rate, in Hz. */
#define MAX_VCLK_FREQ (148500000)
/*
 * NOTE(review): the opening line of struct sd_hw_data is not visible in this
 * excerpt; the member below is its back-pointer to the CPG private data.
 */
struct rzg2l_cpg_priv *priv;

#define to_sd_hw_data(_hw)	container_of(_hw, struct sd_hw_data, hw)

/* PLL5 programming parameters (members not visible in this excerpt). */
struct rzg2l_pll5_param {

/* Cached PLL5 mux source and DSI A/B dividers (members not visible here). */
struct rzg2l_pll5_mux_dsi_div_param {

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 *
 * NOTE(review): the @dev/@base/@rmw_lock/@clks member declaration lines are
 * not visible in this excerpt, though the kerneldoc references them.
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;
	const struct rzg2l_cpg_info *info;
	struct generic_pm_domain genpd;
	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
/* devm action callback: drop the OF clock provider registered in probe. */
static void rzg2l_cpg_del_clk_provider(void *data)
	of_clk_del_provider(data);

/*
 * Register a core divider clock described by @core.
 * NOTE(review): the clks/base parameters, IS_ERR guards, the dtable branch
 * selector and several braces are not visible in this excerpt.
 */
static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	/* Low 16 bits of ->parent index into the clks[] array. */
	parent = clks[core->parent & 0xffff];
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	/* Table-based divider variant (used when a dtable is supplied). */
	clk_hw = clk_hw_register_divider_table(dev, core->name,
					       base + GET_REG_OFFSET(core->conf),
					       GET_SHIFT(core->conf),
					       GET_WIDTH(core->conf),
	/* Plain power-of-two/linear divider variant. */
	clk_hw = clk_hw_register_divider(dev, core->name,
					 base + GET_REG_OFFSET(core->conf),
					 GET_SHIFT(core->conf),
					 GET_WIDTH(core->conf),
					 core->flag, &priv->rmw_lock);
		return ERR_CAST(clk_hw);

/*
 * Register a simple core mux clock using the generic mux helper.
 * NOTE(review): the base parameter line and error-check braces are missing.
 */
static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
		return ERR_CAST(clk_hw);
/* Round SD mux rate requests to the closest available parent rate. */
static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw,
					       struct clk_rate_request *req)
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);

/*
 * Switch the SDHI clock source, honoring the mandated intermediate step.
 * NOTE(review): declarations of bitmask/msk/val/ret and several braces and
 * returns are not visible in this excerpt.
 */
static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 off = GET_REG_OFFSET(hwdata->conf);
	u32 shift = GET_SHIFT(hwdata->conf);
	/* Mux index of the 266 MHz source (hardware value 2'b11 = index + 1). */
	const u32 clk_src_266 = 2;

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
	 * the index to value mapping is done by adding 1 to the index.
	 */
	/* Upper halfword is the write-enable mask for the selector field. */
	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
	if (index != clk_src_266) {
	/* Step through 266 MHz first, then poll until the switch completes. */
	writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);

	/* Non-zero register offset corresponds to the SDHI1 selector. */
	msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;

	ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
				 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
		dev_err(priv->dev, "failed to switch clk source\n");

	/* Finally select the requested source (hardware value = index + 1). */
	writel(bitmask | ((index + 1) << shift), priv->base + off);

/* Read back the current SD mux selection (hardware value minus 1). */
static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));

	val >>= GET_SHIFT(hwdata->conf);
	val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
	/* Prohibited clk source, change it to 533 MHz(reset value) */
	rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_sd_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_sd_clk_mux_get_parent,

/*
 * Register the SD0/SD1 mux clock backed by rzg2l_cpg_sd_clk_mux_ops.
 * NOTE(review): the flags assignment, NULL/ret checks and final return are
 * not visible in this excerpt.
 */
static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      struct rzg2l_cpg_priv *priv)
	struct sd_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	/* SD1's selector field sits at a non-zero shift; SD0's at shift 0. */
	init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
/*
 * Fill @params with fixed PLL5 settings for the requested FOUTPOSTDIV @rate
 * and return the rate those settings actually produce.
 * NOTE(review): the return-type line, the "foutpostdiv_rate =" assignment
 * line and braces are not visible in this excerpt.
 */
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
	unsigned long foutpostdiv_rate;

	/* Split the rate into an integer-MHz part and a 24-bit fraction. */
	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	/* Fixed divider/spread choices for this driver. */
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	/* FOUTPOSTDIV = EXTAL/refdiv * (intin + fracin/2^24) / (postdiv1*postdiv2) */
		EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
		((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
		(params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;

/* DSI divider clock state: cached rate plus CPG back-pointer.
 * NOTE(review): the hw/div members are not visible in this excerpt. */
struct dsi_div_hw_data {
	struct rzg2l_cpg_priv *priv;

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)

/* Report the rate cached by the last .set_rate call. */
static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	unsigned long rate = dsi_div->rate;
333 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
336 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
337 struct rzg2l_cpg_priv *priv = dsi_div->priv;
338 struct rzg2l_pll5_param params;
339 unsigned long parent_rate;
341 parent_rate = rzg2l_cpg_get_foutpostdiv_rate(¶ms, rate);
343 if (priv->mux_dsi_div_params.clksrc)
/* Clamp requests to MAX_VCLK_FREQ and propagate the needed parent rate. */
static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

/*
 * Program the DSI A/B dividers for the requested VCLK rate.
 * NOTE(review): the rate parameter line, -EINVAL return and final return are
 * not visible in this excerpt.
 */
static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long parent_rate)
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */
	/* Reject zero and anything above the hardware's VCLK ceiling. */
	if (!rate || rate > MAX_VCLK_FREQ)

	dsi_div->rate = rate;
	/* Write both divider fields with their write-enable bits set. */
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,

/*
 * Register the DSI divider clock.
 * NOTE(review): the clks parameter line, IS_ERR/NULL guards and final return
 * are not visible in this excerpt.
 */
static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct rzg2l_cpg_priv *priv)
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;

	parent = clks[core->parent & 0xffff];
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
/* PLL5_4 mux clock state (hw/conf members not visible in this excerpt). */
struct pll5_mux_hw_data {
	struct rzg2l_cpg_priv *priv;

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)

/* Steer rate requests to the parent selected by the cached clksrc value. */
static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

/* Select the PLL5_4 mux input via the OTHERFUNC1 write-enable register. */
static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */
	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

/* Read back the current PLL5_4 mux selection. */
static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,

/*
 * Register the PLL5_4 mux clock.
 * NOTE(review): NULL/ret guards and the final return are not visible here.
 */
static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
/* NOTE(review): struct sipll5's opening line and hw/conf members are not
 * visible; these are its cached FOUTPOSTDIV rate and CPG back-pointer. */
unsigned long foutpostdiv_rate;
struct rzg2l_cpg_priv *priv;

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)

/*
 * Convert a FOUTPOSTDIV rate to the resulting VCLK rate using the cached
 * mux/divider parameters. NOTE(review): the rate parameter line, the
 * clksrc halving and the return are not visible in this excerpt.
 */
static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;

	/* VCLK = rate / (2^dsi_div_a * (dsi_div_b + 1)) */
	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)

/* Report the cached FOUTPOSTDIV rate; fall back to the parent rate when
 * .set_rate has not yet run. */
static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
	struct sipll5 *sipll5 = to_sipll5(hw);
	unsigned long pll5_rate = sipll5->foutpostdiv_rate;

		pll5_rate = parent_rate;

/* Pass-through round_rate (body not visible in this excerpt). */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long *parent_rate)
566 static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
568 unsigned long parent_rate)
570 struct sipll5 *sipll5 = to_sipll5(hw);
571 struct rzg2l_cpg_priv *priv = sipll5->priv;
572 struct rzg2l_pll5_param params;
573 unsigned long vclk_rate;
578 * OSC --> PLL5 --> FOUTPOSTDIV-->|
579 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
582 * Based on the dot clock, the DSI divider clock calculates the parent
583 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
584 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
586 * OSC --> PLL5 --> FOUTPOSTDIV
592 vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
593 sipll5->foutpostdiv_rate =
594 rzg2l_cpg_get_foutpostdiv_rate(¶ms, vclk_rate);
596 /* Put PLL5 into standby mode */
597 writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
598 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
599 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
601 dev_err(priv->dev, "failed to release pll5 lock");
605 /* Output clock setting 1 */
606 writel(CPG_SIPLL5_CLK1_POSTDIV1_WEN | CPG_SIPLL5_CLK1_POSTDIV2_WEN |
607 CPG_SIPLL5_CLK1_REFDIV_WEN | (params.pl5_postdiv1 << 0) |
608 (params.pl5_postdiv2 << 4) | (params.pl5_refdiv << 8),
609 priv->base + CPG_SIPLL5_CLK1);
611 /* Output clock setting, SSCG modulation value setting 3 */
612 writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
614 /* Output clock setting 4 */
615 writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
616 priv->base + CPG_SIPLL5_CLK4);
618 /* Output clock setting 5 */
619 writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
621 /* PLL normal mode setting */
622 writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
623 CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
624 priv->base + CPG_SIPLL5_STBY);
626 /* PLL normal mode transition, output clock stability check */
627 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
628 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
630 dev_err(priv->dev, "failed to lock pll5");
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,

/*
 * Register the SIPLL5 clock and leave the PLL released from reset with SSCG
 * write-enabled. NOTE(review): the clks parameter line, IS_ERR/NULL guards,
 * flags assignment and final return are not visible in this excerpt.
 */
static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
			  struct rzg2l_cpg_priv *priv)
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct sipll5 *sipll5;
	struct clk_hw *clk_hw;

	parent = clks[core->parent & 0xffff];
		return ERR_CAST(parent);

	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	parent_name = __clk_get_name(parent);
	init.ops = &rzg2l_cpg_sipll5_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	sipll5->hw.init = &init;
	sipll5->conf = core->conf;

	/* Release PLL5 from reset with SSCG write-enable asserted. */
	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

	clk_hw = &sipll5->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);

	/* Default PLL5 mux/divider parameters for the DSI pipeline. */
	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
/* NOTE(review): struct pll_clk's opening line and hw/conf/base/type members
 * are not visible in this excerpt; only the CPG back-pointer remains. */
struct rzg2l_cpg_priv *priv;

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
701 static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
702 unsigned long parent_rate)
704 struct pll_clk *pll_clk = to_pll(hw);
705 struct rzg2l_cpg_priv *priv = pll_clk->priv;
706 unsigned int val1, val2;
707 unsigned int mult = 1;
708 unsigned int div = 1;
710 if (pll_clk->type != CLK_TYPE_SAM_PLL)
713 val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
714 val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
715 mult = MDIV(val1) + KDIV(val1) / 65536;
716 div = PDIV(val1) << SDIV(val2);
718 return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,

/*
 * Register a read-only SAM PLL clock.
 * NOTE(review): the clks/base parameter lines, IS_ERR/NULL guards and the
 * flags assignment are not visible in this excerpt.
 */
static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;

	parent = clks[core->parent & 0xffff];
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_pll_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	return clk_register(NULL, &pll_clk->hw);
/*
 * OF clock provider callback: translate a two-cell (type, index) clkspec
 * into a struct clk. NOTE(review): the return-type line, case labels,
 * break statements and braces are not visible in this excerpt.
 */
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;

	/* args[0] selects core vs. module clock space. */
	switch (clkspec->args[0]) {
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		clk = priv->clks[clkidx];

		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
			return ERR_PTR(-EINVAL);
		/* Module clocks follow the core clocks in clks[]. */
		clk = priv->clks[priv->num_core_clks + clkidx];

		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);

		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,

	dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
		clkspec->args[0], clkspec->args[1], clk,

/*
 * Register one core clock according to its type and store it in clks[].
 * NOTE(review): braces, break statements, the skip/fail labels and some
 * register-call argument lines are not visible in this excerpt.
 */
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	/* Skip NULLified clock */

	switch (core->type) {
		/* External input clock taken from DT. */
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
		/* Fixed-factor clock derived from @parent. */
		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);

	if (IS_ERR_OR_NULL(clk))

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 *
 * NOTE(review): the struct's opening line and the hw/off/bit/enabled member
 * declarations are not visible in this excerpt.
 */
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)

/*
 * Gate or ungate a module clock via its CLK_ON register, then (when monitor
 * registers exist) poll CLK_MON until the clock reports enabled.
 * NOTE(review): braces, the no-off early return, the !enable shortcut and
 * return statements are not visible in this excerpt.
 */
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(clock->bit);

		/* off == 0 means this module has no gate bit. */
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);

	dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	/* Upper halfword is the write-enable mask for the gate bit. */
		value = (bitmask << 16) | bitmask;
		value = bitmask << 16;
	writel(value, priv->base + CLK_ON_R(reg));

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!priv->info->has_clk_mon_regs)

	/* Busy-poll the monitor register for the clock to come up. */
	for (i = 1000; i > 0; --i) {
		if (((readl(priv->base + CLK_MON_R(reg))) & bitmask))

	dev_err(dev, "Failed to enable CLK_ON %p\n",
		priv->base + CLK_ON_R(reg));

/*
 * Enable a module clock; for coupled clocks only touch hardware when the
 * sibling is not already keeping it on.
 */
static int rzg2l_mod_clock_enable(struct clk_hw *hw)
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);

	return rzg2l_mod_clock_endisable(hw, true);

/*
 * Disable a module clock; for coupled clocks leave hardware on while the
 * sibling still needs it.
 */
static void rzg2l_mod_clock_disable(struct clk_hw *hw)
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);

	rzg2l_mod_clock_endisable(hw, false);

/*
 * Report whether a module clock is enabled, preferring the monitor register
 * when the SoC has one; coupled clocks report their soft state.
 */
static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);

		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);

		return clock->enabled;

	if (priv->info->has_clk_mon_regs)
		value = readl(priv->base + CLK_MON_R(clock->off));
		value = readl(priv->base + clock->off);

	return value & bitmask;

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,

/*
 * Find the other clock sharing this clock's off/bit pair (coupled clocks).
 * NOTE(review): the "clock != clk" guard and return statements are not
 * visible in this excerpt.
 */
static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		/* Unregistered slots keep their -ENOENT sentinel. */
		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
/*
 * Register one MSTP module gate clock and store it in clks[].
 * NOTE(review): the return-type line, several braces, the skip/fail labels
 * and some statements are not visible in this excerpt.
 */
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;

	/* Module IDs live in [num_core_clks, num_core_clks + num_mod_clks). */
	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	/* Skip NULLified clock */

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
		clk = ERR_PTR(-ENOMEM);

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	/* Critical clocks must never be gated by the framework. */
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
			init.flags |= CLK_IS_CRITICAL;

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	/* Coupled clocks share a gate bit; link the two soft states. */
	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
			clock->sibling = sibling;
			sibling->sibling = clock;

	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

/*
 * Pulse reset: assert, wait one RCLK cycle, deassert.
 * NOTE(review): the id parameter line, the "we" declaration, the udelay and
 * the final return are not visible in this excerpt.
 */
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 dis = BIT(info->resets[id].bit);

	dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	/* Assert: write-enable bits only, reset bit cleared. */
	writel(we, priv->base + CLK_RST_R(reg));

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */

	/* Release module from reset state */
	writel(we | dis, priv->base + CLK_RST_R(reg));

/* Assert a module reset (write-enable in the upper halfword). */
static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 value = BIT(info->resets[id].bit) << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
	writel(value, priv->base + CLK_RST_R(reg));

/* Deassert a module reset (write-enable plus the reset-release bit). */
static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 dis = BIT(info->resets[id].bit);
	u32 value = (dis << 16) | dis;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
	writel(value, priv->base + CLK_RST_R(reg));

/*
 * Report reset status from CLK_MRST when present, otherwise from the
 * dedicated monitor bit in CPG_RST_MON.
 */
static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 bitmask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;

	if (info->has_clk_mon_regs) {
		return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
	} else if (monbit >= 0) {
		u32 monbitmask = BIT(monbit);

		return !!(readl(priv->base + CPG_RST_MON) & monbitmask);

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,

/* Validate a DT reset specifier: index in range and an off value assigned. */
static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);

/* Fill in the reset_controller_dev and register it with devm lifetime. */
static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
/*
 * Decide whether a clkspec names a module clock that should be managed by
 * runtime PM (i.e. not listed in no_pm_mod_clks[]).
 * NOTE(review): braces and the true/false returns are not visible here.
 */
static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
				const struct of_phandle_args *clkspec)
	const struct rzg2l_cpg_info *info = priv->info;

	if (clkspec->args_count != 2)

	/* Only module clocks participate in runtime PM. */
	if (clkspec->args[0] != CPG_MOD)

	id = clkspec->args[1] + info->num_total_core_clks;
	for (i = 0; i < info->num_no_pm_mod_clks; i++) {
		if (info->no_pm_mod_clks[i] == id)

/*
 * genpd attach callback: create a pm_clk list for @dev and populate it with
 * every PM-managed clock the device references.
 * NOTE(review): loop braces, the pm_clk_create guard and the error-unwind
 * labels are not visible in this excerpt.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
	struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;

	/* Walk every "clocks" phandle the consumer device lists. */
	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			/* Lazily create the pm_clk list on first PM clock. */
			error = pm_clk_create(dev);
				of_node_put(clkspec.np);
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
				error = PTR_ERR(clk);
			error = pm_clk_add_clk(dev, clk);
				dev_err(dev, "pm_clk_add_clk failed %d\n",
			of_node_put(clkspec.np);

	pm_clk_destroy(dev);

/* genpd detach callback: tear down the pm_clk list if one was created. */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);

/* devm action callback: remove the genpd registered in probe. */
static void rzg2l_cpg_genpd_remove(void *data)
	pm_genpd_remove(data);

/*
 * Register an always-on PM-clock genpd and expose it as the DT provider.
 * NOTE(review): the ret guards after pm_genpd_init/devm_add_action_or_reset
 * are not visible in this excerpt.
 */
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd = &priv->genpd;

	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = rzg2l_cpg_attach_dev;
	genpd->detach_dev = rzg2l_cpg_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);

	return of_genpd_add_provider_simple(np, genpd);
/*
 * Probe: map the CPG register block, register all core and module clocks,
 * then expose the clock provider, PM clock domain and reset controller.
 * NOTE(review): NULL checks after the allocations, priv->dev/clks
 * assignments, error-check lines and the final return are not visible in
 * this excerpt.
 */
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);

	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* clks[] holds core clocks first, then module clocks. */
	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);

	dev_set_drvdata(dev, priv);

	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	/* Mark every slot empty until a clock is registered into it. */
	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);

	error = rzg2l_cpg_add_clk_domain(priv);

	error = rzg2l_cpg_reset_controller_register(priv);

/*
 * Match table: one entry per SoC, each gated by its Kconfig symbol.
 * NOTE(review): the per-entry braces, #endif lines and the terminating
 * empty entry are not visible in this excerpt.
 */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
#ifdef CONFIG_CLK_R9A07G044
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
#ifdef CONFIG_CLK_R9A07G054
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
#ifdef CONFIG_CLK_R9A09G011
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,

static struct platform_driver rzg2l_cpg_driver = {
		.name = "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,

/* Bind the probe at subsys_initcall time (clocks are needed very early). */
static int __init rzg2l_cpg_init(void)
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");