// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

#define RZG3S_DIV_P		GENMASK(28, 26)
#define RZG3S_DIV_M		GENMASK(25, 22)
#define RZG3S_DIV_NI		GENMASK(21, 13)
#define RZG3S_DIV_NF		GENMASK(12, 1)

#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)

#define CPG_WEN_BIT		BIT(16)

#define MAX_VCLK_FREQ		(148500000)
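
/*
 * Worked decode of a packed conf word (illustrative value, not taken from
 * any SoC table): 0x42000400 yields GET_REG_OFFSET() = 0x420, GET_SHIFT() = 0
 * and GET_WIDTH() = 4, i.e. a 4-bit field at bit 0 of the register at offset
 * 0x420. CPG_WEN_BIT is shifted together with the value on writes, so the
 * write-enable bit lands 16 bits above the field it guards. MAX_VCLK_FREQ is
 * 148.5 MHz, the standard 1080p@60 pixel clock.
 */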

/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};

#define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)

/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};

#define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)

/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider
 * @max_rate: maximum rate for divider
 * @width: divider width
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};

#define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)

struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

/* Must be called in atomic context. */
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
{
	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
	u32 off = GET_REG_OFFSET(conf);
	u32 val;

	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
}
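
/*
 * Sketch of the update protocol assumed throughout this file: a field is
 * changed by writing (CPG_WEN_BIT | value) << shift, i.e. the new value and
 * its write-enable bit in a single access, and completion is detected by
 * polling the status field described by sconf until it reads zero, which is
 * what rzg2l_cpg_wait_clk_update_done() above does.
 */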

int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
				  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	const u32 clk_src_266 = 3;
	unsigned long flags;
	int ret;

	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz), and
	 * the index to value mapping is done by adding 1 to the index.
	 */

	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch to safe clk source\n");

	return notifier_from_errno(ret);
}
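
/*
 * Illustrative sequence (values per the comment above): switching SEL_SDHI0
 * from 533 MHz (2'b01) to 400 MHz (2'b10) results in two gated writes, first
 * (CPG_WEN_BIT | 3) << shift for the 266 MHz safe step (issued by this
 * notifier), then (CPG_WEN_BIT | 2) << shift for the 400 MHz target (issued
 * by the mux's .set_parent()), each followed by a poll of the status field.
 */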

int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	int ret = 0;
	u32 val;

	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
	    div_hw_data->invalid_rate % cnd->new_rate)
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	val = readl(priv->base + off);
	val >>= shift;
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	/*
	 * There are different constraints for the users of this notifier, as
	 * follows:
	 * 1/ SD div cannot be 1 (val == 0) if the parent rate is 800 MHz
	 * 2/ OCTA/SPI div cannot be 1 (val == 0) if the parent rate is 400 MHz
	 * As SD can have only one parent running at 800 MHz, and OCTA/SPI only
	 * one parent running at 400 MHz, the parent rate is taken into account
	 * at the beginning of the function (by checking invalid_rate % new_rate).
	 * Now it is time to check the hardware divider and update it accordingly.
	 */
	if (!val) {
		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
		/* Wait for the update done. */
		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to downgrade the div\n");

	return notifier_from_errno(ret);
}
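
/*
 * Example (using the constraints above): with invalid_rate = 800 MHz and a
 * requested new_rate of 200 MHz, 800 % 200 == 0, so the parent may be the
 * 800 MHz one and a divider of 1 (val == 0) would be illegal; the notifier
 * then bumps the divider to the next step (val == 1) before the rate change
 * proceeds. A new_rate of 300 MHz cannot originate from the 800 MHz parent,
 * so it is ignored.
 */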

static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
				   struct rzg2l_cpg_priv *priv)
{
	struct notifier_block *nb;

	if (!core->notifier)
		return 0;

	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = core->notifier;

	return clk_notifier_register(hw->clk, nb);
}

static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
				   CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
}

static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);

	if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
		req->rate = div_hw_data->max_rate;

	return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
				      CLK_DIVIDER_ROUND_CLOSEST);
}

static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
			      CLK_DIVIDER_ROUND_CLOSEST);

	spin_lock_irqsave(&priv->rmw_lock, flags);
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	return ret;
}

static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};

static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
			   void __iomem *base, struct rzg2l_cpg_priv *priv)
{
	struct div_hw_data *div_hw_data;
	struct clk_init_data init = {};
	const struct clk_div_table *clkt;
	struct clk_hw *clk_hw;
	const struct clk *parent;
	const char *parent_name;
	u32 max = 0;
	int ret;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
	if (!div_hw_data)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.flags = core->flag;
	init.ops = &rzg3s_div_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Get the maximum divider to retrieve div width. */
	for (clkt = core->dtable; clkt->div; clkt++) {
		if (max < clkt->div)
			max = clkt->div;
	}

	div_hw_data->hw_data.priv = priv;
	div_hw_data->hw_data.conf = core->conf;
	div_hw_data->hw_data.sconf = core->sconf;
	div_hw_data->dtable = core->dtable;
	div_hw_data->invalid_rate = core->invalid_rate;
	div_hw_data->max_rate = core->max_rate;
	div_hw_data->width = fls(max) - 1;

	clk_hw = &div_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}
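
/*
 * Illustrative dtable (hypothetical values, assuming val = log2(div)): a
 * divider supporting /1, /2, /4 and /8 would be described as
 *	{ .val = 0, .div = 1 }, { .val = 1, .div = 2 },
 *	{ .val = 2, .div = 4 }, { .val = 3, .div = 8 }, { 0 }
 * so the loop above finds max = 8 and width = fls(8) - 1 = 3 bits.
 */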

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);

	spin_lock_irqsave(&priv->rmw_lock, flags);

	writel((CPG_WEN_BIT | val) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to switch parent\n");

	return ret;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
};
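
/*
 * Illustrative mtable for the SD muxes (values per the SEL_SDHI comment
 * above, with the index-to-value mapping being index + 1; the array name is
 * hypothetical):
 *	static const u32 mtable_sdhi[] = { 1, 2, 3 };
 * Parent index 0 selects 2'b01 (533 MHz), index 1 selects 2'b10 (400 MHz),
 * index 2 selects 2'b11 (266 MHz); the prohibited value 0 is never used.
 */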

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      void __iomem *base,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_mux_hw_data *sd_mux_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
	if (!sd_mux_hw_data)
		return ERR_PTR(-ENOMEM);

	sd_mux_hw_data->hw_data.priv = priv;
	sd_mux_hw_data->hw_data.conf = core->conf;
	sd_mux_hw_data->hw_data.sconf = core->sconf;
	sd_mux_hw_data->mtable = core->mtable;

	init.name = core->name;
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = core->flag;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &sd_mux_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	foutpostdiv_rate =
		EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
		((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
		(params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}
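
/*
 * Worked example (assuming the fixed parameters above and
 * EXTAL_FREQ_IN_MEGA_HZ = 24): for rate = 148500000, pl5_intin = 148 and
 * pl5_fracin = 0.5 << 24, so the PLL is programmed for
 * 24 MHz / 2 * 148.5 = 1782 MHz, while the integer expression above returns
 * 24 MHz / 2 * 148 = 1776 MHz because the >> 24 discards the fraction.
 */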

struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)

static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	unsigned long rate = dsi_div->rate;

	if (!rate)
		rate = parent_rate;

	return rate;
}

static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate *= 2;

	return parent_rate;
}

static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}

static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};

static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct clk **clks,
			       struct rzg2l_cpg_priv *priv)
{
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)

static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}

static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *                | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 * FOUT1PH0------>|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
{
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)

static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
					     unsigned long rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	unsigned long vclk;

	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)
		vclk /= 2;

	return vclk;
}
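
/*
 * Worked example (using the defaults set in rzg2l_cpg_sipll5_register()
 * below: clksrc = 1, dsi_div_a = 1, dsi_div_b = 2):
 * vclk = rate / ((1 << 1) * (2 + 1)) / 2 = rate / 12, so a 1782 MHz
 * FOUTPOSTDIV corresponds to the 148.5 MHz maximum VCLK.
 */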

static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	unsigned long pll5_rate = sipll5->foutpostdiv_rate;

	if (!pll5_rate)
		pll5_rate = parent_rate;

	return pll5_rate;
}

static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}

static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *                   |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5");
		return ret;
	}

	return 0;
}

static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};

static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
			  struct clk **clks,
			  struct rzg2l_cpg_priv *priv)
{
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct sipll5 *sipll5;
	struct clk_hw *clk_hw;
	int ret;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
	if (!sipll5)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	parent_name = __clk_get_name(parent);
	init.ops = &rzg2l_cpg_sipll5_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	sipll5->hw.init = &init;
	sipll5->conf = core->conf;
	sipll5->priv = priv;

	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

	clk_hw = &sipll5->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */

	return clk_hw->clk;
}

struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}
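
/*
 * Worked example (illustrative register values): with parent_rate = 24 MHz,
 * MDIV = 133, KDIV = 0, SDIV = 1 and PDIV = 2, the intermediate rate is
 * 24 MHz * (133 << 16) >> (16 + 1) = 1596 MHz, and PDIV brings it to 798 MHz.
 */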

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};

static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	u32 nir, nfr, mr, pr, val;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_G3S_PLL)
		return parent_rate;

	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));

	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
	/* Hardware interprets values higher than 8 as p = 16. */
	if (pr > 8)
		pr = 16;

	mr = FIELD_GET(RZG3S_DIV_M, val) + 1;
	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
	nfr = FIELD_GET(RZG3S_DIV_NF, val);

	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);

	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
}
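
/*
 * Worked example (illustrative field values): with parent_rate = 24 MHz,
 * NI = 99 (nir = 100), NF = 0 (nfr = 0), M = 0 (mr = 1) and P = 0 (pr = 1),
 * the rate is 24 MHz * (4096 * 100 + 0) / 4096 / (1 * 1) = 2400 MHz.
 */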

static const struct clk_ops rzg3s_cpg_pll_ops = {
	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	return clk_register(NULL, &pll_clk->hw);
}

static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));

	return clk;
}
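
/*
 * Consumers reference these clocks with a two-cell specifier, e.g. (a
 * representative snippet, not tied to a particular SoC):
 *	clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
 * where the first cell selects CPG_CORE or CPG_MOD and the second cell is
 * the index decoded above.
 */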

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
						 &rzg2l_cpg_pll_ops);
		break;
	case CLK_TYPE_G3S_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
						 &rzg3s_cpg_pll_ops);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_G3S_DIV:
		clk = rzg3s_cpg_div_clk_register(core, priv->clks, priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mstp_clock, hw)

static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");

	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + CLK_ON_R(reg));

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));

	return error;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return 0;
	}

	return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return;
	}

	rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
		return 1;
	}

	if (clock->sibling)
		return clock->enabled;

	if (priv->info->has_clk_mon_regs)
		value = readl(priv->base + CLK_MON_R(clock->off));
	else
		value = readl(priv->base + clock->off);

	return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};

static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}
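
/*
 * Coupled clocks share a single CLK_ON bit: the sibling lookup above pairs
 * the two mstp_clock instances that carry the same off/bit, and the
 * enable/disable paths only touch the hardware once both soft ->enabled
 * states agree, so the shared bit is cleared only when both siblings are
 * disabled.
 */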

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}
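
/*
 * Note on the layout (mirroring the CLK_ON handling above): the upper 16
 * bits of a CLK_RST write act as a per-bit write enable, so mask << 16
 * asserts the reset (the low bit is written as 0) and (mask << 16) | mask
 * deasserts it, with the matching monitor register polled where one exists.
 */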

static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzg2l_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzg2l_cpg_deassert(rcdev, id);
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	s8 monbit = info->resets[id].monbit;
	unsigned int reg;
	u32 bitmask;

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(info->resets[id].off);
		bitmask = BIT(info->resets[id].bit);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		bitmask = BIT(monbit);
	} else {
		return -ENOTSUPP;
	}

	return !!(readl(priv->base + reg) & bitmask);
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};

static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
		return -EINVAL;
	}

	return id;
}

static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
				const struct of_phandle_args *clkspec)
{
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id;
	unsigned int i;

	if (clkspec->args_count != 2)
		return false;

	if (clkspec->args[0] != CPG_MOD)
		return false;

	id = clkspec->args[1] + info->num_total_core_clks;
	for (i = 0; i < info->num_no_pm_mod_clks; i++) {
		if (info->no_pm_mod_clks[i] == id)
			return false;
	}

	return true;
}

/**
 * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
 * @onecell_data: cell data
 * @domains: generic PM domains
 */
struct rzg2l_cpg_pm_domains {
	struct genpd_onecell_data onecell_data;
	struct generic_pm_domain *domains[];
};

/**
 * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
 * @genpd: generic PM domain
 * @priv: pointer to CPG private data structure
 * @conf: CPG PM domain configuration info
 * @id: RZ/G2L power domain ID
 */
struct rzg2l_cpg_pd {
	struct generic_pm_domain genpd;
	struct rzg2l_cpg_priv *priv;
	struct rzg2l_cpg_pm_domain_conf conf;
	u16 id;
};

static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_priv *priv = pd->priv;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);

err:
	return error;
}

static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzg2l_cpg_genpd_remove(void *data)
{
	struct genpd_onecell_data *celldata = data;

	for (unsigned int i = 0; i < celldata->num_domains; i++)
		pm_genpd_remove(celldata->domains[i]);
}

static void rzg2l_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask << 16, priv->base + mstop.off);

	return 0;
}

static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);

	return 0;
}

static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd, bool always_on)
{
	struct dev_power_governor *governor;

	pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
	pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
	if (always_on) {
		pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
		governor = &pm_domain_always_on_gov;
	} else {
		pd->genpd.power_on = rzg2l_cpg_power_on;
		pd->genpd.power_off = rzg2l_cpg_power_off;
		governor = &simple_qos_governor;
	}

	return pm_genpd_init(&pd->genpd, governor, !always_on);
}

static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	ret = rzg2l_cpg_pd_setup(pd, true);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static struct generic_pm_domain *
rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
{
	struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
	struct genpd_onecell_data *genpd = data;

	if (spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	for (unsigned int i = 0; i < genpd->num_domains; i++) {
		struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
						       genpd);

		if (pd->id == spec->args[0]) {
			domain = &pd->genpd;
			break;
		}
	}

	return domain;
}

static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
{
	const struct rzg2l_cpg_info *info = priv->info;
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pm_domains *domains;
	struct generic_pm_domain *parent;
	u32 ncells;
	int ret;

	ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
	if (ret)
		return ret;

	/* For backward compatibility. */
	if (!ncells)
		return rzg2l_cpg_add_clk_domain(priv);

	domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
			       GFP_KERNEL);
	if (!domains)
		return -ENOMEM;

	domains->onecell_data.domains = domains->domains;
	domains->onecell_data.num_domains = info->num_pm_domains;
	domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
	if (ret)
		return ret;

	for (unsigned int i = 0; i < info->num_pm_domains; i++) {
		bool always_on = !!(info->pm_domains[i].flags & RZG2L_PD_F_ALWAYS_ON);
		struct rzg2l_cpg_pd *pd;

		pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		pd->genpd.name = info->pm_domains[i].name;
		pd->conf = info->pm_domains[i].conf;
		pd->id = info->pm_domains[i].id;
		pd->priv = priv;

		ret = rzg2l_cpg_pd_setup(pd, always_on);
		if (ret)
			return ret;

		if (always_on) {
			ret = rzg2l_cpg_power_on(&pd->genpd);
			if (ret)
				return ret;
		}

		domains->domains[i] = &pd->genpd;
		/* Parent should be on the very first entry of info->pm_domains[]. */
		if (!i) {
			parent = &pd->genpd;
			continue;
		}

		ret = pm_genpd_add_subdomain(parent, &pd->genpd);
		if (ret)
			return ret;
	}

	ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
	if (ret)
		return ret;

	return 0;
}

static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
1882 static const struct of_device_id rzg2l_cpg_match[] = {
1883 #ifdef CONFIG_CLK_R9A07G043
1885 .compatible = "renesas,r9a07g043-cpg",
1886 .data = &r9a07g043_cpg_info,
1889 #ifdef CONFIG_CLK_R9A07G044
1891 .compatible = "renesas,r9a07g044-cpg",
1892 .data = &r9a07g044_cpg_info,
1895 #ifdef CONFIG_CLK_R9A07G054
1897 .compatible = "renesas,r9a07g054-cpg",
1898 .data = &r9a07g054_cpg_info,
1901 #ifdef CONFIG_CLK_R9A08G045
1903 .compatible = "renesas,r9a08g045-cpg",
1904 .data = &r9a08g045_cpg_info,
1907 #ifdef CONFIG_CLK_R9A09G011
1909 .compatible = "renesas,r9a09g011-cpg",
1910 .data = &r9a09g011_cpg_info,

static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};

static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
MODULE_LICENSE("GPL v2");