1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * RZ/G2L Clock Pulse Generator
4  *
5  * Copyright (C) 2021 Renesas Electronics Corp.
6  *
7  * Based on renesas-cpg-mssr.c
8  *
9  * Copyright (C) 2015 Glider bvba
10  * Copyright (C) 2013 Ideas On Board SPRL
11  * Copyright (C) 2015 Renesas Electronics Corp.
12  */
13
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/clk-provider.h>
17 #include <linux/clk/renesas.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/iopoll.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_clock.h>
27 #include <linux/pm_domain.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/units.h>
31
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
33
34 #include "rzg2l-cpg.h"
35
36 #ifdef DEBUG
37 #define WARN_DEBUG(x)   WARN_ON(x)
38 #else
39 #define WARN_DEBUG(x)   do { } while (0)
40 #endif
41
42 #define GET_SHIFT(val)          ((val >> 12) & 0xff)
43 #define GET_WIDTH(val)          ((val >> 8) & 0xf)
44
45 #define KDIV(val)               ((s16)FIELD_GET(GENMASK(31, 16), val))
46 #define MDIV(val)               FIELD_GET(GENMASK(15, 6), val)
47 #define PDIV(val)               FIELD_GET(GENMASK(5, 0), val)
48 #define SDIV(val)               FIELD_GET(GENMASK(2, 0), val)
49
50 #define RZG3S_DIV_P             GENMASK(28, 26)
51 #define RZG3S_DIV_M             GENMASK(25, 22)
52 #define RZG3S_DIV_NI            GENMASK(21, 13)
53 #define RZG3S_DIV_NF            GENMASK(12, 1)
54
55 #define CLK_ON_R(reg)           (reg)
56 #define CLK_MON_R(reg)          (0x180 + (reg))
57 #define CLK_RST_R(reg)          (reg)
58 #define CLK_MRST_R(reg)         (0x180 + (reg))
59
60 #define GET_REG_OFFSET(val)             ((val >> 20) & 0xfff)
61 #define GET_REG_SAMPLL_CLK1(val)        ((val >> 22) & 0xfff)
62 #define GET_REG_SAMPLL_CLK2(val)        ((val >> 12) & 0xfff)
63
64 #define CPG_WEN_BIT             BIT(16)
65
66 #define MAX_VCLK_FREQ           (148500000)
67
/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 *
 * @conf and @sconf pack the register offset in bits [31:20], the field
 * shift in bits [19:12] and the field width in bits [11:8]; see
 * GET_REG_OFFSET(), GET_SHIFT() and GET_WIDTH().
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};
81
82 #define to_clk_hw_data(_hw)     container_of(_hw, struct clk_hw_data, hw)
83
/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table (maps parent index to register field value,
 *          used with clk_mux_index_to_val()/clk_mux_val_to_index())
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};
93
94 #define to_sd_mux_hw_data(_hw)  container_of(_hw, struct sd_mux_hw_data, hw_data)
95
/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider; a parent rate that the current
 *                divider setting must not be combined with (checked in
 *                rzg3s_cpg_div_clk_notifier())
 * @max_rate: maximum rate for divider
 * @width: divider field width, in bits
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};
111
112 #define to_div_hw_data(_hw)     container_of(_hw, struct div_hw_data, hw_data)
113
/**
 * struct rzg2l_pll5_param - PLL5 frequency setting parameters
 * @pl5_fracin: fractional part of the multiplication ratio (24-bit value)
 * @pl5_refdiv: reference clock divider
 * @pl5_intin: integer part of the multiplication ratio (target rate in MHz)
 * @pl5_postdiv1: output post divider 1
 * @pl5_postdiv2: output post divider 2
 * @pl5_spread: spread-spectrum (SSCG) modulation setting
 */
struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};
122
/**
 * struct rzg2l_pll5_mux_dsi_div_param - PLL5 MUX and DSI divider parameters
 * @clksrc: MUX clock source selection (non-zero halves the MUX output rate;
 *          presumably selects FOUT1PH0 over FOUTPOSTDIV — see the diagrams
 *          in the DSI div/sipll5 code)
 * @dsi_div_a: DSI divider A setting (divides by 2^dsi_div_a)
 * @dsi_div_b: DSI divider B setting (divides by dsi_div_b + 1)
 */
struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};
128
/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters (shared by the
 *                      sipll5, pll5_4 mux and DSI div clock implementations)
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
160
/*
 * Cleanup callback removing the OF clock provider for @data (a device_node).
 * NOTE(review): looks like a devm action callback — confirm at the
 * registration site (not visible in this chunk).
 */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
165
166 /* Must be called in atomic context. */
167 static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
168 {
169         u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
170         u32 off = GET_REG_OFFSET(conf);
171         u32 val;
172
173         return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
174 }
175
176 int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
177                                   void *data)
178 {
179         struct clk_notifier_data *cnd = data;
180         struct clk_hw *hw = __clk_get_hw(cnd->clk);
181         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
182         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
183         u32 off = GET_REG_OFFSET(clk_hw_data->conf);
184         u32 shift = GET_SHIFT(clk_hw_data->conf);
185         const u32 clk_src_266 = 3;
186         unsigned long flags;
187         int ret;
188
189         if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
190                 return NOTIFY_DONE;
191
192         spin_lock_irqsave(&priv->rmw_lock, flags);
193
194         /*
195          * As per the HW manual, we should not directly switch from 533 MHz to
196          * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
197          * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
198          * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
199          * (400 MHz)).
200          * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
201          * switching register is prohibited.
202          * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
203          * the index to value mapping is done by adding 1 to the index.
204          */
205
206         writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);
207
208         /* Wait for the update done. */
209         ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
210
211         spin_unlock_irqrestore(&priv->rmw_lock, flags);
212
213         if (ret)
214                 dev_err(priv->dev, "failed to switch to safe clk source\n");
215
216         return notifier_from_errno(ret);
217 }
218
219 int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
220                                void *data)
221 {
222         struct clk_notifier_data *cnd = data;
223         struct clk_hw *hw = __clk_get_hw(cnd->clk);
224         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
225         struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
226         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
227         u32 off = GET_REG_OFFSET(clk_hw_data->conf);
228         u32 shift = GET_SHIFT(clk_hw_data->conf);
229         unsigned long flags;
230         int ret = 0;
231         u32 val;
232
233         if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
234             div_hw_data->invalid_rate % cnd->new_rate)
235                 return NOTIFY_DONE;
236
237         spin_lock_irqsave(&priv->rmw_lock, flags);
238
239         val = readl(priv->base + off);
240         val >>= shift;
241         val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
242
243         /*
244          * There are different constraints for the user of this notifiers as follows:
245          * 1/ SD div cannot be 1 (val == 0) if parent rate is 800MHz
246          * 2/ OCTA / SPI div cannot be 1 (val == 0) if parent rate is 400MHz
247          * As SD can have only one parent having 800MHz and OCTA div can have
248          * only one parent having 400MHz we took into account the parent rate
249          * at the beginning of function (by checking invalid_rate % new_rate).
250          * Now it is time to check the hardware divider and update it accordingly.
251          */
252         if (!val) {
253                 writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
254                 /* Wait for the update done. */
255                 ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
256         }
257
258         spin_unlock_irqrestore(&priv->rmw_lock, flags);
259
260         if (ret)
261                 dev_err(priv->dev, "Failed to downgrade the div\n");
262
263         return notifier_from_errno(ret);
264 }
265
/*
 * Attach the core clock's rate-change notifier callback (if any) to @hw.
 *
 * The notifier_block is devm-allocated, so it stays alive for the lifetime
 * of the CPG device.  Returns 0 when no notifier is requested, -ENOMEM on
 * allocation failure, or the clk_notifier_register() result.
 */
static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
				   struct rzg2l_cpg_priv *priv)
{
	struct notifier_block *nb;

	if (!core->notifier)
		return 0;

	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = core->notifier;

	return clk_notifier_register(hw->clk, nb);
}
282
283 static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
284                                                unsigned long parent_rate)
285 {
286         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
287         struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
288         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
289         u32 val;
290
291         val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
292         val >>= GET_SHIFT(clk_hw_data->conf);
293         val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
294
295         return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
296                                    CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
297 }
298
299 static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
300 {
301         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
302         struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
303
304         if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
305                 req->rate = div_hw_data->max_rate;
306
307         return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
308                                       CLK_DIVIDER_ROUND_CLOSEST);
309 }
310
311 static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
312                                   unsigned long parent_rate)
313 {
314         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
315         struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
316         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
317         u32 off = GET_REG_OFFSET(clk_hw_data->conf);
318         u32 shift = GET_SHIFT(clk_hw_data->conf);
319         unsigned long flags;
320         u32 val;
321         int ret;
322
323         val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
324                               CLK_DIVIDER_ROUND_CLOSEST);
325
326         spin_lock_irqsave(&priv->rmw_lock, flags);
327         writel((CPG_WEN_BIT | val) << shift, priv->base + off);
328         /* Wait for the update done. */
329         ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
330         spin_unlock_irqrestore(&priv->rmw_lock, flags);
331
332         return ret;
333 }
334
/* clk_ops for the RZ/G3S dividers with write-enable/status-monitor access. */
static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};
340
341 static struct clk * __init
342 rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
343                            void __iomem *base, struct rzg2l_cpg_priv *priv)
344 {
345         struct div_hw_data *div_hw_data;
346         struct clk_init_data init = {};
347         const struct clk_div_table *clkt;
348         struct clk_hw *clk_hw;
349         const struct clk *parent;
350         const char *parent_name;
351         u32 max = 0;
352         int ret;
353
354         parent = clks[core->parent & 0xffff];
355         if (IS_ERR(parent))
356                 return ERR_CAST(parent);
357
358         parent_name = __clk_get_name(parent);
359
360         div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
361         if (!div_hw_data)
362                 return ERR_PTR(-ENOMEM);
363
364         init.name = core->name;
365         init.flags = core->flag;
366         init.ops = &rzg3s_div_clk_ops;
367         init.parent_names = &parent_name;
368         init.num_parents = 1;
369
370         /* Get the maximum divider to retrieve div width. */
371         for (clkt = core->dtable; clkt->div; clkt++) {
372                 if (max < clkt->div)
373                         max = clkt->div;
374         }
375
376         div_hw_data->hw_data.priv = priv;
377         div_hw_data->hw_data.conf = core->conf;
378         div_hw_data->hw_data.sconf = core->sconf;
379         div_hw_data->dtable = core->dtable;
380         div_hw_data->invalid_rate = core->invalid_rate;
381         div_hw_data->max_rate = core->max_rate;
382         div_hw_data->width = fls(max) - 1;
383
384         clk_hw = &div_hw_data->hw_data.hw;
385         clk_hw->init = &init;
386
387         ret = devm_clk_hw_register(priv->dev, clk_hw);
388         if (ret)
389                 return ERR_PTR(ret);
390
391         ret = rzg2l_register_notifier(clk_hw, core, priv);
392         if (ret) {
393                 dev_err(priv->dev, "Failed to register notifier for %s\n",
394                         core->name);
395                 return ERR_PTR(ret);
396         }
397
398         return clk_hw->clk;
399 }
400
401 static struct clk * __init
402 rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
403                            struct clk **clks,
404                            void __iomem *base,
405                            struct rzg2l_cpg_priv *priv)
406 {
407         struct device *dev = priv->dev;
408         const struct clk *parent;
409         const char *parent_name;
410         struct clk_hw *clk_hw;
411
412         parent = clks[core->parent & 0xffff];
413         if (IS_ERR(parent))
414                 return ERR_CAST(parent);
415
416         parent_name = __clk_get_name(parent);
417
418         if (core->dtable)
419                 clk_hw = clk_hw_register_divider_table(dev, core->name,
420                                                        parent_name, 0,
421                                                        base + GET_REG_OFFSET(core->conf),
422                                                        GET_SHIFT(core->conf),
423                                                        GET_WIDTH(core->conf),
424                                                        core->flag,
425                                                        core->dtable,
426                                                        &priv->rmw_lock);
427         else
428                 clk_hw = clk_hw_register_divider(dev, core->name,
429                                                  parent_name, 0,
430                                                  base + GET_REG_OFFSET(core->conf),
431                                                  GET_SHIFT(core->conf),
432                                                  GET_WIDTH(core->conf),
433                                                  core->flag, &priv->rmw_lock);
434
435         if (IS_ERR(clk_hw))
436                 return ERR_CAST(clk_hw);
437
438         return clk_hw->clk;
439 }
440
/*
 * Register a plain mux core clock backed by the generic clk-mux helper;
 * priv->rmw_lock serializes accesses to the shared CPG register.
 */
static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}
460
461 static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
462 {
463         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
464         struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
465         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
466         u32 off = GET_REG_OFFSET(clk_hw_data->conf);
467         u32 shift = GET_SHIFT(clk_hw_data->conf);
468         unsigned long flags;
469         u32 val;
470         int ret;
471
472         val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);
473
474         spin_lock_irqsave(&priv->rmw_lock, flags);
475
476         writel((CPG_WEN_BIT | val) << shift, priv->base + off);
477
478         /* Wait for the update done. */
479         ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
480
481         spin_unlock_irqrestore(&priv->rmw_lock, flags);
482
483         if (ret)
484                 dev_err(priv->dev, "Failed to switch parent\n");
485
486         return ret;
487 }
488
489 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
490 {
491         struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
492         struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
493         struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
494         u32 val;
495
496         val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
497         val >>= GET_SHIFT(clk_hw_data->conf);
498         val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
499
500         return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
501 }
502
/* clk_ops for the SD clock mux; rate changes are routed to the parents. */
static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
};
508
509 static struct clk * __init
510 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
511                               void __iomem *base,
512                               struct rzg2l_cpg_priv *priv)
513 {
514         struct sd_mux_hw_data *sd_mux_hw_data;
515         struct clk_init_data init;
516         struct clk_hw *clk_hw;
517         int ret;
518
519         sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
520         if (!sd_mux_hw_data)
521                 return ERR_PTR(-ENOMEM);
522
523         sd_mux_hw_data->hw_data.priv = priv;
524         sd_mux_hw_data->hw_data.conf = core->conf;
525         sd_mux_hw_data->hw_data.sconf = core->sconf;
526         sd_mux_hw_data->mtable = core->mtable;
527
528         init.name = core->name;
529         init.ops = &rzg2l_cpg_sd_clk_mux_ops;
530         init.flags = core->flag;
531         init.num_parents = core->num_parents;
532         init.parent_names = core->parent_names;
533
534         clk_hw = &sd_mux_hw_data->hw_data.hw;
535         clk_hw->init = &init;
536
537         ret = devm_clk_hw_register(priv->dev, clk_hw);
538         if (ret)
539                 return ERR_PTR(ret);
540
541         ret = rzg2l_register_notifier(clk_hw, core, priv);
542         if (ret) {
543                 dev_err(priv->dev, "Failed to register notifier for %s\n",
544                         core->name);
545                 return ERR_PTR(ret);
546         }
547
548         return clk_hw->clk;
549 }
550
551 static unsigned long
552 rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
553                                unsigned long rate)
554 {
555         unsigned long foutpostdiv_rate;
556
557         params->pl5_intin = rate / MEGA;
558         params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
559         params->pl5_refdiv = 2;
560         params->pl5_postdiv1 = 1;
561         params->pl5_postdiv2 = 1;
562         params->pl5_spread = 0x16;
563
564         foutpostdiv_rate =
565                 EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
566                 ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
567                 (params->pl5_postdiv1 * params->pl5_postdiv2);
568
569         return foutpostdiv_rate;
570 }
571
/**
 * struct dsi_div_hw_data - DSI divider clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @rate: cached VCLK rate from the last set_rate call (0 until then)
 * @priv: CPG private data structure
 */
struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};
578
579 #define to_dsi_div_hw_data(_hw) container_of(_hw, struct dsi_div_hw_data, hw)
580
581 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
582                                                    unsigned long parent_rate)
583 {
584         struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
585         unsigned long rate = dsi_div->rate;
586
587         if (!rate)
588                 rate = parent_rate;
589
590         return rate;
591 }
592
/*
 * Compute the parent (MUX output) rate needed to produce @rate on VCLK.
 *
 * The PLL5 parameters filled into @params are discarded here; only the
 * resulting FOUTPOSTDIV rate matters.  A non-zero clksrc halves the rate
 * (presumably the FOUT1PH0 input — confirm against the mux diagram).
 */
static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}
608
static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	/* VCLK must not exceed 148.5 MHz; clamp rather than fail. */
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	/* Tell the clk core what parent rate this VCLK rate requires. */
	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}
619
620 static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
621                                       unsigned long rate,
622                                       unsigned long parent_rate)
623 {
624         struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
625         struct rzg2l_cpg_priv *priv = dsi_div->priv;
626
627         /*
628          * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
629          *
630          * Based on the dot clock, the DSI divider clock sets the divider value,
631          * calculates the pll parameters for generating FOUTPOSTDIV and the clk
632          * source for the MUX and propagates that info to the parents.
633          */
634
635         if (!rate || rate > MAX_VCLK_FREQ)
636                 return -EINVAL;
637
638         dsi_div->rate = rate;
639         writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
640                (priv->mux_dsi_div_params.dsi_div_a << 0) |
641                (priv->mux_dsi_div_params.dsi_div_b << 8),
642                priv->base + CPG_PL5_SDIV);
643
644         return 0;
645 }
646
/* clk_ops for the DSI divider clock (DIV_DSI_A/B). */
static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};
652
653 static struct clk * __init
654 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
655                                struct clk **clks,
656                                struct rzg2l_cpg_priv *priv)
657 {
658         struct dsi_div_hw_data *clk_hw_data;
659         const struct clk *parent;
660         const char *parent_name;
661         struct clk_init_data init;
662         struct clk_hw *clk_hw;
663         int ret;
664
665         parent = clks[core->parent & 0xffff];
666         if (IS_ERR(parent))
667                 return ERR_CAST(parent);
668
669         clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
670         if (!clk_hw_data)
671                 return ERR_PTR(-ENOMEM);
672
673         clk_hw_data->priv = priv;
674
675         parent_name = __clk_get_name(parent);
676         init.name = core->name;
677         init.ops = &rzg2l_cpg_dsi_div_ops;
678         init.flags = CLK_SET_RATE_PARENT;
679         init.parent_names = &parent_name;
680         init.num_parents = 1;
681
682         clk_hw = &clk_hw_data->hw;
683         clk_hw->init = &init;
684
685         ret = devm_clk_hw_register(priv->dev, clk_hw);
686         if (ret)
687                 return ERR_PTR(ret);
688
689         return clk_hw->clk;
690 }
691
/**
 * struct pll5_mux_hw_data - PLL5/4 MUX clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @rate: cached rate (not written in this chunk)
 * @priv: CPG private data structure
 */
struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};
698
699 #define to_pll5_mux_hw_data(_hw)        container_of(_hw, struct pll5_mux_hw_data, hw)
700
701 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
702                                                    struct clk_rate_request *req)
703 {
704         struct clk_hw *parent;
705         struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
706         struct rzg2l_cpg_priv *priv = hwdata->priv;
707
708         parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
709         req->best_parent_hw = parent;
710         req->best_parent_rate = req->rate;
711
712         return 0;
713 }
714
static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	/* Write-enable bit plus the selected parent index. */
	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}
735
/*
 * Read back the MUX selection from its register.
 *
 * NOTE(review): the full register value is returned without masking; this
 * assumes only the selection bit(s) read back as non-zero — confirm
 * against the CPG_OTHERFUNC1_REG layout.
 */
static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}
743
/* clk_ops for the PLL5/4 mux feeding the DSI divider clock. */
static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent	= rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_pll5_4_clk_mux_get_parent,
};
749
750 static struct clk * __init
751 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
752                                   struct rzg2l_cpg_priv *priv)
753 {
754         struct pll5_mux_hw_data *clk_hw_data;
755         struct clk_init_data init;
756         struct clk_hw *clk_hw;
757         int ret;
758
759         clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
760         if (!clk_hw_data)
761                 return ERR_PTR(-ENOMEM);
762
763         clk_hw_data->priv = priv;
764         clk_hw_data->conf = core->conf;
765
766         init.name = core->name;
767         init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
768         init.flags = CLK_SET_RATE_PARENT;
769         init.num_parents = core->num_parents;
770         init.parent_names = core->parent_names;
771
772         clk_hw = &clk_hw_data->hw;
773         clk_hw->init = &init;
774
775         ret = devm_clk_hw_register(priv->dev, clk_hw);
776         if (ret)
777                 return ERR_PTR(ret);
778
779         return clk_hw->clk;
780 }
781
/**
 * struct sipll5 - SIPLL5 clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @foutpostdiv_rate: cached FOUTPOSTDIV rate from the last set_rate call
 *                    (0 until then)
 * @priv: CPG private data structure
 */
struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};
788
789 #define to_sipll5(_hw)  container_of(_hw, struct sipll5, hw)
790
791 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
792                                              unsigned long rate)
793 {
794         struct sipll5 *sipll5 = to_sipll5(hw);
795         struct rzg2l_cpg_priv *priv = sipll5->priv;
796         unsigned long vclk;
797
798         vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
799                        (priv->mux_dsi_div_params.dsi_div_b + 1));
800
801         if (priv->mux_dsi_div_params.clksrc)
802                 vclk /= 2;
803
804         return vclk;
805 }
806
807 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
808                                                   unsigned long parent_rate)
809 {
810         struct sipll5 *sipll5 = to_sipll5(hw);
811         unsigned long pll5_rate = sipll5->foutpostdiv_rate;
812
813         if (!pll5_rate)
814                 pll5_rate = parent_rate;
815
816         return pll5_rate;
817 }
818
/*
 * Accept any requested rate unchanged: the exact PLL parameters are
 * computed from the target in rzg2l_cpg_sipll5_set_rate().
 */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
825
/*
 * Program PLL5 for the requested rate.  The register sequence
 * (standby -> program dividers -> normal mode -> wait for lock) is
 * order-sensitive; do not reorder the writes.
 */
static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *                   |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	/* Derive the VCLK rate, compute the PLL parameters for the matching
	 * FOUTPOSTDIV rate, and cache that rate for recalc_rate. */
	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5");
		return ret;
	}

	return 0;
}
894
/* clk_ops for the SIPLL5 (PLL5) clock used by the DSI/VCLK path. */
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
        .recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
        .round_rate = rzg2l_cpg_sipll5_round_rate,
        .set_rate = rzg2l_cpg_sipll5_set_rate,
};
900
901 static struct clk * __init
902 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
903                           struct clk **clks,
904                           struct rzg2l_cpg_priv *priv)
905 {
906         const struct clk *parent;
907         struct clk_init_data init;
908         const char *parent_name;
909         struct sipll5 *sipll5;
910         struct clk_hw *clk_hw;
911         int ret;
912
913         parent = clks[core->parent & 0xffff];
914         if (IS_ERR(parent))
915                 return ERR_CAST(parent);
916
917         sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
918         if (!sipll5)
919                 return ERR_PTR(-ENOMEM);
920
921         init.name = core->name;
922         parent_name = __clk_get_name(parent);
923         init.ops = &rzg2l_cpg_sipll5_ops;
924         init.flags = 0;
925         init.parent_names = &parent_name;
926         init.num_parents = 1;
927
928         sipll5->hw.init = &init;
929         sipll5->conf = core->conf;
930         sipll5->priv = priv;
931
932         writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
933                CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
934
935         clk_hw = &sipll5->hw;
936         clk_hw->init = &init;
937
938         ret = devm_clk_hw_register(priv->dev, clk_hw);
939         if (ret)
940                 return ERR_PTR(ret);
941
942         priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
943         priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
944         priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
945
946         return clk_hw->clk;
947 }
948
/**
 * struct pll_clk - PLL clock private data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: packed PLL register information, decoded via GET_REG_SAMPLL_CLK1/2
 * @type: clock type (CLK_TYPE_SAM_PLL or CLK_TYPE_G3S_PLL)
 * @base: CPG register block base address
 * @priv: CPG private data
 */
struct pll_clk {
        struct clk_hw hw;
        unsigned int conf;
        unsigned int type;
        void __iomem *base;
        struct rzg2l_cpg_priv *priv;
};
956
957 #define to_pll(_hw)     container_of(_hw, struct pll_clk, hw)
958
959 static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
960                                                    unsigned long parent_rate)
961 {
962         struct pll_clk *pll_clk = to_pll(hw);
963         struct rzg2l_cpg_priv *priv = pll_clk->priv;
964         unsigned int val1, val2;
965         u64 rate;
966
967         if (pll_clk->type != CLK_TYPE_SAM_PLL)
968                 return parent_rate;
969
970         val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
971         val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
972
973         rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
974                                16 + SDIV(val2));
975
976         return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
977 }
978
/* clk_ops for RZ/G2L SAM PLLs: rate is read back from hardware only. */
static const struct clk_ops rzg2l_cpg_pll_ops = {
        .recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
982
983 static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
984                                                    unsigned long parent_rate)
985 {
986         struct pll_clk *pll_clk = to_pll(hw);
987         struct rzg2l_cpg_priv *priv = pll_clk->priv;
988         u32 nir, nfr, mr, pr, val;
989         u64 rate;
990
991         if (pll_clk->type != CLK_TYPE_G3S_PLL)
992                 return parent_rate;
993
994         val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
995
996         pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
997         /* Hardware interprets values higher than 8 as p = 16. */
998         if (pr > 8)
999                 pr = 16;
1000
1001         mr  = FIELD_GET(RZG3S_DIV_M, val) + 1;
1002         nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
1003         nfr = FIELD_GET(RZG3S_DIV_NF, val);
1004
1005         rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);
1006
1007         return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
1008 }
1009
/* clk_ops for RZ/G3S PLLs: rate is read back from hardware only. */
static const struct clk_ops rzg3s_cpg_pll_ops = {
        .recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};
1013
1014 static struct clk * __init
1015 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
1016                            struct clk **clks,
1017                            void __iomem *base,
1018                            struct rzg2l_cpg_priv *priv,
1019                            const struct clk_ops *ops)
1020 {
1021         struct device *dev = priv->dev;
1022         const struct clk *parent;
1023         struct clk_init_data init;
1024         const char *parent_name;
1025         struct pll_clk *pll_clk;
1026
1027         parent = clks[core->parent & 0xffff];
1028         if (IS_ERR(parent))
1029                 return ERR_CAST(parent);
1030
1031         pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
1032         if (!pll_clk)
1033                 return ERR_PTR(-ENOMEM);
1034
1035         parent_name = __clk_get_name(parent);
1036         init.name = core->name;
1037         init.ops = ops;
1038         init.flags = 0;
1039         init.parent_names = &parent_name;
1040         init.num_parents = 1;
1041
1042         pll_clk->hw.init = &init;
1043         pll_clk->conf = core->conf;
1044         pll_clk->base = base;
1045         pll_clk->priv = priv;
1046         pll_clk->type = core->type;
1047
1048         return clk_register(NULL, &pll_clk->hw);
1049 }
1050
1051 static struct clk
1052 *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
1053                                void *data)
1054 {
1055         unsigned int clkidx = clkspec->args[1];
1056         struct rzg2l_cpg_priv *priv = data;
1057         struct device *dev = priv->dev;
1058         const char *type;
1059         struct clk *clk;
1060
1061         switch (clkspec->args[0]) {
1062         case CPG_CORE:
1063                 type = "core";
1064                 if (clkidx > priv->last_dt_core_clk) {
1065                         dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
1066                         return ERR_PTR(-EINVAL);
1067                 }
1068                 clk = priv->clks[clkidx];
1069                 break;
1070
1071         case CPG_MOD:
1072                 type = "module";
1073                 if (clkidx >= priv->num_mod_clks) {
1074                         dev_err(dev, "Invalid %s clock index %u\n", type,
1075                                 clkidx);
1076                         return ERR_PTR(-EINVAL);
1077                 }
1078                 clk = priv->clks[priv->num_core_clks + clkidx];
1079                 break;
1080
1081         default:
1082                 dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
1083                 return ERR_PTR(-EINVAL);
1084         }
1085
1086         if (IS_ERR(clk))
1087                 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
1088                         PTR_ERR(clk));
1089         else
1090                 dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
1091                         clkspec->args[0], clkspec->args[1], clk,
1092                         clk_get_rate(clk));
1093         return clk;
1094 }
1095
/*
 * Register one core clock from the per-SoC table, dispatching on its type,
 * and store the result in priv->clks[id]. On failure the error is logged
 * and the slot keeps its previous (error-pointer) value.
 */
static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
                            const struct rzg2l_cpg_info *info,
                            struct rzg2l_cpg_priv *priv)
{
        struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
        struct device *dev = priv->dev;
        unsigned int id = core->id, div = core->div;
        const char *parent_name;

        WARN_DEBUG(id >= priv->num_core_clks);
        WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

        if (!core->name) {
                /* Skip NULLified clock */
                return;
        }

        switch (core->type) {
        case CLK_TYPE_IN:
                /* External input clock, looked up by name in the device tree. */
                clk = of_clk_get_by_name(priv->dev->of_node, core->name);
                break;
        case CLK_TYPE_FF:
                /* Fixed-factor clock: parent rate scaled by mult/div. */
                WARN_DEBUG(core->parent >= priv->num_core_clks);
                parent = priv->clks[core->parent];
                if (IS_ERR(parent)) {
                        clk = parent;
                        goto fail;
                }

                parent_name = __clk_get_name(parent);
                clk = clk_register_fixed_factor(NULL, core->name,
                                                parent_name, CLK_SET_RATE_PARENT,
                                                core->mult, div);
                break;
        case CLK_TYPE_SAM_PLL:
                clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
                                                 &rzg2l_cpg_pll_ops);
                break;
        case CLK_TYPE_G3S_PLL:
                clk = rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv,
                                                 &rzg3s_cpg_pll_ops);
                break;
        case CLK_TYPE_SIPLL5:
                clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
                break;
        case CLK_TYPE_DIV:
                clk = rzg2l_cpg_div_clk_register(core, priv->clks,
                                                 priv->base, priv);
                break;
        case CLK_TYPE_G3S_DIV:
                clk = rzg3s_cpg_div_clk_register(core, priv->clks, priv->base, priv);
                break;
        case CLK_TYPE_MUX:
                clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
                break;
        case CLK_TYPE_SD_MUX:
                clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
                break;
        case CLK_TYPE_PLL5_4_MUX:
                clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
                break;
        case CLK_TYPE_DSI_DIV:
                clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
                break;
        default:
                /* Unknown type: fall through to the error path (-EOPNOTSUPP). */
                goto fail;
        }

        if (IS_ERR_OR_NULL(clk))
                goto fail;

        dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
        priv->clks[id] = clk;
        return;

fail:
        dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
                core->name, PTR_ERR(clk));
}
1176
/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset (0 means the clock has no ON/OFF control)
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
        struct clk_hw hw;
        u16 off;
        u8 bit;
        bool enabled;
        struct rzg2l_cpg_priv *priv;
        struct mstp_clock *sibling;
};
1195
1196 #define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
1197
/*
 * Gate or ungate a module clock through its CLK_ON register. When
 * enabling, and when the SoC has CLK_MON registers, poll the matching
 * monitor bit until the clock is reported as running.
 */
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
        struct mstp_clock *clock = to_mod_clock(hw);
        struct rzg2l_cpg_priv *priv = clock->priv;
        unsigned int reg = clock->off;
        struct device *dev = priv->dev;
        u32 bitmask = BIT(clock->bit);
        u32 value;
        int error;

        /* Offset 0 marks clocks without ON/OFF control; report success. */
        if (!clock->off) {
                dev_dbg(dev, "%pC does not support ON/OFF\n",  hw->clk);
                return 0;
        }

        dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
                enable ? "ON" : "OFF");

        /* The upper halfword carries the write-enable mask for the lower one. */
        value = bitmask << 16;
        if (enable)
                value |= bitmask;

        writel(value, priv->base + CLK_ON_R(reg));

        if (!enable)
                return 0;

        if (!priv->info->has_clk_mon_regs)
                return 0;

        error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
                                          value & bitmask, 0, 10);
        if (error)
                dev_err(dev, "Failed to enable CLK_ON %p\n",
                        priv->base + CLK_ON_R(reg));

        return error;
}
1236
static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
        struct mstp_clock *clock = to_mod_clock(hw);

        /*
         * Coupled clocks share one hardware gate: record our soft state
         * under the lock and skip the hardware write if the sibling is
         * already keeping the gate open.
         */
        if (clock->sibling) {
                struct rzg2l_cpg_priv *priv = clock->priv;
                unsigned long flags;
                bool enabled;

                spin_lock_irqsave(&priv->rmw_lock, flags);
                enabled = clock->sibling->enabled;
                clock->enabled = true;
                spin_unlock_irqrestore(&priv->rmw_lock, flags);
                if (enabled)
                        return 0;
        }

        return rzg2l_mod_clock_endisable(hw, true);
}
1256
static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
        struct mstp_clock *clock = to_mod_clock(hw);

        /*
         * Coupled clocks share one hardware gate: record our soft state
         * under the lock and leave the gate open while the sibling still
         * needs it.
         */
        if (clock->sibling) {
                struct rzg2l_cpg_priv *priv = clock->priv;
                unsigned long flags;
                bool enabled;

                spin_lock_irqsave(&priv->rmw_lock, flags);
                enabled = clock->sibling->enabled;
                clock->enabled = false;
                spin_unlock_irqrestore(&priv->rmw_lock, flags);
                if (enabled)
                        return;
        }

        rzg2l_mod_clock_endisable(hw, false);
}
1276
static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
        struct mstp_clock *clock = to_mod_clock(hw);
        struct rzg2l_cpg_priv *priv = clock->priv;
        u32 bitmask = BIT(clock->bit);
        u32 value;

        /* Clocks without ON/OFF control are always considered enabled. */
        if (!clock->off) {
                dev_dbg(priv->dev, "%pC does not support ON/OFF\n",  hw->clk);
                return 1;
        }

        /* Coupled clocks track their state in software. */
        if (clock->sibling)
                return clock->enabled;

        /* Prefer the monitor register when the SoC provides one. */
        if (priv->info->has_clk_mon_regs)
                value = readl(priv->base + CLK_MON_R(clock->off));
        else
                value = readl(priv->base + clock->off);

        return value & bitmask;
}
1299
/* clk_ops for MSTP module gate clocks. */
static const struct clk_ops rzg2l_mod_clock_ops = {
        .enable = rzg2l_mod_clock_enable,
        .disable = rzg2l_mod_clock_disable,
        .is_enabled = rzg2l_mod_clock_is_enabled,
};
1305
1306 static struct mstp_clock
1307 *rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
1308                              struct rzg2l_cpg_priv *priv)
1309 {
1310         struct clk_hw *hw;
1311         unsigned int i;
1312
1313         for (i = 0; i < priv->num_mod_clks; i++) {
1314                 struct mstp_clock *clk;
1315
1316                 if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
1317                         continue;
1318
1319                 hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
1320                 clk = to_mod_clock(hw);
1321                 if (clock->off == clk->off && clock->bit == clk->bit)
1322                         return clk;
1323         }
1324
1325         return NULL;
1326 }
1327
/*
 * Register one module (MSTP gate) clock from the per-SoC table and store
 * it in priv->clks[id]. On failure the error is logged and the slot keeps
 * its previous (error-pointer) value.
 */
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
                           const struct rzg2l_cpg_info *info,
                           struct rzg2l_cpg_priv *priv)
{
        struct mstp_clock *clock = NULL;
        struct device *dev = priv->dev;
        unsigned int id = mod->id;
        struct clk_init_data init;
        struct clk *parent, *clk;
        const char *parent_name;
        unsigned int i;

        WARN_DEBUG(id < priv->num_core_clks);
        WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
        WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
        WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

        if (!mod->name) {
                /* Skip NULLified clock */
                return;
        }

        parent = priv->clks[mod->parent];
        if (IS_ERR(parent)) {
                clk = parent;
                goto fail;
        }

        clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
        if (!clock) {
                clk = ERR_PTR(-ENOMEM);
                goto fail;
        }

        init.name = mod->name;
        init.ops = &rzg2l_mod_clock_ops;
        init.flags = CLK_SET_RATE_PARENT;
        /* Critical clocks must never be gated by the clock framework. */
        for (i = 0; i < info->num_crit_mod_clks; i++)
                if (id == info->crit_mod_clks[i]) {
                        dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
                                mod->name);
                        init.flags |= CLK_IS_CRITICAL;
                        break;
                }

        parent_name = __clk_get_name(parent);
        init.parent_names = &parent_name;
        init.num_parents = 1;

        clock->off = mod->off;
        clock->bit = mod->bit;
        clock->priv = priv;
        clock->hw.init = &init;

        clk = clk_register(NULL, &clock->hw);
        if (IS_ERR(clk))
                goto fail;

        dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
        priv->clks[id] = clk;

        /* Cross-link coupled clocks that share a single hardware gate. */
        if (mod->is_coupled) {
                struct mstp_clock *sibling;

                clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
                sibling = rzg2l_mod_clock_get_sibling(clock, priv);
                if (sibling) {
                        clock->sibling = sibling;
                        sibling->sibling = clock;
                }
        }

        return;

fail:
        dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
                mod->name, PTR_ERR(clk));
}
1407
1408 #define rcdev_to_priv(x)        container_of(x, struct rzg2l_cpg_priv, rcdev)
1409
1410 static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1411                             unsigned long id)
1412 {
1413         struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1414         const struct rzg2l_cpg_info *info = priv->info;
1415         unsigned int reg = info->resets[id].off;
1416         u32 mask = BIT(info->resets[id].bit);
1417         s8 monbit = info->resets[id].monbit;
1418         u32 value = mask << 16;
1419
1420         dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1421
1422         writel(value, priv->base + CLK_RST_R(reg));
1423
1424         if (info->has_clk_mon_regs) {
1425                 reg = CLK_MRST_R(reg);
1426         } else if (monbit >= 0) {
1427                 reg = CPG_RST_MON;
1428                 mask = BIT(monbit);
1429         } else {
1430                 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1431                 udelay(35);
1432                 return 0;
1433         }
1434
1435         return readl_poll_timeout_atomic(priv->base + reg, value,
1436                                          value & mask, 10, 200);
1437 }
1438
1439 static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
1440                               unsigned long id)
1441 {
1442         struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1443         const struct rzg2l_cpg_info *info = priv->info;
1444         unsigned int reg = info->resets[id].off;
1445         u32 mask = BIT(info->resets[id].bit);
1446         s8 monbit = info->resets[id].monbit;
1447         u32 value = (mask << 16) | mask;
1448
1449         dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
1450                 CLK_RST_R(reg));
1451
1452         writel(value, priv->base + CLK_RST_R(reg));
1453
1454         if (info->has_clk_mon_regs) {
1455                 reg = CLK_MRST_R(reg);
1456         } else if (monbit >= 0) {
1457                 reg = CPG_RST_MON;
1458                 mask = BIT(monbit);
1459         } else {
1460                 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1461                 udelay(35);
1462                 return 0;
1463         }
1464
1465         return readl_poll_timeout_atomic(priv->base + reg, value,
1466                                          !(value & mask), 10, 200);
1467 }
1468
/* Pulse reset line @id: assert, then deassert if the assert succeeded. */
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
                           unsigned long id)
{
        int ret = rzg2l_cpg_assert(rcdev, id);

        return ret ? ret : rzg2l_cpg_deassert(rcdev, id);
}
1480
1481 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1482                             unsigned long id)
1483 {
1484         struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1485         const struct rzg2l_cpg_info *info = priv->info;
1486         s8 monbit = info->resets[id].monbit;
1487         unsigned int reg;
1488         u32 bitmask;
1489
1490         if (info->has_clk_mon_regs) {
1491                 reg = CLK_MRST_R(info->resets[id].off);
1492                 bitmask = BIT(info->resets[id].bit);
1493         } else if (monbit >= 0) {
1494                 reg = CPG_RST_MON;
1495                 bitmask = BIT(monbit);
1496         } else {
1497                 return -ENOTSUPP;
1498         }
1499
1500         return !!(readl(priv->base + reg) & bitmask);
1501 }
1502
/* Reset controller operations backed by the CPG reset/monitor registers. */
static const struct reset_control_ops rzg2l_cpg_reset_ops = {
        .reset = rzg2l_cpg_reset,
        .assert = rzg2l_cpg_assert,
        .deassert = rzg2l_cpg_deassert,
        .status = rzg2l_cpg_status,
};
1509
1510 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1511                                  const struct of_phandle_args *reset_spec)
1512 {
1513         struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1514         const struct rzg2l_cpg_info *info = priv->info;
1515         unsigned int id = reset_spec->args[0];
1516
1517         if (id >= rcdev->nr_resets || !info->resets[id].off) {
1518                 dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1519                 return -EINVAL;
1520         }
1521
1522         return id;
1523 }
1524
1525 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1526 {
1527         priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1528         priv->rcdev.of_node = priv->dev->of_node;
1529         priv->rcdev.dev = priv->dev;
1530         priv->rcdev.of_reset_n_cells = 1;
1531         priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1532         priv->rcdev.nr_resets = priv->num_resets;
1533
1534         return devm_reset_controller_register(priv->dev, &priv->rcdev);
1535 }
1536
1537 static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
1538                                 const struct of_phandle_args *clkspec)
1539 {
1540         const struct rzg2l_cpg_info *info = priv->info;
1541         unsigned int id;
1542         unsigned int i;
1543
1544         if (clkspec->args_count != 2)
1545                 return false;
1546
1547         if (clkspec->args[0] != CPG_MOD)
1548                 return false;
1549
1550         id = clkspec->args[1] + info->num_total_core_clks;
1551         for (i = 0; i < info->num_no_pm_mod_clks; i++) {
1552                 if (info->no_pm_mod_clks[i] == id)
1553                         return false;
1554         }
1555
1556         return true;
1557 }
1558
/**
 * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
 * @onecell_data: cell data
 * @domains: generic PM domains (flexible array, sized at allocation time)
 */
struct rzg2l_cpg_pm_domains {
        struct genpd_onecell_data onecell_data;
        struct generic_pm_domain *domains[];
};
1568
/**
 * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
 * @genpd: generic PM domain
 * @priv: pointer to CPG private data structure
 * @conf: CPG PM domain configuration info (e.g. MSTOP register and mask)
 * @id: RZ/G2L power domain ID (matched against the power-domain DT cell)
 */
struct rzg2l_cpg_pd {
        struct generic_pm_domain genpd;
        struct rzg2l_cpg_priv *priv;
        struct rzg2l_cpg_pm_domain_conf conf;
        u16 id;
};
1582
/*
 * genpd .attach_dev hook: walk the device's "clocks" DT property and add
 * every PM-manageable module clock to its pm_clk list, so the clocks are
 * gated together with the domain. The pm_clk list is created lazily on the
 * first matching clock and torn down again on any subsequent failure.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
        struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
        struct rzg2l_cpg_priv *priv = pd->priv;
        struct device_node *np = dev->of_node;
        struct of_phandle_args clkspec;
        bool once = true;
        struct clk *clk;
        int error;
        int i = 0;

        while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
                                           &clkspec)) {
                if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
                        /* Create the pm_clk list on the first PM clock only. */
                        if (once) {
                                once = false;
                                error = pm_clk_create(dev);
                                if (error) {
                                        of_node_put(clkspec.np);
                                        goto err;
                                }
                        }
                        clk = of_clk_get_from_provider(&clkspec);
                        of_node_put(clkspec.np);
                        if (IS_ERR(clk)) {
                                error = PTR_ERR(clk);
                                goto fail_destroy;
                        }

                        error = pm_clk_add_clk(dev, clk);
                        if (error) {
                                dev_err(dev, "pm_clk_add_clk failed %d\n",
                                        error);
                                goto fail_put;
                        }
                } else {
                        of_node_put(clkspec.np);
                }
                i++;
        }

        return 0;

fail_put:
        clk_put(clk);

fail_destroy:
        pm_clk_destroy(dev);
err:
        return error;
}
1634
/* genpd .detach_dev hook: drop the pm_clk state if attach_dev created any. */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
        if (pm_clk_no_clocks(dev))
                return;

        pm_clk_destroy(dev);
}
1640
1641 static void rzg2l_cpg_genpd_remove(void *data)
1642 {
1643         struct genpd_onecell_data *celldata = data;
1644
1645         for (unsigned int i = 0; i < celldata->num_domains; i++)
1646                 pm_genpd_remove(celldata->domains[i]);
1647 }
1648
/* devm action: remove the single genpd registered by the clk-domain path. */
static void rzg2l_cpg_genpd_remove_simple(void *data)
{
        pm_genpd_remove(data);
}
1653
/*
 * genpd .power_on hook: write the domain's MSTOP mask into the upper
 * (write-enable) halfword with the data bits cleared, releasing the
 * module-stop state for this domain.
 */
static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
{
        struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
        struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
        struct rzg2l_cpg_priv *priv = pd->priv;

        /* Set MSTOP. */
        if (mstop.mask)
                writel(mstop.mask << 16, priv->base + mstop.off);

        return 0;
}
1666
/*
 * genpd .power_off hook: write the domain's MSTOP mask with both the
 * write-enable and data bits set, engaging the module-stop state.
 */
static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
{
        struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
        struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
        struct rzg2l_cpg_priv *priv = pd->priv;

        /* Set MSTOP. */
        if (mstop.mask)
                writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);

        return 0;
}
1679
/*
 * Common genpd initialization. Always-on domains get the always-on
 * governor and no power hooks; all others use the simple QoS governor
 * with MSTOP-based power_on/power_off and start in the "off" state.
 */
static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd, bool always_on)
{
        struct dev_power_governor *governor;

        pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
        pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
        pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
        if (always_on) {
                pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
                governor = &pm_domain_always_on_gov;
        } else {
                pd->genpd.power_on = rzg2l_cpg_power_on;
                pd->genpd.power_off = rzg2l_cpg_power_off;
                governor = &simple_qos_governor;
        }

        /* Third argument: start powered off unless the domain is always-on. */
        return pm_genpd_init(&pd->genpd, governor, !always_on);
}
1698
/*
 * Register a single always-on clock domain for the legacy binding with
 * #power-domain-cells = 0 (see the backward-compatibility check in the
 * caller). Removal is handled by a devm action.
 */
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
        struct device *dev = priv->dev;
        struct device_node *np = dev->of_node;
        struct rzg2l_cpg_pd *pd;
        int ret;

        pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return -ENOMEM;

        pd->genpd.name = np->name;
        pd->priv = priv;
        ret = rzg2l_cpg_pd_setup(pd, true);
        if (ret)
                return ret;

        ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
        if (ret)
                return ret;

        return of_genpd_add_provider_simple(np, &pd->genpd);
}
1722
1723 static struct generic_pm_domain *
1724 rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
1725 {
1726         struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
1727         struct genpd_onecell_data *genpd = data;
1728
1729         if (spec->args_count != 1)
1730                 return ERR_PTR(-EINVAL);
1731
1732         for (unsigned int i = 0; i < genpd->num_domains; i++) {
1733                 struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
1734                                                        genpd);
1735
1736                 if (pd->id == spec->args[0]) {
1737                         domain = &pd->genpd;
1738                         break;
1739                 }
1740         }
1741
1742         return domain;
1743 }
1744
1745 static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
1746 {
1747         const struct rzg2l_cpg_info *info = priv->info;
1748         struct device *dev = priv->dev;
1749         struct device_node *np = dev->of_node;
1750         struct rzg2l_cpg_pm_domains *domains;
1751         struct generic_pm_domain *parent;
1752         u32 ncells;
1753         int ret;
1754
1755         ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
1756         if (ret)
1757                 return ret;
1758
1759         /* For backward compatibility. */
1760         if (!ncells)
1761                 return rzg2l_cpg_add_clk_domain(priv);
1762
1763         domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
1764                                GFP_KERNEL);
1765         if (!domains)
1766                 return -ENOMEM;
1767
1768         domains->onecell_data.domains = domains->domains;
1769         domains->onecell_data.num_domains = info->num_pm_domains;
1770         domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;
1771
1772         ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
1773         if (ret)
1774                 return ret;
1775
1776         for (unsigned int i = 0; i < info->num_pm_domains; i++) {
1777                 bool always_on = !!(info->pm_domains[i].flags & RZG2L_PD_F_ALWAYS_ON);
1778                 struct rzg2l_cpg_pd *pd;
1779
1780                 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
1781                 if (!pd)
1782                         return -ENOMEM;
1783
1784                 pd->genpd.name = info->pm_domains[i].name;
1785                 pd->conf = info->pm_domains[i].conf;
1786                 pd->id = info->pm_domains[i].id;
1787                 pd->priv = priv;
1788
1789                 ret = rzg2l_cpg_pd_setup(pd, always_on);
1790                 if (ret)
1791                         return ret;
1792
1793                 if (always_on) {
1794                         ret = rzg2l_cpg_power_on(&pd->genpd);
1795                         if (ret)
1796                                 return ret;
1797                 }
1798
1799                 domains->domains[i] = &pd->genpd;
1800                 /* Parent should be on the very first entry of info->pm_domains[]. */
1801                 if (!i) {
1802                         parent = &pd->genpd;
1803                         continue;
1804                 }
1805
1806                 ret = pm_genpd_add_subdomain(parent, &pd->genpd);
1807                 if (ret)
1808                         return ret;
1809         }
1810
1811         ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
1812         if (ret)
1813                 return ret;
1814
1815         return 0;
1816 }
1817
1818 static int __init rzg2l_cpg_probe(struct platform_device *pdev)
1819 {
1820         struct device *dev = &pdev->dev;
1821         struct device_node *np = dev->of_node;
1822         const struct rzg2l_cpg_info *info;
1823         struct rzg2l_cpg_priv *priv;
1824         unsigned int nclks, i;
1825         struct clk **clks;
1826         int error;
1827
1828         info = of_device_get_match_data(dev);
1829
1830         priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1831         if (!priv)
1832                 return -ENOMEM;
1833
1834         priv->dev = dev;
1835         priv->info = info;
1836         spin_lock_init(&priv->rmw_lock);
1837
1838         priv->base = devm_platform_ioremap_resource(pdev, 0);
1839         if (IS_ERR(priv->base))
1840                 return PTR_ERR(priv->base);
1841
1842         nclks = info->num_total_core_clks + info->num_hw_mod_clks;
1843         clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
1844         if (!clks)
1845                 return -ENOMEM;
1846
1847         dev_set_drvdata(dev, priv);
1848         priv->clks = clks;
1849         priv->num_core_clks = info->num_total_core_clks;
1850         priv->num_mod_clks = info->num_hw_mod_clks;
1851         priv->num_resets = info->num_resets;
1852         priv->last_dt_core_clk = info->last_dt_core_clk;
1853
1854         for (i = 0; i < nclks; i++)
1855                 clks[i] = ERR_PTR(-ENOENT);
1856
1857         for (i = 0; i < info->num_core_clks; i++)
1858                 rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);
1859
1860         for (i = 0; i < info->num_mod_clks; i++)
1861                 rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
1862
1863         error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
1864         if (error)
1865                 return error;
1866
1867         error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
1868         if (error)
1869                 return error;
1870
1871         error = rzg2l_cpg_add_pm_domains(priv);
1872         if (error)
1873                 return error;
1874
1875         error = rzg2l_cpg_reset_controller_register(priv);
1876         if (error)
1877                 return error;
1878
1879         return 0;
1880 }
1881
/*
 * Match table: one entry per supported SoC, each gated by its Kconfig
 * option so only the selected SoCs' per-chip info tables are linked in.
 */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A08G045
	{
		.compatible = "renesas,r9a08g045-cpg",
		.data = &r9a08g045_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
1915
/*
 * No .probe here: the probe routine is __init and is passed to
 * platform_driver_probe() below.  No .remove either — the CPG provides
 * system-critical clocks, so the driver is never unbound.
 */
static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};
1922
/* One-shot registration; allows rzg2l_cpg_probe() to stay in __init memory. */
static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

/* Clocks must be available before ordinary device drivers probe. */
subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
This page took 0.142843 seconds and 4 git commands to generate.