/* drivers/clk/renesas/rcar-gen4-cpg.c — captured from linux.git (gitweb view) */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * R-Car Gen4 Clock Pulse Generator
4  *
5  * Copyright (C) 2021 Renesas Electronics Corp.
6  *
7  * Based on rcar-gen3-cpg.c
8  *
9  * Copyright (C) 2015-2018 Glider bvba
10  * Copyright (C) 2019 Renesas Electronics Corp.
11  */
12
13 #include <linux/bitfield.h>
14 #include <linux/clk.h>
15 #include <linux/clk-provider.h>
16 #include <linux/device.h>
17 #include <linux/err.h>
18 #include <linux/init.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/slab.h>
22
23 #include "renesas-cpg-mssr.h"
24 #include "rcar-gen4-cpg.h"
25 #include "rcar-cpg-lib.h"
26
/* SoC parameters saved by rcar_gen4_cpg_init() for use during clock setup */
static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;	/* presumably the EXTALR clock index — set at init, not read in this chunk */
static u32 cpg_mode __initdata;			/* mode (MD) pin settings, tested via BIT(core->offset) for MDSEL clocks */
30
#define CPG_PLLECR		0x0820	/* PLL Enable Control Register */

/*
 * PLLn Circuit Status bit in PLLECR.  The status bits are not contiguous
 * in n: PLL1 -> bit 8, PLL2 -> bit 9, PLL3 -> bit 11, PLL4 -> bit 13,
 * PLL6 -> bit 15.
 */
#define CPG_PLLECR_PLLST(n)	BIT(8 + ((n) < 3 ? (n) - 1 : \
					 (n) > 3 ? (n) + 1 : n)) /* PLLn Circuit Status */

#define CPG_PLL1CR0		0x830	/* PLLn Control Registers */
#define CPG_PLL1CR1		0x8b0
#define CPG_PLL2CR0		0x834
#define CPG_PLL2CR1		0x8b8
#define CPG_PLL3CR0		0x83c
#define CPG_PLL3CR1		0x8c0
#define CPG_PLL4CR0		0x844
#define CPG_PLL4CR1		0x8c8
#define CPG_PLL6CR0		0x84c
#define CPG_PLL6CR1		0x8d8

/* PLLn Control Register 0 fields */
#define CPG_PLLxCR0_KICK	BIT(31)
#define CPG_PLLxCR0_NI		GENMASK(27, 20)	/* Integer mult. factor */
#define CPG_PLLxCR0_SSMODE	GENMASK(18, 16)	/* PLL mode */
#define CPG_PLLxCR0_SSMODE_FM	BIT(18)	/* Fractional Multiplication */
#define CPG_PLLxCR0_SSMODE_DITH	BIT(17)	/* Frequency Dithering */
#define CPG_PLLxCR0_SSMODE_CENT	BIT(16)	/* Center (vs. Down) Spread Dithering */
#define CPG_PLLxCR0_SSFREQ	GENMASK(14, 8)	/* SSCG Modulation Frequency */
#define CPG_PLLxCR0_SSDEPT	GENMASK(6, 0)	/* SSCG Modulation Depth */

/* NOTE(review): the SSMODE_* values below appear unused in this chunk — confirm */
#define SSMODE_FM		BIT(2)	/* Fractional Multiplication */
#define SSMODE_DITHER		BIT(1)	/* Frequency Dithering */
#define SSMODE_CENTER		BIT(0)	/* Center (vs. Down) Spread Dithering */
59
/* PLL Clocks */
struct cpg_pll_clk {
	struct clk_hw hw;		/* common clock framework handle */
	void __iomem *pllcr0_reg;	/* this PLL's Control Register 0 */
	void __iomem *pllecr_reg;	/* shared PLL Enable Control Register */
	u32 pllecr_pllst_mask;		/* this PLL's PLLST status bit in PLLECR */
};

#define to_pll_clk(_hw)   container_of(_hw, struct cpg_pll_clk, hw)
69
70 static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
71                                              unsigned long parent_rate)
72 {
73         struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
74         unsigned int mult;
75
76         mult = FIELD_GET(CPG_PLLxCR0_NI, readl(pll_clk->pllcr0_reg)) + 1;
77
78         return parent_rate * mult * 2;
79 }
80
81 static int cpg_pll_clk_determine_rate(struct clk_hw *hw,
82                                       struct clk_rate_request *req)
83 {
84         unsigned int min_mult, max_mult, mult;
85         unsigned long prate;
86
87         prate = req->best_parent_rate * 2;
88         min_mult = max(div64_ul(req->min_rate, prate), 1ULL);
89         max_mult = min(div64_ul(req->max_rate, prate), 256ULL);
90         if (max_mult < min_mult)
91                 return -EINVAL;
92
93         mult = DIV_ROUND_CLOSEST_ULL(req->rate, prate);
94         mult = clamp(mult, min_mult, max_mult);
95
96         req->rate = prate * mult;
97         return 0;
98 }
99
/*
 * Program a new integer multiplication factor (NI) and kick the PLL to
 * apply it.  Returns -EBUSY if a previous update is still pending,
 * 0 on success, or the readl_poll_timeout() error (-ETIMEDOUT) if the
 * PLL status bit does not assert within 1 ms.
 */
static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
	unsigned int mult;
	u32 val;

	/* rate = parent x 2 x NI, with NI limited to [1..256] */
	mult = DIV_ROUND_CLOSEST_ULL(rate, parent_rate * 2);
	mult = clamp(mult, 1U, 256U);

	/* KICK still set means the previous change has not completed yet */
	if (readl(pll_clk->pllcr0_reg) & CPG_PLLxCR0_KICK)
		return -EBUSY;

	/* Register stores NI - 1 */
	cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI,
		       FIELD_PREP(CPG_PLLxCR0_NI, mult - 1));

	/*
	 * Set KICK bit in PLLxCR0 to update hardware setting and wait for
	 * clock change completion.
	 */
	cpg_reg_modify(pll_clk->pllcr0_reg, 0, CPG_PLLxCR0_KICK);

	/*
	 * Note: There is no HW information about the worst case latency.
	 *
	 * Using experimental measurements, it seems that no more than
	 * ~45 µs are needed, independently of the CPU rate.
	 * Since this value might be dependent on external xtal rate, pll
	 * rate or even the other emulation clocks rate, use 1000 as a
	 * "super" safe value.
	 */
	return readl_poll_timeout(pll_clk->pllecr_reg, val,
				  val & pll_clk->pllecr_pllst_mask, 0, 1000);
}
134
/* clk_ops for the variable (custom) PLL clocks */
static const struct clk_ops cpg_pll_clk_ops = {
	.recalc_rate = cpg_pll_clk_recalc_rate,
	.determine_rate = cpg_pll_clk_determine_rate,
	.set_rate = cpg_pll_clk_set_rate,
};
140
/*
 * Register one variable PLL as a custom clock (only PLL2 in this chunk).
 * @cr0_offset/@cr1_offset: offsets of the PLLn Control Registers 0/1
 * @index: PLL number, used to derive its PLLECR status bit
 *
 * Returns the registered clock or ERR_PTR(-ENOMEM) on allocation failure
 * (the ERR_PTR from clk_register() is propagated as-is).
 */
static struct clk * __init cpg_pll_clk_register(const char *name,
						const char *parent_name,
						void __iomem *base,
						unsigned int cr0_offset,
						unsigned int cr1_offset,
						unsigned int index)

{
	struct cpg_pll_clk *pll_clk;
	struct clk_init_data init = {};	/* on-stack is fine: consumed during clk_register() */
	struct clk *clk;

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &cpg_pll_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->pllcr0_reg = base + cr0_offset;
	pll_clk->pllecr_reg = base + CPG_PLLECR;
	pll_clk->pllecr_pllst_mask = CPG_PLLECR_PLLST(index);

	/* Disable Fractional Multiplication and Frequency Dithering */
	writel(0, base + cr1_offset);
	cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_SSMODE, 0);

	clk = clk_register(NULL, &pll_clk->hw);
	if (IS_ERR(clk))
		kfree(pll_clk);

	return clk;
}
/*
 * Z0 Clock & Z1 Clock
 */
#define CPG_FRQCRB			0x00000804	/* Frequency Control Register B (KICK) */
#define CPG_FRQCRB_KICK			BIT(31)
#define CPG_FRQCRC0			0x00000808	/* Frequency Control Registers C (dividers) */
#define CPG_FRQCRC1			0x000008e0

/* Z (SYS-CPU) clock: parent / fixed_div, scaled by a 5-bit FRQCRC field */
struct cpg_z_clk {
	struct clk_hw hw;
	void __iomem *reg;		/* FRQCRC0 or FRQCRC1 */
	void __iomem *kick_reg;		/* FRQCRB */
	unsigned long max_rate;		/* Maximum rate for normal mode */
	unsigned int fixed_div;
	u32 mask;			/* this core's divider field within @reg */
};

#define to_z_clk(_hw)	container_of(_hw, struct cpg_z_clk, hw)
195
196 static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
197                                            unsigned long parent_rate)
198 {
199         struct cpg_z_clk *zclk = to_z_clk(hw);
200         unsigned int mult;
201         u32 val;
202
203         val = readl(zclk->reg) & zclk->mask;
204         mult = 32 - (val >> __ffs(zclk->mask));
205
206         return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
207                                      32 * zclk->fixed_div);
208 }
209
/*
 * Negotiate the Z rate.  For rates up to max_rate the parent keeps its
 * normal-mode rate and only the 1/32-step multiplier changes; higher
 * (boost) rates instead request an increased parent rate.
 */
static int cpg_z_clk_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int min_mult, max_mult, mult;
	unsigned long rate, prate;

	rate = min(req->rate, req->max_rate);
	if (rate <= zclk->max_rate) {
		/* Set parent rate to initial value for normal modes */
		prate = zclk->max_rate;
	} else {
		/* Set increased parent rate for boost modes */
		prate = rate;
	}
	req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						  prate * zclk->fixed_div);

	/* Z rate = prate * mult / 32, with mult restricted to [1..32] */
	prate = req->best_parent_rate / zclk->fixed_div;
	min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
	max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
	if (max_mult < min_mult)
		return -EINVAL;

	mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate);
	mult = clamp(mult, min_mult, max_mult);

	req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32);
	return 0;
}
240
241 static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
242                               unsigned long parent_rate)
243 {
244         struct cpg_z_clk *zclk = to_z_clk(hw);
245         unsigned int mult;
246         unsigned int i;
247
248         mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
249                                        parent_rate);
250         mult = clamp(mult, 1U, 32U);
251
252         if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
253                 return -EBUSY;
254
255         cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
256
257         /*
258          * Set KICK bit in FRQCRB to update hardware setting and wait for
259          * clock change completion.
260          */
261         cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
262
263         /*
264          * Note: There is no HW information about the worst case latency.
265          *
266          * Using experimental measurements, it seems that no more than
267          * ~10 iterations are needed, independently of the CPU rate.
268          * Since this value might be dependent on external xtal rate, pll1
269          * rate or even the other emulation clocks rate, use 1000 as a
270          * "super" safe value.
271          */
272         for (i = 1000; i; i--) {
273                 if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
274                         return 0;
275
276                 cpu_relax();
277         }
278
279         return -ETIMEDOUT;
280 }
281
/* clk_ops for the Z (SYS-CPU) clocks */
static const struct clk_ops cpg_z_clk_ops = {
	.recalc_rate = cpg_z_clk_recalc_rate,
	.determine_rate = cpg_z_clk_determine_rate,
	.set_rate = cpg_z_clk_set_rate,
};
287
/*
 * Register a Z (SYS-CPU) clock.
 * @reg: base of the CPG register block
 * @div: fixed divider between the parent (PLL VCO) and the Z divider
 * @offset: bit offset of this core's 5-bit divider field; offsets >= 32
 *          select FRQCRC1 instead of FRQCRC0
 *
 * Returns the registered clock or an ERR_PTR() on failure.
 */
static struct clk * __init cpg_z_clk_register(const char *name,
					      const char *parent_name,
					      void __iomem *reg,
					      unsigned int div,
					      unsigned int offset)
{
	struct clk_init_data init = {};	/* on-stack is fine: consumed during clk_register() */
	struct cpg_z_clk *zclk;
	struct clk *clk;

	zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
	if (!zclk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &cpg_z_clk_ops;
	init.flags = CLK_SET_RATE_PARENT;	/* boost modes raise the parent rate */
	init.parent_names = &parent_name;
	init.num_parents = 1;

	if (offset < 32) {
		zclk->reg = reg + CPG_FRQCRC0;
	} else {
		zclk->reg = reg + CPG_FRQCRC1;
		offset -= 32;
	}
	zclk->kick_reg = reg + CPG_FRQCRB;
	zclk->hw.init = &init;
	zclk->mask = GENMASK(offset + 4, offset);	/* 5-bit divider field */
	zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */

	clk = clk_register(NULL, &zclk->hw);
	if (IS_ERR(clk)) {
		kfree(zclk);
		return clk;
	}

	/* Normal-mode maximum: the parent rate at registration time / fixed_div */
	zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) /
			 zclk->fixed_div;
	return clk;
}
329
/*
 * RPC Clocks
 */
/* RPCSRC register value -> divider mapping; { 0, 0 } terminates the table */
static const struct clk_div_table cpg_rpcsrc_div_table[] = {
	{ 0, 4 }, { 1, 6 }, { 2, 5 }, { 3, 6 }, { 0, 0 },
};
336
/**
 * rcar_gen4_cpg_clk_register - Instantiate one R-Car Gen4 CPG core clock
 * @dev: CPG/MSSR device (unused here, part of the common interface)
 * @core: description of the core clock to register
 * @info: SoC-specific CPG/MSSR information (unused here)
 * @clks: previously registered clocks, indexed for parent lookup
 * @base: mapped CPG register block
 * @notifiers: notifier chain passed to the SDH/RPC helpers
 *
 * Most clock types reduce to a fixed-factor clock built from @mult/@div;
 * the remaining types delegate to dedicated registration helpers.
 * Returns a clock pointer or an ERR_PTR() on failure.
 */
struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
	const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
	struct clk **clks, void __iomem *base,
	struct raw_notifier_head *notifiers)
{
	const struct clk *parent;
	unsigned int mult = 1;
	unsigned int div = 1;
	u32 value;

	parent = clks[core->parent & 0xffff];	/* some types use high bits */
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	switch (core->type) {
	case CLK_TYPE_GEN4_MAIN:
		div = cpg_pll_config->extal_div;
		break;

	case CLK_TYPE_GEN4_PLL1:
		mult = cpg_pll_config->pll1_mult;
		div = cpg_pll_config->pll1_div;
		break;

	case CLK_TYPE_GEN4_PLL2_VAR:
		/*
		 * PLL2 is implemented as a custom clock, to change the
		 * multiplier when cpufreq changes between normal and boost
		 * modes.
		 */
		return cpg_pll_clk_register(core->name, __clk_get_name(parent),
					    base, CPG_PLL2CR0, CPG_PLL2CR1, 2);

	case CLK_TYPE_GEN4_PLL2:
		mult = cpg_pll_config->pll2_mult;
		div = cpg_pll_config->pll2_div;
		break;

	case CLK_TYPE_GEN4_PLL3:
		mult = cpg_pll_config->pll3_mult;
		div = cpg_pll_config->pll3_div;
		break;

	case CLK_TYPE_GEN4_PLL4:
		mult = cpg_pll_config->pll4_mult;
		div = cpg_pll_config->pll4_div;
		break;

	case CLK_TYPE_GEN4_PLL5:
		mult = cpg_pll_config->pll5_mult;
		div = cpg_pll_config->pll5_div;
		break;

	case CLK_TYPE_GEN4_PLL6:
		mult = cpg_pll_config->pll6_mult;
		div = cpg_pll_config->pll6_div;
		break;

	case CLK_TYPE_GEN4_PLL2X_3X:
		/* Fixed-factor PLL: NI read once from bits [30:24] of its CR0 */
		value = readl(base + core->offset);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		break;

	case CLK_TYPE_GEN4_Z:
		return cpg_z_clk_register(core->name, __clk_get_name(parent),
					  base, core->div, core->offset);

	case CLK_TYPE_GEN4_SDSRC:
		/* SD source divider encoded in SD0CKCR1 bits [30:29], offset by 4 */
		div = ((readl(base + SD0CKCR1) >> 29) & 0x03) + 4;
		break;

	case CLK_TYPE_GEN4_SDH:
		return cpg_sdh_clk_register(core->name, base + core->offset,
					   __clk_get_name(parent), notifiers);

	case CLK_TYPE_GEN4_SD:
		return cpg_sd_clk_register(core->name, base + core->offset,
					   __clk_get_name(parent));

	case CLK_TYPE_GEN4_MDSEL:
		/*
		 * Clock selectable between two parents and two fixed dividers
		 * using a mode pin
		 */
		if (cpg_mode & BIT(core->offset)) {
			div = core->div & 0xffff;
		} else {
			/* Mode pin clear: use the alternate parent/divider pair */
			parent = clks[core->parent >> 16];
			if (IS_ERR(parent))
				return ERR_CAST(parent);
			div = core->div >> 16;
		}
		mult = 1;
		break;

	case CLK_TYPE_GEN4_OSC:
		/*
		 * Clock combining OSC EXTAL predivider and a fixed divider
		 */
		div = cpg_pll_config->osc_prediv * core->div;
		break;

	case CLK_TYPE_GEN4_RPCSRC:
		/* 2-bit divider field at bit 3 of RPCCKCR, per cpg_rpcsrc_div_table */
		return clk_register_divider_table(NULL, core->name,
						  __clk_get_name(parent), 0,
						  base + CPG_RPCCKCR, 3, 2, 0,
						  cpg_rpcsrc_div_table,
						  &cpg_lock);

	case CLK_TYPE_GEN4_RPC:
		return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
					    __clk_get_name(parent), notifiers);

	case CLK_TYPE_GEN4_RPCD2:
		return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
					      __clk_get_name(parent));

	default:
		return ERR_PTR(-EINVAL);
	}

	return clk_register_fixed_factor(NULL, core->name,
					 __clk_get_name(parent), 0, mult, div);
}
461
/**
 * rcar_gen4_cpg_init - Record SoC-specific CPG parameters for later use
 * @config: PLL configuration (multipliers/dividers) for this SoC/mode
 * @clk_extalr: presumably the EXTALR clock index — not read in this chunk
 * @mode: CPG mode (MD) pin settings
 *
 * Always returns 0.
 */
int __init rcar_gen4_cpg_init(const struct rcar_gen4_cpg_pll_config *config,
			      unsigned int clk_extalr, u32 mode)
{
	cpg_pll_config = config;
	cpg_clk_extalr = clk_extalr;
	cpg_mode = mode;

	return 0;
}
/* end of gitweb capture */