/*
 * drivers/clk/clk_zynq.c
 * ("clk: zynq: Add zynq clock framework driver")
 */
1/*
2 * Copyright (C) 2017 Weidmüller Interface GmbH & Co. KG
3 * Stefan Herbrechtsmeier <[email protected]>
4 *
5 * Copyright (C) 2013 Soren Brinkmann <[email protected]>
6 * Copyright (C) 2013 Xilinx, Inc. All rights reserved.
7 *
8 * SPDX-License-Identifier: GPL-2.0+
9 */
10
11#include <common.h>
12#include <clk-uclass.h>
13#include <dm.h>
14#include <dm/lists.h>
15#include <errno.h>
16#include <asm/io.h>
17#include <asm/arch/clk.h>
18#include <asm/arch/hardware.h>
19#include <asm/arch/sys_proto.h>
20
21/* Register bitfield defines */
22#define PLLCTRL_FBDIV_MASK 0x7f000
23#define PLLCTRL_FBDIV_SHIFT 12
24#define PLLCTRL_BPFORCE_MASK (1 << 4)
25#define PLLCTRL_PWRDWN_MASK 2
26#define PLLCTRL_PWRDWN_SHIFT 1
27#define PLLCTRL_RESET_MASK 1
28#define PLLCTRL_RESET_SHIFT 0
29
30#define ZYNQ_CLK_MAXDIV 0x3f
31#define CLK_CTRL_DIV1_SHIFT 20
32#define CLK_CTRL_DIV1_MASK (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV1_SHIFT)
33#define CLK_CTRL_DIV0_SHIFT 8
34#define CLK_CTRL_DIV0_MASK (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV0_SHIFT)
35#define CLK_CTRL_SRCSEL_SHIFT 4
36#define CLK_CTRL_SRCSEL_MASK (0x3 << CLK_CTRL_SRCSEL_SHIFT)
37
38#define CLK_CTRL_DIV2X_SHIFT 26
39#define CLK_CTRL_DIV2X_MASK (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV2X_SHIFT)
40#define CLK_CTRL_DIV3X_SHIFT 20
41#define CLK_CTRL_DIV3X_MASK (ZYNQ_CLK_MAXDIV << CLK_CTRL_DIV3X_SHIFT)
42
43DECLARE_GLOBAL_DATA_PTR;
44
45#ifndef CONFIG_SPL_BUILD
46enum zynq_clk_rclk {mio_clk, emio_clk};
47#endif
48
/* Driver private data. */
struct zynq_clk_priv {
	ulong ps_clk_freq;	/* PS reference clock in Hz, from DT
				 * "ps-clk-frequency" (default 33333333) */
};
52
/*
 * Map a logical clock ID to its SLCR control register.
 *
 * @id:	logical clock identifier (enum zynq_clk)
 * Return: pointer to the matching control register in the SLCR block;
 *	   clocks that share one register (e.g. sdio0/sdio1, uart0/uart1,
 *	   spi0/spi1, can0/can1) map to the same address.
 *
 * NOTE(review): any ID not listed falls through to dbg_clk_ctrl; callers
 * are expected to pass only IDs handled here.
 */
static void *zynq_clk_get_register(enum zynq_clk id)
{
	switch (id) {
	case armpll_clk:
		return &slcr_base->arm_pll_ctrl;
	case ddrpll_clk:
		return &slcr_base->ddr_pll_ctrl;
	case iopll_clk:
		return &slcr_base->io_pll_ctrl;
	case lqspi_clk:
		return &slcr_base->lqspi_clk_ctrl;
	case smc_clk:
		return &slcr_base->smc_clk_ctrl;
	case pcap_clk:
		return &slcr_base->pcap_clk_ctrl;
	case sdio0_clk ... sdio1_clk:
		return &slcr_base->sdio_clk_ctrl;
	case uart0_clk ... uart1_clk:
		return &slcr_base->uart_clk_ctrl;
	case spi0_clk ... spi1_clk:
		return &slcr_base->spi_clk_ctrl;
#ifndef CONFIG_SPL_BUILD
	case dci_clk:
		return &slcr_base->dci_clk_ctrl;
	case gem0_clk:
		return &slcr_base->gem0_clk_ctrl;
	case gem1_clk:
		return &slcr_base->gem1_clk_ctrl;
	case fclk0_clk:
		return &slcr_base->fpga0_clk_ctrl;
	case fclk1_clk:
		return &slcr_base->fpga1_clk_ctrl;
	case fclk2_clk:
		return &slcr_base->fpga2_clk_ctrl;
	case fclk3_clk:
		return &slcr_base->fpga3_clk_ctrl;
	case can0_clk ... can1_clk:
		return &slcr_base->can_clk_ctrl;
	case dbg_trc_clk ... dbg_apb_clk:
		/* fall through */
#endif
	default:
		return &slcr_base->dbg_clk_ctrl;
	}
}
98
99static enum zynq_clk zynq_clk_get_cpu_pll(u32 clk_ctrl)
100{
101 u32 srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;
102
103 switch (srcsel) {
104 case 2:
105 return ddrpll_clk;
106 case 3:
107 return iopll_clk;
108 case 0 ... 1:
109 default:
110 return armpll_clk;
111 }
112}
113
114static enum zynq_clk zynq_clk_get_peripheral_pll(u32 clk_ctrl)
115{
116 u32 srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;
117
118 switch (srcsel) {
119 case 2:
120 return armpll_clk;
121 case 3:
122 return ddrpll_clk;
123 case 0 ... 1:
124 default:
125 return iopll_clk;
126 }
127}
128
129static ulong zynq_clk_get_pll_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
130{
131 u32 clk_ctrl, reset, pwrdwn, mul, bypass;
132
133 clk_ctrl = readl(zynq_clk_get_register(id));
134
135 reset = (clk_ctrl & PLLCTRL_RESET_MASK) >> PLLCTRL_RESET_SHIFT;
136 pwrdwn = (clk_ctrl & PLLCTRL_PWRDWN_MASK) >> PLLCTRL_PWRDWN_SHIFT;
137 if (reset || pwrdwn)
138 return 0;
139
140 bypass = clk_ctrl & PLLCTRL_BPFORCE_MASK;
141 if (bypass)
142 mul = 1;
143 else
144 mul = (clk_ctrl & PLLCTRL_FBDIV_MASK) >> PLLCTRL_FBDIV_SHIFT;
145
146 return priv->ps_clk_freq * mul;
147}
148
149#ifndef CONFIG_SPL_BUILD
150static enum zynq_clk_rclk zynq_clk_get_gem_rclk(enum zynq_clk id)
151{
152 u32 clk_ctrl, srcsel;
153
154 if (id == gem0_clk)
155 clk_ctrl = readl(&slcr_base->gem0_rclk_ctrl);
156 else
157 clk_ctrl = readl(&slcr_base->gem1_rclk_ctrl);
158
159 srcsel = (clk_ctrl & CLK_CTRL_SRCSEL_MASK) >> CLK_CTRL_SRCSEL_SHIFT;
160 if (srcsel)
161 return emio_clk;
162 else
163 return mio_clk;
164}
165#endif
166
/*
 * Compute the rate of one of the four CPU clock domains.
 *
 * The ARM clock divider (DIV0 of ARM_CLK_CTRL) yields cpu_6or4x; the
 * slower domains derive from it by accumulating extra factors into div:
 *   cpu_3or2x = cpu_6or4x / 2
 *   cpu_2x    = cpu_6or4x / (3 or 2, per the CLK_621_TRUE register)
 *   cpu_1x    = cpu_2x / 2
 *
 * @priv:	driver private data
 * @id:		one of cpu_6or4x_clk .. cpu_1x_clk
 * Return: clock rate in Hz, or 0 for an unsupported ID
 */
static ulong zynq_clk_get_cpu_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
{
	u32 clk_621, clk_ctrl, div;
	enum zynq_clk pll;

	clk_ctrl = readl(&slcr_base->arm_clk_ctrl);

	div = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;

	switch (id) {
	case cpu_1x_clk:
		div *= 2;	/* cpu_1x is half of cpu_2x */
		/* fall through */
	case cpu_2x_clk:
		/* 6:2:1 scheme divides by 3, 4:2:1 scheme by 2 */
		clk_621 = readl(&slcr_base->clk_621_true) & 1;
		div *= 2 + clk_621;
		break;
	case cpu_3or2x_clk:
		div *= 2;	/* cpu_3or2x is half of cpu_6or4x */
		/* fall through */
	case cpu_6or4x_clk:
		break;
	default:
		return 0;
	}

	pll = zynq_clk_get_cpu_pll(clk_ctrl);

	return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, pll), div);
}
197
198#ifndef CONFIG_SPL_BUILD
199static ulong zynq_clk_get_ddr2x_rate(struct zynq_clk_priv *priv)
200{
201 u32 clk_ctrl, div;
202
203 clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);
204
205 div = (clk_ctrl & CLK_CTRL_DIV2X_MASK) >> CLK_CTRL_DIV2X_SHIFT;
206
207 return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
208}
209#endif
210
211static ulong zynq_clk_get_ddr3x_rate(struct zynq_clk_priv *priv)
212{
213 u32 clk_ctrl, div;
214
215 clk_ctrl = readl(&slcr_base->ddr_clk_ctrl);
216
217 div = (clk_ctrl & CLK_CTRL_DIV3X_MASK) >> CLK_CTRL_DIV3X_SHIFT;
218
219 return DIV_ROUND_CLOSEST(zynq_clk_get_pll_rate(priv, ddrpll_clk), div);
220}
221
222#ifndef CONFIG_SPL_BUILD
223static ulong zynq_clk_get_dci_rate(struct zynq_clk_priv *priv)
224{
225 u32 clk_ctrl, div0, div1;
226
227 clk_ctrl = readl(&slcr_base->dci_clk_ctrl);
228
229 div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
230 div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;
231
232 return DIV_ROUND_CLOSEST(DIV_ROUND_CLOSEST(
233 zynq_clk_get_pll_rate(priv, ddrpll_clk), div0), div1);
234}
235#endif
236
/*
 * Compute a peripheral clock rate: source PLL output divided by DIV0
 * and, when the clock has a second divider stage, also by DIV1.
 *
 * @priv:	driver private data
 * @id:		peripheral clock ID
 * @two_divs:	true if the clock has a second divider stage (ignored in
 *		SPL builds, where only single-divider clocks are queried)
 * Return: clock rate in Hz
 */
static ulong zynq_clk_get_peripheral_rate(struct zynq_clk_priv *priv,
					  enum zynq_clk id, bool two_divs)
{
	enum zynq_clk pll;
	u32 clk_ctrl, div0;
	u32 div1 = 1;

	clk_ctrl = readl(zynq_clk_get_register(id));

	div0 = (clk_ctrl & CLK_CTRL_DIV0_MASK) >> CLK_CTRL_DIV0_SHIFT;
	if (!div0)	/* guard against a zero divider field */
		div0 = 1;

#ifndef CONFIG_SPL_BUILD
	if (two_divs) {
		div1 = (clk_ctrl & CLK_CTRL_DIV1_MASK) >> CLK_CTRL_DIV1_SHIFT;
		if (!div1)
			div1 = 1;
	}
#endif

	pll = zynq_clk_get_peripheral_pll(clk_ctrl);

	return
		DIV_ROUND_CLOSEST(
			DIV_ROUND_CLOSEST(
				zynq_clk_get_pll_rate(priv, pll), div0),
			div1);
}
266
267#ifndef CONFIG_SPL_BUILD
268static ulong zynq_clk_get_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id)
269{
270 if (zynq_clk_get_gem_rclk(id) == mio_clk)
271 return zynq_clk_get_peripheral_rate(priv, id, true);
272
273 debug("%s: gem%d emio rx clock source unknown\n", __func__,
274 id - gem0_clk);
275
276 return -ENOSYS;
277}
278
279static unsigned long zynq_clk_calc_peripheral_two_divs(ulong rate,
280 ulong pll_rate,
281 u32 *div0, u32 *div1)
282{
283 long new_err, best_err = (long)(~0UL >> 1);
284 ulong new_rate, best_rate = 0;
285 u32 d0, d1;
286
287 for (d0 = 1; d0 <= ZYNQ_CLK_MAXDIV; d0++) {
288 for (d1 = 1; d1 <= ZYNQ_CLK_MAXDIV >> 1; d1++) {
289 new_rate = DIV_ROUND_CLOSEST(
290 DIV_ROUND_CLOSEST(pll_rate, d0), d1);
291 new_err = abs(new_rate - rate);
292
293 if (new_err < best_err) {
294 *div0 = d0;
295 *div1 = d1;
296 best_err = new_err;
297 best_rate = new_rate;
298 }
299 }
300 }
301
302 return best_rate;
303}
304
305static ulong zynq_clk_set_peripheral_rate(struct zynq_clk_priv *priv,
306 enum zynq_clk id, ulong rate,
307 bool two_divs)
308{
309 enum zynq_clk pll;
310 u32 clk_ctrl, div0 = 0, div1 = 0;
311 ulong pll_rate, new_rate;
312 u32 *reg;
313
314 reg = zynq_clk_get_register(id);
315 clk_ctrl = readl(reg);
316
317 pll = zynq_clk_get_peripheral_pll(clk_ctrl);
318 pll_rate = zynq_clk_get_pll_rate(priv, pll);
319 clk_ctrl &= ~CLK_CTRL_DIV0_MASK;
320 if (two_divs) {
321 clk_ctrl &= ~CLK_CTRL_DIV1_MASK;
322 new_rate = zynq_clk_calc_peripheral_two_divs(rate, pll_rate,
323 &div0, &div1);
324 clk_ctrl |= div1 << CLK_CTRL_DIV1_SHIFT;
325 } else {
326 div0 = DIV_ROUND_CLOSEST(pll_rate, rate);
327 if (div0 > ZYNQ_CLK_MAXDIV)
328 div0 = ZYNQ_CLK_MAXDIV;
329 new_rate = DIV_ROUND_CLOSEST(rate, div0);
330 }
331 clk_ctrl |= div0 << CLK_CTRL_DIV0_SHIFT;
332
333 zynq_slcr_unlock();
334 writel(clk_ctrl, reg);
335 zynq_slcr_lock();
336
337 return new_rate;
338}
339
340static ulong zynq_clk_set_gem_rate(struct zynq_clk_priv *priv, enum zynq_clk id,
341 ulong rate)
342{
343 if (zynq_clk_get_gem_rclk(id) == mio_clk)
344 return zynq_clk_set_peripheral_rate(priv, id, rate, true);
345
346 debug("%s: gem%d emio rx clock source unknown\n", __func__,
347 id - gem0_clk);
348
349 return -ENOSYS;
350}
351#endif
352
353#ifndef CONFIG_SPL_BUILD
/*
 * clk_ops.get_rate implementation (full U-Boot build).
 *
 * @clk: clock handle; clk->id selects the logical clock
 * Return: rate in Hz, 0 if the source PLL is off, or -ENXIO for an
 *	   unknown clock ID
 */
static ulong zynq_clk_get_rate(struct clk *clk)
{
	struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
	enum zynq_clk id = clk->id;
	bool two_divs = false;

	switch (id) {
	case armpll_clk ... iopll_clk:
		return zynq_clk_get_pll_rate(priv, id);
	case cpu_6or4x_clk ... cpu_1x_clk:
		return zynq_clk_get_cpu_rate(priv, id);
	case ddr2x_clk:
		return zynq_clk_get_ddr2x_rate(priv);
	case ddr3x_clk:
		return zynq_clk_get_ddr3x_rate(priv);
	case dci_clk:
		return zynq_clk_get_dci_rate(priv);
	case gem0_clk ... gem1_clk:
		return zynq_clk_get_gem_rate(priv, id);
	case fclk0_clk ... can1_clk:
		/* these clocks run through two cascaded dividers */
		two_divs = true;
		/* fall through */
	case dbg_trc_clk ... dbg_apb_clk:
	case lqspi_clk ... pcap_clk:
	case sdio0_clk ... spi1_clk:
		return zynq_clk_get_peripheral_rate(priv, id, two_divs);
	case dma_clk:
		/* bus clocks are derived from the CPU clock domains */
		return zynq_clk_get_cpu_rate(priv, cpu_2x_clk);
	case usb0_aper_clk ... smc_aper_clk:
		return zynq_clk_get_cpu_rate(priv, cpu_1x_clk);
	default:
		return -ENXIO;
	}
}
388
/*
 * clk_ops.set_rate implementation (full U-Boot build).
 *
 * Only peripheral clocks are settable; PLL, CPU and DDR clocks are
 * rejected with -ENXIO.
 *
 * @clk:	clock handle; clk->id selects the logical clock
 * @rate:	requested rate in Hz
 * Return: the achieved rate in Hz, or -ENXIO for unsupported clocks
 */
static ulong zynq_clk_set_rate(struct clk *clk, ulong rate)
{
	struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
	enum zynq_clk id = clk->id;
	bool two_divs = false;

	switch (id) {
	case gem0_clk ... gem1_clk:
		return zynq_clk_set_gem_rate(priv, id, rate);
	case fclk0_clk ... can1_clk:
		/* these clocks run through two cascaded dividers */
		two_divs = true;
		/* fall through */
	case lqspi_clk ... pcap_clk:
	case sdio0_clk ... spi1_clk:
	case dbg_trc_clk ... dbg_apb_clk:
		return zynq_clk_set_peripheral_rate(priv, id, rate, two_divs);
	default:
		return -ENXIO;
	}
}
409#else
410static ulong zynq_clk_get_rate(struct clk *clk)
411{
412 struct zynq_clk_priv *priv = dev_get_priv(clk->dev);
413 enum zynq_clk id = clk->id;
414
415 switch (id) {
416 case cpu_6or4x_clk ... cpu_1x_clk:
417 return zynq_clk_get_cpu_rate(priv, id);
418 case ddr3x_clk:
419 return zynq_clk_get_ddr3x_rate(priv);
420 case lqspi_clk ... pcap_clk:
421 case sdio0_clk ... spi1_clk:
422 return zynq_clk_get_peripheral_rate(priv, id, 0);
423 default:
424 return -ENXIO;
425 }
426}
427#endif
428
/* Clock uclass operations; set_rate is omitted in SPL to save space. */
static struct clk_ops zynq_clk_ops = {
	.get_rate = zynq_clk_get_rate,
#ifndef CONFIG_SPL_BUILD
	.set_rate = zynq_clk_set_rate,
#endif
};
435
/*
 * Probe: read the PS reference clock frequency from the device tree
 * ("ps-clk-frequency"), defaulting to the common 33.333333 MHz crystal.
 */
static int zynq_clk_probe(struct udevice *dev)
{
	struct zynq_clk_priv *priv = dev_get_priv(dev);

	priv->ps_clk_freq = fdtdec_get_uint(gd->fdt_blob, dev->of_offset,
					    "ps-clk-frequency", 33333333UL);

	return 0;
}
445
/* Device tree match table */
static const struct udevice_id zynq_clk_ids[] = {
	{ .compatible = "xlnx,ps7-clkc"},
	{}
};
450
/*
 * Zynq-7000 clock driver; DM_FLAG_PRE_RELOC so early peripherals
 * (UART, timer) can query their clock rates before relocation.
 */
U_BOOT_DRIVER(zynq_clk) = {
	.name = "zynq_clk",
	.id = UCLASS_CLK,
	.of_match = zynq_clk_ids,
	.flags = DM_FLAG_PRE_RELOC,
	.ops = &zynq_clk_ops,
	.priv_auto_alloc_size = sizeof(struct zynq_clk_priv),
	.probe = zynq_clk_probe,
};
/* (gitweb footer: page generated in 0.069914 s) */