/*
 * Copyright 2011-2012 Calxeda, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#define HB_PLL_LOCK_500		0x20000000
#define HB_PLL_LOCK		0x10000000
#define HB_PLL_DIVF_SHIFT	20
#define HB_PLL_DIVF_MASK	0x0ff00000
#define HB_PLL_DIVQ_SHIFT	16
#define HB_PLL_DIVQ_MASK	0x00070000
#define HB_PLL_DIVR_SHIFT	8
#define HB_PLL_DIVR_MASK	0x00001f00
#define HB_PLL_RANGE_SHIFT	4
#define HB_PLL_RANGE_MASK	0x00000070
#define HB_PLL_BYPASS		0x00000008
#define HB_PLL_RESET		0x00000004
#define HB_PLL_EXT_BYPASS	0x00000002
#define HB_PLL_EXT_ENA		0x00000001

#define HB_PLL_VCO_MIN_FREQ	2133000000
#define HB_PLL_MAX_FREQ		HB_PLL_VCO_MIN_FREQ
#define HB_PLL_MIN_FREQ		(HB_PLL_VCO_MIN_FREQ / 64)

#define HB_A9_BCLK_DIV_MASK	0x00000006
#define HB_A9_BCLK_DIV_SHIFT	1
#define HB_A9_PCLK_DIV		0x00000001

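/*
 * Per-clock state: the common clk_hw plus the mapped control register for
 * this clock inside the Highbank system-register block.
 */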
struct hb_clk {
	struct clk_hw	hw;
	void __iomem	*reg;
	char		*parent_name;
};
#define to_hb_clk(p) container_of(p, struct hb_clk, hw)

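/*
 * Take the PLL out of reset and busy-wait until both lock indications are
 * set; HB_PLL_LOCK_500 presumably reports that the lock has held for an
 * additional settling period. Unprepare simply puts the PLL back into reset.
 */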
static int clk_pll_prepare(struct clk_hw *hwclk)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 reg;

	reg = readl(hbclk->reg);
	reg &= ~HB_PLL_RESET;
	writel(reg, hbclk->reg);

	while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
		;
	while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
		;

	return 0;
}

static void clk_pll_unprepare(struct clk_hw *hwclk)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 reg;

	reg = readl(hbclk->reg);
	reg |= HB_PLL_RESET;
	writel(reg, hbclk->reg);
}

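/* enable/disable gate the PLL's external output via HB_PLL_EXT_ENA. */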
static int clk_pll_enable(struct clk_hw *hwclk)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 reg;

	reg = readl(hbclk->reg);
	reg |= HB_PLL_EXT_ENA;
	writel(reg, hbclk->reg);

	return 0;
}

static void clk_pll_disable(struct clk_hw *hwclk)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 reg;

	reg = readl(hbclk->reg);
	reg &= ~HB_PLL_EXT_ENA;
	writel(reg, hbclk->reg);
}

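/*
 * When the external bypass is active the PLL passes its reference straight
 * through, so the rate is just the parent rate. Otherwise the output is
 * parent_rate * (divf + 1) / 2^divq.
 */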
static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
					 unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	unsigned long divf, divq, vco_freq, reg;

	reg = readl(hbclk->reg);
	if (reg & HB_PLL_EXT_BYPASS)
		return parent_rate;

	divf = (reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT;
	divq = (reg & HB_PLL_DIVQ_MASK) >> HB_PLL_DIVQ_SHIFT;
	vco_freq = parent_rate * (divf + 1);

	return vco_freq / (1 << divq);
}

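/*
 * Pick divq/divf for a requested rate: clamp the rate to the supported
 * range, choose the smallest post-divider (2^divq) that keeps the VCO at or
 * above HB_PLL_VCO_MIN_FREQ, then derive the feedback divider by rounding
 * vco_freq / ref_freq to the nearest integer.
 */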
static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
			 u32 *pdivq, u32 *pdivf)
{
	u32 divq, divf;
	unsigned long vco_freq;

	if (rate < HB_PLL_MIN_FREQ)
		rate = HB_PLL_MIN_FREQ;
	if (rate > HB_PLL_MAX_FREQ)
		rate = HB_PLL_MAX_FREQ;

	for (divq = 1; divq <= 6; divq++) {
		if ((rate * (1 << divq)) >= HB_PLL_VCO_MIN_FREQ)
			break;
	}

	vco_freq = rate * (1 << divq);
	divf = (vco_freq + (ref_freq / 2)) / ref_freq;
	divf--;

	*pdivq = divq;
	*pdivf = divf;
}

static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	u32 divq, divf;
	unsigned long ref_freq = *parent_rate;

	clk_pll_calc(rate, ref_freq, &divq, &divf);

	return (ref_freq * (divf + 1)) / (1 << divq);
}

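/*
 * Changing divf forces the PLL to re-lock: route the output through the
 * external bypass, hold the PLL in reset while the new dividers are written,
 * wait for both lock bits, then drop the bypass again. A divq-only change is
 * applied under bypass without resetting the PLL.
 */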
static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 divq, divf;
	u32 reg;

	clk_pll_calc(rate, parent_rate, &divq, &divf);

	reg = readl(hbclk->reg);
	if (divf != ((reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT)) {
		/* Need to re-lock PLL, so put it into bypass mode */
		reg |= HB_PLL_EXT_BYPASS;
		writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);

		writel(reg | HB_PLL_RESET, hbclk->reg);
		reg &= ~(HB_PLL_DIVF_MASK | HB_PLL_DIVQ_MASK);
		reg |= (divf << HB_PLL_DIVF_SHIFT) | (divq << HB_PLL_DIVQ_SHIFT);
		writel(reg | HB_PLL_RESET, hbclk->reg);
		writel(reg, hbclk->reg);

		while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
			;
		while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
			;
		reg |= HB_PLL_EXT_ENA;
		reg &= ~HB_PLL_EXT_BYPASS;
	} else {
		writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
		reg &= ~HB_PLL_DIVQ_MASK;
		reg |= divq << HB_PLL_DIVQ_SHIFT;
		writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
	}
	writel(reg, hbclk->reg);

	return 0;
}

static const struct clk_ops clk_pll_ops = {
	.prepare = clk_pll_prepare,
	.unprepare = clk_pll_unprepare,
	.enable = clk_pll_enable,
	.disable = clk_pll_disable,
	.recalc_rate = clk_pll_recalc_rate,
	.round_rate = clk_pll_round_rate,
	.set_rate = clk_pll_set_rate,
};

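/*
 * A9 peripheral clock: a fixed divide-by-4 or divide-by-8 of the parent,
 * selected by the HB_A9_PCLK_DIV bit. Only recalc_rate is provided, so the
 * divider is never changed from here.
 */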
static unsigned long clk_cpu_periphclk_recalc_rate(struct clk_hw *hwclk,
						   unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 div = (readl(hbclk->reg) & HB_A9_PCLK_DIV) ? 8 : 4;
	return parent_rate / div;
}

static const struct clk_ops a9periphclk_ops = {
	.recalc_rate = clk_cpu_periphclk_recalc_rate,
};

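/*
 * A9 bus clock: the two-bit field encodes (divisor - 2), giving divide
 * ratios of 2 through 5.
 */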
static unsigned long clk_cpu_a9bclk_recalc_rate(struct clk_hw *hwclk,
						unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 div = (readl(hbclk->reg) & HB_A9_BCLK_DIV_MASK) >> HB_A9_BCLK_DIV_SHIFT;

	return parent_rate / (div + 2);
}

static const struct clk_ops a9bclk_ops = {
	.recalc_rate = clk_cpu_a9bclk_recalc_rate,
};

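/*
 * Peripheral clocks (e.g. eMMC): recalc_rate decodes the 5-bit divider
 * field as parent_rate / (2 * (field + 1)).
 */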
static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
					     unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 div;

	div = readl(hbclk->reg) & 0x1f;
	div++;
	div *= 2;

	return parent_rate / div;
}

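/* Only even divide ratios are usable; odd divisors round up to the next even value. */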
static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
				   unsigned long *parent_rate)
{
	u32 div;

	div = *parent_rate / rate;
	div++;
	div &= ~0x1;

	return *parent_rate / div;
}

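/* Reject odd divisors and program the register with half the divisor. */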
static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 div;

	div = parent_rate / rate;
	if (div & 0x1)
		return -EINVAL;

	writel(div >> 1, hbclk->reg);
	return 0;
}

static const struct clk_ops periclk_ops = {
	.recalc_rate = clk_periclk_recalc_rate,
	.round_rate = clk_periclk_round_rate,
	.set_rate = clk_periclk_set_rate,
};

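/*
 * Common registration helper: the "reg" property of the clock node is an
 * offset into the calxeda,hb-sregs system-register block, which is mapped
 * here. The clock is then registered with the common clock framework and
 * exposed as an OF clock provider for the node.
 */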
static __init struct clk *hb_clk_init(struct device_node *node, const struct clk_ops *ops)
{
	u32 reg;
	struct hb_clk *hb_clk;
	const char *clk_name = node->name;
	const char *parent_name;
	struct clk_init_data init;
	struct device_node *srnp;
	int rc;

	rc = of_property_read_u32(node, "reg", &reg);
	if (WARN_ON(rc))
		return NULL;

	hb_clk = kzalloc(sizeof(*hb_clk), GFP_KERNEL);
	if (WARN_ON(!hb_clk))
		return NULL;

	/* Map system registers */
	srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
	hb_clk->reg = of_iomap(srnp, 0);
	BUG_ON(!hb_clk->reg);
	hb_clk->reg += reg;

	of_property_read_string(node, "clock-output-names", &clk_name);

	init.name = clk_name;
	init.ops = ops;
	init.flags = 0;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	hb_clk->hw.init = &init;

	rc = clk_hw_register(NULL, &hb_clk->hw);
	if (WARN_ON(rc)) {
		kfree(hb_clk);
		return NULL;
	}
	rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
	return hb_clk->hw.clk;
}

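/*
 * One CLK_OF_DECLARE entry per compatible string, each binding a clock type
 * to its ops table. The A9 bus clock is prepared and enabled at registration
 * time so it stays on.
 */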
static void __init hb_pll_init(struct device_node *node)
{
	hb_clk_init(node, &clk_pll_ops);
}
CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init);

static void __init hb_a9periph_init(struct device_node *node)
{
	hb_clk_init(node, &a9periphclk_ops);
}
CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init);

static void __init hb_a9bus_init(struct device_node *node)
{
	struct clk *clk = hb_clk_init(node, &a9bclk_ops);
	clk_prepare_enable(clk);
}
CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init);

static void __init hb_emmc_init(struct device_node *node)
{
	hb_clk_init(node, &periclk_ops);
}
CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init);