/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include "phy-qcom-ufs-i.h"

#define MAX_PROP_NAME		32
#define VDDA_PHY_MIN_UV		1000000
#define VDDA_PHY_MAX_UV		1000000
#define VDDA_PLL_MIN_UV		1800000
#define VDDA_PLL_MAX_UV		1800000
#define VDDP_REF_CLK_MIN_UV	1200000
#define VDDP_REF_CLK_MAX_UV	1200000

static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
				    const char *, bool);
static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
				  const char *);
static int ufs_qcom_phy_base_init(struct platform_device *pdev,
				  struct ufs_qcom_phy *phy_common);

int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
			   struct ufs_qcom_phy_calibration *tbl_A,
			   int tbl_size_A,
			   struct ufs_qcom_phy_calibration *tbl_B,
			   int tbl_size_B, bool is_rate_B)
{
	int i;
	int ret = 0;

	if (!tbl_A) {
		dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < tbl_size_A; i++)
		writel_relaxed(tbl_A[i].cfg_value,
			       ufs_qcom_phy->mmio + tbl_A[i].reg_offset);

	/*
	 * In case we would like to work in rate B, we need to override
	 * the registers that were configured in the rate A table with
	 * the values from the rate B table.
	 */
	if (is_rate_B) {
		if (!tbl_B) {
			dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL\n",
				__func__);
			ret = -EINVAL;
			goto out;
		}

		for (i = 0; i < tbl_size_B; i++)
			writel_relaxed(tbl_B[i].cfg_value,
				       ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
	}

	/* flush buffered writes */
	mb();

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
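
/*
 * Usage sketch (not part of this file; register offsets and values below are
 * placeholders): a generation-specific PHY driver would define its
 * calibration tables and call the helper above from its calibrate op:
 *
 *	static struct ufs_qcom_phy_calibration tbl_rate_A[] = {
 *		UFS_QCOM_PHY_CAL_ENTRY(0x0034, 0x3e),	// placeholder entry
 *	};
 *	static struct ufs_qcom_phy_calibration tbl_rate_B[] = {
 *		UFS_QCOM_PHY_CAL_ENTRY(0x0038, 0x08),	// placeholder entry
 *	};
 *
 *	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
 *				     tbl_rate_A, ARRAY_SIZE(tbl_rate_A),
 *				     tbl_rate_B, ARRAY_SIZE(tbl_rate_B),
 *				     is_rate_B);
 */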

struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
			struct ufs_qcom_phy *common_cfg,
			struct phy_ops *ufs_qcom_phy_gen_ops,
			struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
	int err;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy = NULL;
	struct phy_provider *phy_provider;

	err = ufs_qcom_phy_base_init(pdev, common_cfg);
	if (err) {
		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
		goto out;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
		goto out;
	}

	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
	if (IS_ERR(generic_phy)) {
		err = PTR_ERR(generic_phy);
		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
		generic_phy = NULL;
		goto out;
	}

	common_cfg->phy_spec_ops = phy_spec_ops;
	common_cfg->dev = dev;

out:
	return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
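
/*
 * Caller sketch (hypothetical driver, not from this file): a device-specific
 * probe delegates the common setup and then attaches its own context so that
 * get_ufs_qcom_phy() below can recover it via phy_get_drvdata():
 *
 *	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
 *				&my_gen_phy_ops, &my_phy_spec_ops);
 *	if (generic_phy)
 *		phy_set_drvdata(generic_phy, phy);
 */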

/*
 * This assumes the embedded phy structure inside generic_phy is of type
 * struct ufs_qcom_phy. In order to function properly it's crucial to keep
 * the embedded struct "struct ufs_qcom_phy common_cfg" as the first member
 * of the device-specific phy structure.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
	return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);
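
/*
 * For example (hypothetical layout, mirroring the requirement above), a
 * device-specific PHY structure must embed the common config first so the
 * cast in get_ufs_qcom_phy() stays valid:
 *
 *	struct ufs_qcom_phy_foo {
 *		struct ufs_qcom_phy common_cfg;	// must remain the first member
 *		u32 foo_specific_setting;	// illustrative field
 *	};
 */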

static
int ufs_qcom_phy_base_init(struct platform_device *pdev,
			   struct ufs_qcom_phy *phy_common)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err = 0;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
	phy_common->mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const *)phy_common->mmio)) {
		err = PTR_ERR((void const *)phy_common->mmio);
		phy_common->mmio = NULL;
		dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
			__func__, err);
		return err;
	}

	/* "dev_ref_clk_ctrl_mem" is an optional resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "dev_ref_clk_ctrl_mem");
	phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
		phy_common->dev_ref_clk_ctrl_mmio = NULL;

	return 0;
}

static int __ufs_qcom_phy_clk_get(struct phy *phy,
			const char *name, struct clk **clk_out, bool err_print)
{
	struct clk *clk;
	int err = 0;
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		if (err_print)
			dev_err(dev, "failed to get %s err %d\n", name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static
int ufs_qcom_phy_clk_get(struct phy *phy,
			 const char *name, struct clk **clk_out)
{
	return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
}

int
ufs_qcom_phy_init_clks(struct phy *generic_phy,
		       struct ufs_qcom_phy *phy_common)
{
	int err;

	err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
				   &phy_common->tx_iface_clk);
	if (err)
		goto out;

	err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
				   &phy_common->rx_iface_clk);
	if (err)
		goto out;

	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
				   &phy_common->ref_clk_src);
	if (err)
		goto out;

	/*
	 * "ref_clk_parent" is optional, hence don't abort init if it's not
	 * found.
	 */
	__ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
			       &phy_common->ref_clk_parent, false);

	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
				   &phy_common->ref_clk);

out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
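
/*
 * Illustrative DT snippet (not from this file) showing the clock names the
 * helper above consumes via devm_clk_get(); "ref_clk_parent" may be omitted:
 *
 *	clocks = <...>;
 *	clock-names = "tx_iface_clk", "rx_iface_clk", "ref_clk_src",
 *		      "ref_clk_parent", "ref_clk";
 */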

int
ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
			      struct ufs_qcom_phy *phy_common)
{
	int err;

	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
				     "vdda-pll");
	if (err)
		goto out;

	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
				     "vdda-phy");
	if (err)
		goto out;

	/* vddp-ref-clk-* properties are optional */
	__ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
				 "vddp-ref-clk", true);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
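
/*
 * Illustrative DT properties (values are placeholders) matched by
 * __ufs_qcom_phy_init_vreg() below; the "-supply" phandle is what
 * devm_regulator_get() resolves, the others are parsed explicitly:
 *
 *	vdda-phy-supply = <&pm_l4>;
 *	vdda-phy-max-microamp = <18380>;
 *	vdda-pll-supply = <&pm_l12>;
 *	vdda-pll-max-microamp = <9440>;
 *	vddp-ref-clk-supply = <&pm_l25>;	// optional
 *	vddp-ref-clk-max-microamp = <100>;
 *	vddp-ref-clk-always-on;
 */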

static int __ufs_qcom_phy_init_vreg(struct phy *phy,
		struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
{
	int err = 0;
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;
	char prop_name[MAX_PROP_NAME];

	vreg->name = kstrdup(name, GFP_KERNEL);
	if (!vreg->name) {
		err = -ENOMEM;
		goto out;
	}

	vreg->reg = devm_regulator_get(dev, name);
	if (IS_ERR(vreg->reg)) {
		err = PTR_ERR(vreg->reg);
		vreg->reg = NULL;
		if (!optional)
			dev_err(dev, "failed to get %s, %d\n", name, err);
		goto out;
	}

	if (dev->of_node) {
		snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
		err = of_property_read_u32(dev->of_node,
					   prop_name, &vreg->max_uA);
		if (err && err != -EINVAL) {
			dev_err(dev, "%s: failed to read %s\n",
				__func__, prop_name);
			goto out;
		} else if (err == -EINVAL || !vreg->max_uA) {
			if (regulator_count_voltages(vreg->reg) > 0) {
				dev_err(dev, "%s: %s is mandatory\n",
					__func__, prop_name);
				err = -EINVAL;
				goto out;
			}
			err = 0;
		}
		snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
		vreg->is_always_on = of_property_read_bool(dev->of_node,
							   prop_name);
	}

	if (!strcmp(name, "vdda-pll")) {
		vreg->max_uV = VDDA_PLL_MAX_UV;
		vreg->min_uV = VDDA_PLL_MIN_UV;
	} else if (!strcmp(name, "vdda-phy")) {
		vreg->max_uV = VDDA_PHY_MAX_UV;
		vreg->min_uV = VDDA_PHY_MIN_UV;
	} else if (!strcmp(name, "vddp-ref-clk")) {
		vreg->max_uV = VDDP_REF_CLK_MAX_UV;
		vreg->min_uV = VDDP_REF_CLK_MIN_UV;
	}

out:
	if (err) {
		kfree(vreg->name);
		vreg->name = NULL;
	}
	return err;
}

static int ufs_qcom_phy_init_vreg(struct phy *phy,
		struct ufs_qcom_phy_vreg *vreg, const char *name)
{
	return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
}

static
int ufs_qcom_phy_cfg_vreg(struct phy *phy,
			  struct ufs_qcom_phy_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	const char *name;
	int min_uV;
	int uA_load;
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;

	/* check the argument before dereferencing it */
	BUG_ON(!vreg);

	reg = vreg->reg;
	name = vreg->name;

	if (regulator_count_voltages(reg) > 0) {
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
				__func__, name, ret);
			goto out;
		}
		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(reg, uA_load);
		if (ret >= 0) {
			/*
			 * regulator_set_load() returns new regulator
			 * mode upon success.
			 */
			ret = 0;
		} else {
			dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
				__func__, name, uA_load, ret);
			goto out;
		}
	}
out:
	return ret;
}

static
int ufs_qcom_phy_enable_vreg(struct phy *phy,
			     struct ufs_qcom_phy_vreg *vreg)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;
	int ret = 0;

	if (!vreg || vreg->enabled)
		goto out;

	ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
	if (ret) {
		dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	ret = regulator_enable(vreg->reg);
	if (ret) {
		dev_err(dev, "%s: enable failed, err=%d\n",
			__func__, ret);
		goto out;
	}

	vreg->enabled = true;
out:
	return ret;
}

int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
{
	int ret = 0;
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->is_ref_clk_enabled)
		goto out;

	/*
	 * The reference clock is propagated in a daisy-chained manner from
	 * the source to the phy, so ungate the clocks at each stage.
	 */
	ret = clk_prepare_enable(phy->ref_clk_src);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * "ref_clk_parent" is an optional clock, hence make sure that the
	 * clk reference is available before trying to enable the clock.
	 */
	if (phy->ref_clk_parent) {
		ret = clk_prepare_enable(phy->ref_clk_parent);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
				__func__, ret);
			goto out_disable_src;
		}
	}

	ret = clk_prepare_enable(phy->ref_clk);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
			__func__, ret);
		goto out_disable_parent;
	}

	phy->is_ref_clk_enabled = true;
	goto out;

out_disable_parent:
	if (phy->ref_clk_parent)
		clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
	clk_disable_unprepare(phy->ref_clk_src);
out:
	return ret;
}

static
int ufs_qcom_phy_disable_vreg(struct phy *phy,
			      struct ufs_qcom_phy_vreg *vreg)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
	struct device *dev = ufs_qcom_phy->dev;
	int ret = 0;

	if (!vreg || !vreg->enabled || vreg->is_always_on)
		goto out;

	ret = regulator_disable(vreg->reg);
	if (!ret) {
		/* ignore errors on applying disable config */
		ufs_qcom_phy_cfg_vreg(phy, vreg, false);
		vreg->enabled = false;
	} else {
		dev_err(dev, "%s: %s disable failed, err=%d\n",
			__func__, vreg->name, ret);
	}
out:
	return ret;
}

void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->is_ref_clk_enabled) {
		clk_disable_unprepare(phy->ref_clk);
		/*
		 * "ref_clk_parent" is an optional clock, hence make sure that
		 * the clk reference is available before trying to disable it.
		 */
		if (phy->ref_clk_parent)
			clk_disable_unprepare(phy->ref_clk_parent);
		clk_disable_unprepare(phy->ref_clk_src);
		phy->is_ref_clk_enabled = false;
	}
}

#define UFS_REF_CLK_EN	(1 << 5)

static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->dev_ref_clk_ctrl_mmio &&
	    (enable ^ phy->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= UFS_REF_CLK_EN;
		else
			temp &= ~UFS_REF_CLK_EN;

		/*
		 * If we are here to disable this clock immediately after
		 * entering into hibern8, we need to make sure that the device
		 * ref_clk is active for at least 1us after the hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();
		/*
		 * If we call hibern8 exit after this, we need to make sure
		 * that the device ref_clk is stable for at least 1us before
		 * the hibern8 exit command.
		 */
		if (enable)
			udelay(1);

		phy->is_dev_ref_clk_enabled = enable;
	}
}

void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
{
	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
}

void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
{
	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
}

/* Turn ON M-PHY RMMI interface clocks */
int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (phy->is_iface_clk_enabled)
		goto out;

	ret = clk_prepare_enable(phy->tx_iface_clk);
	if (ret) {
		dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
			__func__, ret);
		goto out;
	}
	ret = clk_prepare_enable(phy->rx_iface_clk);
	if (ret) {
		clk_disable_unprepare(phy->tx_iface_clk);
		dev_err(phy->dev, "%s: rx_iface_clk enable failed %d, also disabling tx_iface_clk\n",
			__func__, ret);
		goto out;
	}
	phy->is_iface_clk_enabled = true;

out:
	return ret;
}

/* Turn OFF M-PHY RMMI interface clocks */
void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->is_iface_clk_enabled) {
		clk_disable_unprepare(phy->tx_iface_clk);
		clk_disable_unprepare(phy->rx_iface_clk);
		phy->is_iface_clk_enabled = false;
	}
}

int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
		dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
	}

	return ret;
}

int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
		dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
							       tx_lanes);
	}

	return ret;
}

void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
					  u8 major, u16 minor, u16 step)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	ufs_qcom_phy->host_ctrl_rev_major = major;
	ufs_qcom_phy->host_ctrl_rev_minor = minor;
	ufs_qcom_phy->host_ctrl_rev_step = step;
}

int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
	int ret = 0;

	if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
		dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
			__func__);
		ret = -ENOTSUPP;
	} else {
		ret = ufs_qcom_phy->phy_spec_ops->calibrate_phy(ufs_qcom_phy,
								is_rate_B);
		if (ret)
			dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
				__func__, ret);
	}

	return ret;
}

int ufs_qcom_phy_remove(struct phy *generic_phy,
			struct ufs_qcom_phy *ufs_qcom_phy)
{
	phy_power_off(generic_phy);

	kfree(ufs_qcom_phy->vdda_pll.name);
	kfree(ufs_qcom_phy->vdda_phy.name);
	kfree(ufs_qcom_phy->vddp_ref_clk.name);

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);

int ufs_qcom_phy_exit(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (ufs_qcom_phy->is_powered_on)
		phy_power_off(generic_phy);

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);

int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
		dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
			__func__);
		return -ENOTSUPP;
	}

	return ufs_qcom_phy->phy_spec_ops->
			is_physical_coding_sublayer_ready(ufs_qcom_phy);
}

int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
	struct device *dev = phy_common->dev;
	int err;

	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
	if (err) {
		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
			__func__, err);
		goto out;
	}

	phy_common->phy_spec_ops->power_control(phy_common, true);

	/* vdda_pll also enables ref clock LDOs so enable it first */
	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
	if (err) {
		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
			__func__, err);
		goto out_disable_phy;
	}

	err = ufs_qcom_phy_enable_ref_clk(generic_phy);
	if (err) {
		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
			__func__, err);
		goto out_disable_pll;
	}

	/* enable device PHY ref_clk pad rail */
	if (phy_common->vddp_ref_clk.reg) {
		err = ufs_qcom_phy_enable_vreg(generic_phy,
					       &phy_common->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
				__func__, err);
			goto out_disable_ref_clk;
		}
	}

	phy_common->is_powered_on = true;
	goto out;

out_disable_ref_clk:
	ufs_qcom_phy_disable_ref_clk(generic_phy);
out_disable_pll:
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
out_disable_phy:
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);

int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

	phy_common->phy_spec_ops->power_control(phy_common, false);

	if (phy_common->vddp_ref_clk.reg)
		ufs_qcom_phy_disable_vreg(generic_phy,
					  &phy_common->vddp_ref_clk);
	ufs_qcom_phy_disable_ref_clk(generic_phy);

	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
	phy_common->is_powered_on = false;

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
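
/*
 * Wiring sketch (hypothetical, not from this file): a generation-specific
 * driver typically exposes the power helpers above through the struct
 * phy_ops it passes to ufs_qcom_phy_generic_probe(), e.g.:
 *
 *	static struct phy_ops my_gen_phy_ops = {
 *		.power_on	= ufs_qcom_phy_power_on,
 *		.power_off	= ufs_qcom_phy_power_off,
 *		.owner		= THIS_MODULE,
 *	};
 */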