]>
Commit | Line | Data |
---|---|---|
adaafaa3 YG |
1 | /* |
2 | * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License version 2 and | |
6 | * only version 2 as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
12 | * | |
13 | */ | |
14 | ||
15 | #include "phy-qcom-ufs-i.h" | |
16 | ||
17 | #define MAX_PROP_NAME 32 | |
18 | #define VDDA_PHY_MIN_UV 1000000 | |
19 | #define VDDA_PHY_MAX_UV 1000000 | |
20 | #define VDDA_PLL_MIN_UV 1800000 | |
21 | #define VDDA_PLL_MAX_UV 1800000 | |
22 | #define VDDP_REF_CLK_MIN_UV 1200000 | |
23 | #define VDDP_REF_CLK_MAX_UV 1200000 | |
24 | ||
adaafaa3 YG |
25 | int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy, |
26 | struct ufs_qcom_phy_calibration *tbl_A, | |
27 | int tbl_size_A, | |
28 | struct ufs_qcom_phy_calibration *tbl_B, | |
29 | int tbl_size_B, bool is_rate_B) | |
30 | { | |
31 | int i; | |
32 | int ret = 0; | |
33 | ||
34 | if (!tbl_A) { | |
35 | dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__); | |
36 | ret = EINVAL; | |
37 | goto out; | |
38 | } | |
39 | ||
40 | for (i = 0; i < tbl_size_A; i++) | |
41 | writel_relaxed(tbl_A[i].cfg_value, | |
42 | ufs_qcom_phy->mmio + tbl_A[i].reg_offset); | |
43 | ||
44 | /* | |
45 | * In case we would like to work in rate B, we need | |
46 | * to override a registers that were configured in rate A table | |
47 | * with registers of rate B table. | |
48 | * table. | |
49 | */ | |
50 | if (is_rate_B) { | |
51 | if (!tbl_B) { | |
52 | dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL", | |
53 | __func__); | |
54 | ret = EINVAL; | |
55 | goto out; | |
56 | } | |
57 | ||
58 | for (i = 0; i < tbl_size_B; i++) | |
59 | writel_relaxed(tbl_B[i].cfg_value, | |
60 | ufs_qcom_phy->mmio + tbl_B[i].reg_offset); | |
61 | } | |
62 | ||
63 | /* flush buffered writes */ | |
64 | mb(); | |
65 | ||
66 | out: | |
67 | return ret; | |
68 | } | |
358d6c87 | 69 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate); |
adaafaa3 | 70 | |
/*
 * This assumes the embedded phy structure inside generic_phy is of type
 * struct ufs_qcom_phy. In order to function properly it's crucial
 * to keep the embedded struct "struct ufs_qcom_phy common_cfg"
 * as the first inside generic_phy.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
	void *drvdata = phy_get_drvdata(generic_phy);

	return (struct ufs_qcom_phy *)drvdata;
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);
adaafaa3 YG |
82 | |
83 | static | |
84 | int ufs_qcom_phy_base_init(struct platform_device *pdev, | |
85 | struct ufs_qcom_phy *phy_common) | |
86 | { | |
87 | struct device *dev = &pdev->dev; | |
88 | struct resource *res; | |
89 | int err = 0; | |
90 | ||
91 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem"); | |
adaafaa3 YG |
92 | phy_common->mmio = devm_ioremap_resource(dev, res); |
93 | if (IS_ERR((void const *)phy_common->mmio)) { | |
94 | err = PTR_ERR((void const *)phy_common->mmio); | |
95 | phy_common->mmio = NULL; | |
96 | dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n", | |
97 | __func__, err); | |
52ea796b | 98 | return err; |
adaafaa3 YG |
99 | } |
100 | ||
101 | /* "dev_ref_clk_ctrl_mem" is optional resource */ | |
102 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | |
103 | "dev_ref_clk_ctrl_mem"); | |
adaafaa3 | 104 | phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res); |
52ea796b | 105 | if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio)) |
adaafaa3 | 106 | phy_common->dev_ref_clk_ctrl_mmio = NULL; |
adaafaa3 | 107 | |
52ea796b | 108 | return 0; |
adaafaa3 YG |
109 | } |
110 | ||
/**
 * ufs_qcom_phy_generic_probe() - common probe helper for Qualcomm UFS PHYs
 * @pdev: platform device for the PHY
 * @common_cfg: driver-private common PHY state to initialize
 * @ufs_qcom_phy_gen_ops: generic phy_ops passed to devm_phy_create()
 * @phy_spec_ops: PHY-generation-specific callbacks stored for later use
 *
 * Maps register resources, registers this device as a PHY provider and
 * creates the generic PHY object. All resources are devm-managed.
 *
 * Return: the created struct phy on success, or NULL on any failure
 * (note: callers must check for NULL, not an ERR_PTR).
 */
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
				struct ufs_qcom_phy *common_cfg,
				const struct phy_ops *ufs_qcom_phy_gen_ops,
				struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
	int err;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy = NULL;
	struct phy_provider *phy_provider;

	err = ufs_qcom_phy_base_init(pdev, common_cfg);
	if (err) {
		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
		goto out;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
		goto out;
	}

	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
	if (IS_ERR(generic_phy)) {
		err = PTR_ERR(generic_phy);
		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
		/* normalize ERR_PTR to NULL: this function's failure value */
		generic_phy = NULL;
		goto out;
	}

	common_cfg->phy_spec_ops = phy_spec_ops;
	common_cfg->dev = dev;

out:
	return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
149 | ||
89bd296b | 150 | static int __ufs_qcom_phy_clk_get(struct device *dev, |
adaafaa3 YG |
151 | const char *name, struct clk **clk_out, bool err_print) |
152 | { | |
153 | struct clk *clk; | |
154 | int err = 0; | |
adaafaa3 YG |
155 | |
156 | clk = devm_clk_get(dev, name); | |
157 | if (IS_ERR(clk)) { | |
158 | err = PTR_ERR(clk); | |
159 | if (err_print) | |
160 | dev_err(dev, "failed to get %s err %d", name, err); | |
161 | } else { | |
162 | *clk_out = clk; | |
163 | } | |
164 | ||
165 | return err; | |
166 | } | |
167 | ||
89bd296b | 168 | static int ufs_qcom_phy_clk_get(struct device *dev, |
adaafaa3 YG |
169 | const char *name, struct clk **clk_out) |
170 | { | |
89bd296b | 171 | return __ufs_qcom_phy_clk_get(dev, name, clk_out, true); |
adaafaa3 YG |
172 | } |
173 | ||
/**
 * ufs_qcom_phy_init_clks() - acquire all clocks used by the UFS PHY
 * @phy_common: PHY whose clock handles are populated
 *
 * Looks up the tx/rx M-PHY interface clocks and the reference clock chain
 * (ref_clk_src -> optional ref_clk_parent -> ref_clk) with devm, so no
 * explicit release is needed.
 *
 * Return: 0 on success, or the negative errno of the first mandatory clock
 * that could not be obtained.
 */
int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
{
	int err;

	/*
	 * The msm8996 14nm QMP PHY compatible skips the tx/rx iface and
	 * ref_clk_src lookups entirely — presumably those clocks do not
	 * exist on that platform (NOTE(review): confirm against its DT
	 * binding).
	 */
	if (of_device_is_compatible(phy_common->dev->of_node,
				"qcom,msm8996-ufs-phy-qmp-14nm"))
		goto skip_txrx_clk;

	err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
				   &phy_common->tx_iface_clk);
	if (err)
		goto out;

	err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
				   &phy_common->rx_iface_clk);
	if (err)
		goto out;

	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
				   &phy_common->ref_clk_src);
	if (err)
		goto out;

skip_txrx_clk:
	/*
	 * "ref_clk_parent" is optional hence don't abort init if it's not
	 * found.
	 */
	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
				   &phy_common->ref_clk_parent, false);

	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
				   &phy_common->ref_clk);

out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
adaafaa3 | 212 | |
89bd296b | 213 | static int __ufs_qcom_phy_init_vreg(struct device *dev, |
adaafaa3 YG |
214 | struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional) |
215 | { | |
216 | int err = 0; | |
adaafaa3 YG |
217 | |
218 | char prop_name[MAX_PROP_NAME]; | |
219 | ||
add78fc0 | 220 | vreg->name = devm_kstrdup(dev, name, GFP_KERNEL); |
adaafaa3 YG |
221 | if (!vreg->name) { |
222 | err = -ENOMEM; | |
223 | goto out; | |
224 | } | |
225 | ||
226 | vreg->reg = devm_regulator_get(dev, name); | |
227 | if (IS_ERR(vreg->reg)) { | |
228 | err = PTR_ERR(vreg->reg); | |
229 | vreg->reg = NULL; | |
230 | if (!optional) | |
231 | dev_err(dev, "failed to get %s, %d\n", name, err); | |
232 | goto out; | |
233 | } | |
234 | ||
235 | if (dev->of_node) { | |
236 | snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name); | |
237 | err = of_property_read_u32(dev->of_node, | |
238 | prop_name, &vreg->max_uA); | |
239 | if (err && err != -EINVAL) { | |
240 | dev_err(dev, "%s: failed to read %s\n", | |
241 | __func__, prop_name); | |
242 | goto out; | |
243 | } else if (err == -EINVAL || !vreg->max_uA) { | |
244 | if (regulator_count_voltages(vreg->reg) > 0) { | |
245 | dev_err(dev, "%s: %s is mandatory\n", | |
246 | __func__, prop_name); | |
247 | goto out; | |
248 | } | |
249 | err = 0; | |
250 | } | |
251 | snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name); | |
3ea981ed JL |
252 | vreg->is_always_on = of_property_read_bool(dev->of_node, |
253 | prop_name); | |
adaafaa3 YG |
254 | } |
255 | ||
256 | if (!strcmp(name, "vdda-pll")) { | |
257 | vreg->max_uV = VDDA_PLL_MAX_UV; | |
258 | vreg->min_uV = VDDA_PLL_MIN_UV; | |
259 | } else if (!strcmp(name, "vdda-phy")) { | |
260 | vreg->max_uV = VDDA_PHY_MAX_UV; | |
261 | vreg->min_uV = VDDA_PHY_MIN_UV; | |
262 | } else if (!strcmp(name, "vddp-ref-clk")) { | |
263 | vreg->max_uV = VDDP_REF_CLK_MAX_UV; | |
264 | vreg->min_uV = VDDP_REF_CLK_MIN_UV; | |
265 | } | |
266 | ||
267 | out: | |
268 | if (err) | |
269 | kfree(vreg->name); | |
270 | return err; | |
271 | } | |
272 | ||
89bd296b | 273 | static int ufs_qcom_phy_init_vreg(struct device *dev, |
adaafaa3 YG |
274 | struct ufs_qcom_phy_vreg *vreg, const char *name) |
275 | { | |
89bd296b | 276 | return __ufs_qcom_phy_init_vreg(dev, vreg, name, false); |
adaafaa3 YG |
277 | } |
278 | ||
15887cb8 VG |
279 | int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common) |
280 | { | |
281 | int err; | |
282 | ||
283 | err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll, | |
284 | "vdda-pll"); | |
285 | if (err) | |
286 | goto out; | |
287 | ||
288 | err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy, | |
289 | "vdda-phy"); | |
290 | ||
291 | if (err) | |
292 | goto out; | |
293 | ||
294 | /* vddp-ref-clk-* properties are optional */ | |
295 | __ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk, | |
296 | "vddp-ref-clk", true); | |
297 | out: | |
298 | return err; | |
299 | } | |
300 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators); | |
301 | ||
89bd296b | 302 | static int ufs_qcom_phy_cfg_vreg(struct device *dev, |
adaafaa3 YG |
303 | struct ufs_qcom_phy_vreg *vreg, bool on) |
304 | { | |
305 | int ret = 0; | |
306 | struct regulator *reg = vreg->reg; | |
307 | const char *name = vreg->name; | |
308 | int min_uV; | |
309 | int uA_load; | |
adaafaa3 | 310 | |
adaafaa3 YG |
311 | if (regulator_count_voltages(reg) > 0) { |
312 | min_uV = on ? vreg->min_uV : 0; | |
313 | ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); | |
314 | if (ret) { | |
315 | dev_err(dev, "%s: %s set voltage failed, err=%d\n", | |
316 | __func__, name, ret); | |
317 | goto out; | |
318 | } | |
319 | uA_load = on ? vreg->max_uA : 0; | |
7e476c7d | 320 | ret = regulator_set_load(reg, uA_load); |
adaafaa3 YG |
321 | if (ret >= 0) { |
322 | /* | |
7e476c7d | 323 | * regulator_set_load() returns new regulator |
adaafaa3 YG |
324 | * mode upon success. |
325 | */ | |
326 | ret = 0; | |
327 | } else { | |
328 | dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n", | |
329 | __func__, name, uA_load, ret); | |
330 | goto out; | |
331 | } | |
332 | } | |
333 | out: | |
334 | return ret; | |
335 | } | |
336 | ||
89bd296b | 337 | static int ufs_qcom_phy_enable_vreg(struct device *dev, |
adaafaa3 YG |
338 | struct ufs_qcom_phy_vreg *vreg) |
339 | { | |
adaafaa3 YG |
340 | int ret = 0; |
341 | ||
342 | if (!vreg || vreg->enabled) | |
343 | goto out; | |
344 | ||
89bd296b | 345 | ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true); |
adaafaa3 YG |
346 | if (ret) { |
347 | dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n", | |
348 | __func__, ret); | |
349 | goto out; | |
350 | } | |
351 | ||
352 | ret = regulator_enable(vreg->reg); | |
353 | if (ret) { | |
354 | dev_err(dev, "%s: enable failed, err=%d\n", | |
355 | __func__, ret); | |
356 | goto out; | |
357 | } | |
358 | ||
359 | vreg->enabled = true; | |
360 | out: | |
361 | return ret; | |
362 | } | |
363 | ||
/*
 * Enable the reference clock chain in source-to-PHY order:
 * ref_clk_src -> (optional) ref_clk_parent -> ref_clk. On any failure the
 * already-enabled stages are unwound in reverse order. Idempotent: a
 * second call while enabled is a no-op returning 0.
 */
static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
{
	int ret = 0;

	if (phy->is_ref_clk_enabled)
		goto out;

	/*
	 * reference clock is propagated in a daisy-chained manner from
	 * source to phy, so ungate them at each stage.
	 */
	ret = clk_prepare_enable(phy->ref_clk_src);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * "ref_clk_parent" is optional clock hence make sure that clk reference
	 * is available before trying to enable the clock.
	 */
	if (phy->ref_clk_parent) {
		ret = clk_prepare_enable(phy->ref_clk_parent);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
				__func__, ret);
			goto out_disable_src;
		}
	}

	ret = clk_prepare_enable(phy->ref_clk);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
			__func__, ret);
		goto out_disable_parent;
	}

	phy->is_ref_clk_enabled = true;
	goto out;

	/* error unwind: disable in reverse of the enable order */
out_disable_parent:
	if (phy->ref_clk_parent)
		clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
	clk_disable_unprepare(phy->ref_clk_src);
out:
	return ret;
}
413 | ||
89bd296b | 414 | static int ufs_qcom_phy_disable_vreg(struct device *dev, |
adaafaa3 YG |
415 | struct ufs_qcom_phy_vreg *vreg) |
416 | { | |
adaafaa3 YG |
417 | int ret = 0; |
418 | ||
419 | if (!vreg || !vreg->enabled || vreg->is_always_on) | |
420 | goto out; | |
421 | ||
422 | ret = regulator_disable(vreg->reg); | |
423 | ||
424 | if (!ret) { | |
425 | /* ignore errors on applying disable config */ | |
89bd296b | 426 | ufs_qcom_phy_cfg_vreg(dev, vreg, false); |
adaafaa3 YG |
427 | vreg->enabled = false; |
428 | } else { | |
429 | dev_err(dev, "%s: %s disable failed, err=%d\n", | |
430 | __func__, vreg->name, ret); | |
431 | } | |
432 | out: | |
433 | return ret; | |
434 | } | |
435 | ||
feb3d798 | 436 | static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy) |
adaafaa3 | 437 | { |
adaafaa3 YG |
438 | if (phy->is_ref_clk_enabled) { |
439 | clk_disable_unprepare(phy->ref_clk); | |
440 | /* | |
441 | * "ref_clk_parent" is optional clock hence make sure that clk | |
442 | * reference is available before trying to disable the clock. | |
443 | */ | |
444 | if (phy->ref_clk_parent) | |
445 | clk_disable_unprepare(phy->ref_clk_parent); | |
446 | clk_disable_unprepare(phy->ref_clk_src); | |
447 | phy->is_ref_clk_enabled = false; | |
448 | } | |
449 | } | |
450 | ||
/* Bit in the dev_ref_clk_ctrl register that gates the device ref clock */
#define UFS_REF_CLK_EN	(1 << 5)

/*
 * Toggle the device reference clock gate. Only acts when the optional
 * dev_ref_clk_ctrl register was mapped and the requested state differs
 * from the cached one. The udelay()/wmb() placement enforces the >=1us
 * device ref_clk activity/stability windows around hibern8 transitions —
 * do not reorder these statements.
 */
static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	/* (enable ^ cached state) -> only touch hardware on a real change */
	if (phy->dev_ref_clk_ctrl_mmio &&
			(enable ^ phy->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= UFS_REF_CLK_EN;
		else
			temp &= ~UFS_REF_CLK_EN;

		/*
		 * If we are here to disable this clock immediately after
		 * entering into hibern8, we need to make sure that device
		 * ref_clk is active atleast 1us after the hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();
		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for atleast 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		phy->is_dev_ref_clk_enabled = enable;
	}
}
488 | ||
489 | void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy) | |
490 | { | |
491 | ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); | |
492 | } | |
65d49b3d | 493 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk); |
adaafaa3 YG |
494 | |
495 | void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) | |
496 | { | |
497 | ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); | |
498 | } | |
65d49b3d | 499 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk); |
adaafaa3 YG |
500 | |
501 | /* Turn ON M-PHY RMMI interface clocks */ | |
feb3d798 | 502 | static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy) |
adaafaa3 | 503 | { |
adaafaa3 YG |
504 | int ret = 0; |
505 | ||
506 | if (phy->is_iface_clk_enabled) | |
507 | goto out; | |
508 | ||
509 | ret = clk_prepare_enable(phy->tx_iface_clk); | |
510 | if (ret) { | |
511 | dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n", | |
512 | __func__, ret); | |
513 | goto out; | |
514 | } | |
515 | ret = clk_prepare_enable(phy->rx_iface_clk); | |
516 | if (ret) { | |
517 | clk_disable_unprepare(phy->tx_iface_clk); | |
518 | dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n", | |
519 | __func__, ret); | |
520 | goto out; | |
521 | } | |
522 | phy->is_iface_clk_enabled = true; | |
523 | ||
524 | out: | |
525 | return ret; | |
526 | } | |
527 | ||
528 | /* Turn OFF M-PHY RMMI interface clocks */ | |
feb3d798 | 529 | void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy) |
adaafaa3 | 530 | { |
adaafaa3 YG |
531 | if (phy->is_iface_clk_enabled) { |
532 | clk_disable_unprepare(phy->tx_iface_clk); | |
533 | clk_disable_unprepare(phy->rx_iface_clk); | |
534 | phy->is_iface_clk_enabled = false; | |
535 | } | |
536 | } | |
537 | ||
538 | int ufs_qcom_phy_start_serdes(struct phy *generic_phy) | |
539 | { | |
540 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | |
541 | int ret = 0; | |
542 | ||
543 | if (!ufs_qcom_phy->phy_spec_ops->start_serdes) { | |
544 | dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n", | |
545 | __func__); | |
546 | ret = -ENOTSUPP; | |
547 | } else { | |
548 | ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy); | |
549 | } | |
550 | ||
551 | return ret; | |
552 | } | |
65d49b3d | 553 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes); |
adaafaa3 YG |
554 | |
555 | int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) | |
556 | { | |
557 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | |
558 | int ret = 0; | |
559 | ||
560 | if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) { | |
561 | dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n", | |
562 | __func__); | |
563 | ret = -ENOTSUPP; | |
564 | } else { | |
565 | ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy, | |
566 | tx_lanes); | |
567 | } | |
568 | ||
569 | return ret; | |
570 | } | |
65d49b3d | 571 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable); |
adaafaa3 YG |
572 | |
573 | void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, | |
574 | u8 major, u16 minor, u16 step) | |
575 | { | |
576 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | |
577 | ||
578 | ufs_qcom_phy->host_ctrl_rev_major = major; | |
579 | ufs_qcom_phy->host_ctrl_rev_minor = minor; | |
580 | ufs_qcom_phy->host_ctrl_rev_step = step; | |
581 | } | |
65d49b3d | 582 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version); |
adaafaa3 YG |
583 | |
584 | int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) | |
585 | { | |
586 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | |
587 | int ret = 0; | |
588 | ||
589 | if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) { | |
590 | dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n", | |
591 | __func__); | |
592 | ret = -ENOTSUPP; | |
593 | } else { | |
594 | ret = ufs_qcom_phy->phy_spec_ops-> | |
595 | calibrate_phy(ufs_qcom_phy, is_rate_B); | |
596 | if (ret) | |
597 | dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n", | |
598 | __func__, ret); | |
599 | } | |
600 | ||
601 | return ret; | |
602 | } | |
65d49b3d | 603 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy); |
adaafaa3 | 604 | |
adaafaa3 YG |
605 | int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy) |
606 | { | |
607 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | |
608 | ||
609 | if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) { | |
610 | dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n", | |
611 | __func__); | |
612 | return -ENOTSUPP; | |
613 | } | |
614 | ||
615 | return ufs_qcom_phy->phy_spec_ops-> | |
616 | is_physical_coding_sublayer_ready(ufs_qcom_phy); | |
617 | } | |
65d49b3d | 618 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready); |
adaafaa3 YG |
619 | |
/**
 * ufs_qcom_phy_power_on() - bring the PHY fully up
 * @generic_phy: the PHY to power on
 *
 * Power-on order: vdda-phy supply, PHY analog power control, vdda-pll
 * supply (which also feeds the ref clock LDOs), iface clocks, ref clock
 * chain, then the optional vddp-ref-clk pad rail. On failure everything
 * already enabled is unwound in reverse order. Idempotent when already on.
 *
 * Return: 0 on success or a negative errno.
 */
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
	struct device *dev = phy_common->dev;
	int err;

	if (phy_common->is_powered_on)
		return 0;

	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
	if (err) {
		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
			__func__, err);
		goto out;
	}

	phy_common->phy_spec_ops->power_control(phy_common, true);

	/* vdda_pll also enables ref clock LDOs so enable it first */
	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
	if (err) {
		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
			__func__, err);
		goto out_disable_phy;
	}

	err = ufs_qcom_phy_enable_iface_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
			__func__, err);
		goto out_disable_pll;
	}

	err = ufs_qcom_phy_enable_ref_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
			__func__, err);
		goto out_disable_iface_clk;
	}

	/* enable device PHY ref_clk pad rail */
	if (phy_common->vddp_ref_clk.reg) {
		err = ufs_qcom_phy_enable_vreg(dev,
					       &phy_common->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
				__func__, err);
			goto out_disable_ref_clk;
		}
	}

	phy_common->is_powered_on = true;
	goto out;

	/* error unwind: release resources in reverse acquisition order */
out_disable_ref_clk:
	ufs_qcom_phy_disable_ref_clk(phy_common);
out_disable_iface_clk:
	ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);
adaafaa3 YG |
686 | |
687 | int ufs_qcom_phy_power_off(struct phy *generic_phy) | |
688 | { | |
689 | struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy); | |
690 | ||
3d4640f1 VG |
691 | if (!phy_common->is_powered_on) |
692 | return 0; | |
693 | ||
adaafaa3 YG |
694 | phy_common->phy_spec_ops->power_control(phy_common, false); |
695 | ||
696 | if (phy_common->vddp_ref_clk.reg) | |
89bd296b | 697 | ufs_qcom_phy_disable_vreg(phy_common->dev, |
adaafaa3 | 698 | &phy_common->vddp_ref_clk); |
feb3d798 VG |
699 | ufs_qcom_phy_disable_ref_clk(phy_common); |
700 | ufs_qcom_phy_disable_iface_clk(phy_common); | |
adaafaa3 | 701 | |
89bd296b VG |
702 | ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll); |
703 | ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy); | |
adaafaa3 YG |
704 | phy_common->is_powered_on = false; |
705 | ||
706 | return 0; | |
707 | } | |
358d6c87 | 708 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); |