author     Yaniv Gardi <ygardi@codeaurora.org>    2015-01-15 09:32:35 -0500
committer  Christoph Hellwig <hch@lst.de>         2015-01-20 13:43:24 -0500
commit     adaafaa393ef1900e23f7708e29d023f721c54b3 (patch)
tree       a665f01e63c6724b31209f2bc72dfcc64f24951d /drivers/phy
parent     c7e09574d620246347a5fb87118e624d69c27f41 (diff)
phy: qcom-ufs: add support for QUALCOMM Technologies UFS PHY drivers
This change adds generic, common API support for QUALCOMM Technologies UFS PHYs. The common code handles the shared logic and dispatches to PHY-specific callbacks for the behaviors that differ between PHYs in frequent use-cases (such as power on, power off, PHY calibration, etc.).

Signed-off-by: Yaniv Gardi <ygardi@codeaurora.org>
Reviewed-by: Dov Levenglick <dovl@codeaurora.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/phy')
-rw-r--r--  drivers/phy/Kconfig          |   7
-rw-r--r--  drivers/phy/Makefile         |   1
-rw-r--r--  drivers/phy/phy-qcom-ufs-i.h | 118
-rw-r--r--  drivers/phy/phy-qcom-ufs.c   | 745
4 files changed, 871 insertions, 0 deletions
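The common layer added below exposes ufs_qcom_phy_generic_probe() together with a set of ufs_qcom_phy_specific_ops callbacks that each chipset-specific PHY driver fills in. The probe-path sketch below shows how such a driver is expected to plug in; the "xyz" driver, its stub callback bodies and error codes are illustrative assumptions, not part of this patch (driver registration is omitted).

/* Hypothetical chipset-specific glue; all "xyz" names are illustrative. */
#include <linux/module.h>
#include "phy-qcom-ufs-i.h"

struct ufs_qcom_phy_xyz {
	struct ufs_qcom_phy common_cfg;	/* must remain the first member */
};

/* Trivial stubs; a real driver programs its QSERDES/PCS registers here. */
static int xyz_phy_calibrate(struct ufs_qcom_phy *phy, bool is_rate_B)
{
	/* expanded into a table-driven sketch after phy-qcom-ufs-i.h below */
	return 0;
}

static void xyz_phy_start_serdes(struct ufs_qcom_phy *phy)
{
}

static int xyz_phy_is_pcs_ready(struct ufs_qcom_phy *phy)
{
	return 0;
}

static void xyz_phy_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
{
}

static void xyz_phy_power_control(struct ufs_qcom_phy *phy, bool val)
{
}

static struct ufs_qcom_phy_specific_ops xyz_phy_specific_ops = {
	.calibrate_phy		= xyz_phy_calibrate,
	.start_serdes		= xyz_phy_start_serdes,
	.is_physical_coding_sublayer_ready = xyz_phy_is_pcs_ready,
	.set_tx_lane_enable	= xyz_phy_set_tx_lane_enable,
	.power_control		= xyz_phy_power_control,
};

/* Generic phy framework callbacks map onto the common helpers. */
static struct phy_ops xyz_phy_gen_ops = {
	.power_on	= ufs_qcom_phy_power_on,
	.power_off	= ufs_qcom_phy_power_off,
	.exit		= ufs_qcom_phy_exit,
	.owner		= THIS_MODULE,
};

static int xyz_phy_probe(struct platform_device *pdev)
{
	struct ufs_qcom_phy_xyz *phy;
	struct phy *generic_phy;
	int err;

	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
				&xyz_phy_gen_ops, &xyz_phy_specific_ops);
	if (IS_ERR_OR_NULL(generic_phy))
		return generic_phy ? PTR_ERR(generic_phy) : -EIO;

	err = ufs_qcom_phy_init_clks(generic_phy, &phy->common_cfg);
	if (err)
		return err;

	err = ufs_qcom_phy_init_vregulators(generic_phy, &phy->common_cfg);
	if (err)
		return err;

	/* get_ufs_qcom_phy() reads this drvdata back as struct ufs_qcom_phy */
	phy_set_drvdata(generic_phy, phy);

	return 0;
}

Embedding struct ufs_qcom_phy as the first member of the driver-private structure is what allows get_ufs_qcom_phy() to recover the common state from the generic phy's drvdata.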
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index ccad8809ecb1..26a7623e551e 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -277,4 +277,11 @@ config PHY_STIH41X_USB
 	  Enable this to support the USB transceiver that is part of
 	  STMicroelectronics STiH41x SoC series.
 
+config PHY_QCOM_UFS
+	tristate "Qualcomm UFS PHY driver"
+	depends on OF && ARCH_MSM
+	select GENERIC_PHY
+	help
+	  Support for UFS PHY on QCOM chipsets.
+
 endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index aa74f961e44e..335965d3a9b7 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o
 obj-$(CONFIG_PHY_XGENE)		+= phy-xgene.o
 obj-$(CONFIG_PHY_STIH407_USB)		+= phy-stih407-usb.o
 obj-$(CONFIG_PHY_STIH41X_USB)		+= phy-stih41x-usb.o
+obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs.o
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
new file mode 100644
index 000000000000..dac200f4d639
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -0,0 +1,118 @@
1/*
2 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef UFS_QCOM_PHY_I_H_
16#define UFS_QCOM_PHY_I_H_
17
18#include <linux/clk.h>
19#include <linux/slab.h>
20#include <linux/phy/phy.h>
21#include <linux/platform_device.h>
22#include <linux/io.h>
23#include <linux/delay.h>
24
25#define UFS_QCOM_PHY_NAME_LEN 30
26
27struct ufs_qcom_phy_calibration {
28 u32 reg_offset;
29 u32 cfg_value;
30};
31
32struct ufs_qcom_phy_vreg {
33 const char *name;
34 struct regulator *reg;
35 int max_uA;
36 int min_uV;
37 int max_uV;
38 bool enabled;
39 bool is_always_on;
40};
41
42struct ufs_qcom_phy {
43 struct list_head list;
44 struct device *dev;
45 void __iomem *mmio;
46 void __iomem *dev_ref_clk_ctrl_mmio;
47 struct clk *tx_iface_clk;
48 struct clk *rx_iface_clk;
49 bool is_iface_clk_enabled;
50 struct clk *ref_clk_src;
51 struct clk *ref_clk_parent;
52 struct clk *ref_clk;
53 bool is_ref_clk_enabled;
54 bool is_dev_ref_clk_enabled;
55 struct ufs_qcom_phy_vreg vdda_pll;
56 struct ufs_qcom_phy_vreg vdda_phy;
57 struct ufs_qcom_phy_vreg vddp_ref_clk;
58 unsigned int quirks;
59
60 /*
61 * If UFS link is put into Hibern8 and if UFS PHY analog hardware is
62 * power collapsed (by clearing UFS_PHY_POWER_DOWN_CONTROL), Hibern8
63 * exit might fail even after powering on UFS PHY analog hardware.
64 * Enabling this quirk helps to solve the above issue by applying
65 * custom PHY settings just before PHY analog power collapse.
66 */
67 #define UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE BIT(0)
68
69 u8 host_ctrl_rev_major;
70 u16 host_ctrl_rev_minor;
71 u16 host_ctrl_rev_step;
72
73 char name[UFS_QCOM_PHY_NAME_LEN];
74 struct ufs_qcom_phy_calibration *cached_regs;
75 int cached_regs_table_size;
76 bool is_powered_on;
77 struct ufs_qcom_phy_specific_ops *phy_spec_ops;
78};
79
80/**
81 * struct ufs_qcom_phy_specific_ops - set of pointers to functions which have a
82 * specific implementation per phy. Each UFS phy should implement
83 * those functions according to its spec and requirements
84 * @calibrate_phy: pointer to a function that calibrates the phy
85 * @start_serdes: pointer to a function that starts the serdes
86 * @is_physical_coding_sublayer_ready: pointer to a function that
87 * checks pcs readiness. returns 0 for success and non-zero for error.
88 * @set_tx_lane_enable: pointer to a function that enables tx lanes
89 * @power_control: pointer to a function that controls analog rail of phy
90 * and writes to QSERDES_RX_SIGDET_CNTRL attribute
91 */
92struct ufs_qcom_phy_specific_ops {
93 int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
94 void (*start_serdes)(struct ufs_qcom_phy *phy);
95 int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
96 void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
97 void (*power_control)(struct ufs_qcom_phy *phy, bool val);
98};
99
100struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
101int ufs_qcom_phy_power_on(struct phy *generic_phy);
102int ufs_qcom_phy_power_off(struct phy *generic_phy);
103int ufs_qcom_phy_exit(struct phy *generic_phy);
104int ufs_qcom_phy_init_clks(struct phy *generic_phy,
105 struct ufs_qcom_phy *phy_common);
106int ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
107 struct ufs_qcom_phy *phy_common);
108int ufs_qcom_phy_remove(struct phy *generic_phy,
109 struct ufs_qcom_phy *ufs_qcom_phy);
110struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
111 struct ufs_qcom_phy *common_cfg,
112 struct phy_ops *ufs_qcom_phy_gen_ops,
113 struct ufs_qcom_phy_specific_ops *phy_spec_ops);
114int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
115 struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
116 struct ufs_qcom_phy_calibration *tbl_B, int tbl_size_B,
117 bool is_rate_B);
118#endif
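ufs_qcom_phy_calibrate(), declared above, takes a rate A calibration table plus an optional rate B table whose entries override the rate A settings. The sketch below shows how the xyz_phy_calibrate() stub from the earlier probe sketch could feed it; the register offsets and values are placeholders, not real QSERDES programming.

static struct ufs_qcom_phy_calibration xyz_calibration_rate_A[] = {
	{ .reg_offset = 0x000, .cfg_value = 0x01 },	/* placeholder */
	{ .reg_offset = 0x004, .cfg_value = 0x3f },	/* placeholder */
};

/* Rate B only overrides the registers that differ from the rate A set. */
static struct ufs_qcom_phy_calibration xyz_calibration_rate_B[] = {
	{ .reg_offset = 0x004, .cfg_value = 0x2a },	/* placeholder */
};

static int xyz_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy, bool is_rate_B)
{
	return ufs_qcom_phy_calibrate(ufs_qcom_phy,
			xyz_calibration_rate_A,
			ARRAY_SIZE(xyz_calibration_rate_A),
			xyz_calibration_rate_B,
			ARRAY_SIZE(xyz_calibration_rate_B),
			is_rate_B);
}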
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
new file mode 100644
index 000000000000..44ee983d57fe
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -0,0 +1,745 @@
1/*
2 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include "phy-qcom-ufs-i.h"
16
17#define MAX_PROP_NAME 32
18#define VDDA_PHY_MIN_UV 1000000
19#define VDDA_PHY_MAX_UV 1000000
20#define VDDA_PLL_MIN_UV 1800000
21#define VDDA_PLL_MAX_UV 1800000
22#define VDDP_REF_CLK_MIN_UV 1200000
23#define VDDP_REF_CLK_MAX_UV 1200000
24
25static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
26 const char *, bool);
27static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
28 const char *);
29static int ufs_qcom_phy_base_init(struct platform_device *pdev,
30 struct ufs_qcom_phy *phy_common);
31
32int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
33 struct ufs_qcom_phy_calibration *tbl_A,
34 int tbl_size_A,
35 struct ufs_qcom_phy_calibration *tbl_B,
36 int tbl_size_B, bool is_rate_B)
37{
38 int i;
39 int ret = 0;
40
41 if (!tbl_A) {
42 dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL\n", __func__);
43 ret = -EINVAL;
44 goto out;
45 }
46
47 for (i = 0; i < tbl_size_A; i++)
48 writel_relaxed(tbl_A[i].cfg_value,
49 ufs_qcom_phy->mmio + tbl_A[i].reg_offset);
50
51 /*
52 * In case we would like to work in rate B, we need to
53 * override the registers that were configured in the rate A
54 * table with the corresponding registers from the rate B
55 * table.
56 */
57 if (is_rate_B) {
58 if (!tbl_B) {
59 dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL\n",
60 __func__);
61 ret = -EINVAL;
62 goto out;
63 }
64
65 for (i = 0; i < tbl_size_B; i++)
66 writel_relaxed(tbl_B[i].cfg_value,
67 ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
68 }
69
70 /* flush buffered writes */
71 mb();
72
73out:
74 return ret;
75}
76
77struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
78 struct ufs_qcom_phy *common_cfg,
79 struct phy_ops *ufs_qcom_phy_gen_ops,
80 struct ufs_qcom_phy_specific_ops *phy_spec_ops)
81{
82 int err;
83 struct device *dev = &pdev->dev;
84 struct phy *generic_phy = NULL;
85 struct phy_provider *phy_provider;
86
87 err = ufs_qcom_phy_base_init(pdev, common_cfg);
88 if (err) {
89 dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
90 goto out;
91 }
92
93 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
94 if (IS_ERR(phy_provider)) {
95 err = PTR_ERR(phy_provider);
96 dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
97 goto out;
98 }
99
100 generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
101 if (IS_ERR(generic_phy)) {
102 err = PTR_ERR(generic_phy);
103 dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
104 goto out;
105 }
106
107 common_cfg->phy_spec_ops = phy_spec_ops;
108 common_cfg->dev = dev;
109
110out:
111 return generic_phy;
112}
113
114/*
115 * This assumes the embedded phy structure inside generic_phy is of type
116 * struct ufs_qcom_phy. In order to function properly it's crucial
117 * to keep the embedded struct "struct ufs_qcom_phy common_cfg"
118 * as the first member of the device-specific phy structure.
119 */
120struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
121{
122 return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
123}
124
125static
126int ufs_qcom_phy_base_init(struct platform_device *pdev,
127 struct ufs_qcom_phy *phy_common)
128{
129 struct device *dev = &pdev->dev;
130 struct resource *res;
131 int err = 0;
132
133 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
134 if (!res) {
135 dev_err(dev, "%s: phy_mem resource not found\n", __func__);
136 err = -ENOMEM;
137 goto out;
138 }
139
140 phy_common->mmio = devm_ioremap_resource(dev, res);
141 if (IS_ERR((void const *)phy_common->mmio)) {
142 err = PTR_ERR((void const *)phy_common->mmio);
143 phy_common->mmio = NULL;
144 dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
145 __func__, err);
146 goto out;
147 }
148
149 /* "dev_ref_clk_ctrl_mem" is an optional resource */
150 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
151 "dev_ref_clk_ctrl_mem");
152 if (!res) {
153 dev_dbg(dev, "%s: dev_ref_clk_ctrl_mem resource not found\n",
154 __func__);
155 goto out;
156 }
157
158 phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
159 if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio)) {
160 err = PTR_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio);
161 phy_common->dev_ref_clk_ctrl_mmio = NULL;
162 dev_err(dev, "%s: ioremap for dev_ref_clk_ctrl_mem resource failed %d\n",
163 __func__, err);
164 }
165
166out:
167 return err;
168}
169
170static int __ufs_qcom_phy_clk_get(struct phy *phy,
171 const char *name, struct clk **clk_out, bool err_print)
172{
173 struct clk *clk;
174 int err = 0;
175 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
176 struct device *dev = ufs_qcom_phy->dev;
177
178 clk = devm_clk_get(dev, name);
179 if (IS_ERR(clk)) {
180 err = PTR_ERR(clk);
181 if (err_print)
182 dev_err(dev, "failed to get %s err %d", name, err);
183 } else {
184 *clk_out = clk;
185 }
186
187 return err;
188}
189
190static
191int ufs_qcom_phy_clk_get(struct phy *phy,
192 const char *name, struct clk **clk_out)
193{
194 return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
195}
196
197int
198ufs_qcom_phy_init_clks(struct phy *generic_phy,
199 struct ufs_qcom_phy *phy_common)
200{
201 int err;
202
203 err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
204 &phy_common->tx_iface_clk);
205 if (err)
206 goto out;
207
208 err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
209 &phy_common->rx_iface_clk);
210 if (err)
211 goto out;
212
213 err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
214 &phy_common->ref_clk_src);
215 if (err)
216 goto out;
217
218 /*
219 * "ref_clk_parent" is optional hence don't abort init if it's not
220 * found.
221 */
222 __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
223 &phy_common->ref_clk_parent, false);
224
225 err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
226 &phy_common->ref_clk);
227
228out:
229 return err;
230}
231
232int
233ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
234 struct ufs_qcom_phy *phy_common)
235{
236 int err;
237
238 err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
239 "vdda-pll");
240 if (err)
241 goto out;
242
243 err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
244 "vdda-phy");
245
246 if (err)
247 goto out;
248
249 /* vddp-ref-clk-* properties are optional */
250 __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
251 "vddp-ref-clk", true);
252out:
253 return err;
254}
255
256static int __ufs_qcom_phy_init_vreg(struct phy *phy,
257 struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
258{
259 int err = 0;
260 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
261 struct device *dev = ufs_qcom_phy->dev;
262
263 char prop_name[MAX_PROP_NAME];
264
265 vreg->name = kstrdup(name, GFP_KERNEL);
266 if (!vreg->name) {
267 err = -ENOMEM;
268 goto out;
269 }
270
271 vreg->reg = devm_regulator_get(dev, name);
272 if (IS_ERR(vreg->reg)) {
273 err = PTR_ERR(vreg->reg);
274 vreg->reg = NULL;
275 if (!optional)
276 dev_err(dev, "failed to get %s, %d\n", name, err);
277 goto out;
278 }
279
280 if (dev->of_node) {
281 snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
282 err = of_property_read_u32(dev->of_node,
283 prop_name, &vreg->max_uA);
284 if (err && err != -EINVAL) {
285 dev_err(dev, "%s: failed to read %s\n",
286 __func__, prop_name);
287 goto out;
288 } else if (err == -EINVAL || !vreg->max_uA) {
289 if (regulator_count_voltages(vreg->reg) > 0) {
290 dev_err(dev, "%s: %s is mandatory\n", __func__, prop_name);
291 err = -EINVAL;
292 goto out;
293 }
294 err = 0;
295 }
296 snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
297 if (of_get_property(dev->of_node, prop_name, NULL))
298 vreg->is_always_on = true;
299 else
300 vreg->is_always_on = false;
301 }
302
303 if (!strcmp(name, "vdda-pll")) {
304 vreg->max_uV = VDDA_PLL_MAX_UV;
305 vreg->min_uV = VDDA_PLL_MIN_UV;
306 } else if (!strcmp(name, "vdda-phy")) {
307 vreg->max_uV = VDDA_PHY_MAX_UV;
308 vreg->min_uV = VDDA_PHY_MIN_UV;
309 } else if (!strcmp(name, "vddp-ref-clk")) {
310 vreg->max_uV = VDDP_REF_CLK_MAX_UV;
311 vreg->min_uV = VDDP_REF_CLK_MIN_UV;
312 }
313
314out:
315 if (err)
316 kfree(vreg->name);
317 return err;
318}
319
320static int ufs_qcom_phy_init_vreg(struct phy *phy,
321 struct ufs_qcom_phy_vreg *vreg, const char *name)
322{
323 return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
324}
325
326static
327int ufs_qcom_phy_cfg_vreg(struct phy *phy,
328 struct ufs_qcom_phy_vreg *vreg, bool on)
329{
330 int ret = 0;
331 struct regulator *reg = vreg->reg;
332 const char *name = vreg->name;
333 int min_uV;
334 int uA_load;
335 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
336 struct device *dev = ufs_qcom_phy->dev;
337
338 BUG_ON(!vreg);
339
340 if (regulator_count_voltages(reg) > 0) {
341 min_uV = on ? vreg->min_uV : 0;
342 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
343 if (ret) {
344 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
345 __func__, name, ret);
346 goto out;
347 }
348 uA_load = on ? vreg->max_uA : 0;
349 ret = regulator_set_optimum_mode(reg, uA_load);
350 if (ret >= 0) {
351 /*
352 * regulator_set_optimum_mode() returns new regulator
353 * mode upon success.
354 */
355 ret = 0;
356 } else {
357 dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
358 __func__, name, uA_load, ret);
359 goto out;
360 }
361 }
362out:
363 return ret;
364}
365
366static
367int ufs_qcom_phy_enable_vreg(struct phy *phy,
368 struct ufs_qcom_phy_vreg *vreg)
369{
370 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
371 struct device *dev = ufs_qcom_phy->dev;
372 int ret = 0;
373
374 if (!vreg || vreg->enabled)
375 goto out;
376
377 ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
378 if (ret) {
379 dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
380 __func__, ret);
381 goto out;
382 }
383
384 ret = regulator_enable(vreg->reg);
385 if (ret) {
386 dev_err(dev, "%s: enable failed, err=%d\n",
387 __func__, ret);
388 goto out;
389 }
390
391 vreg->enabled = true;
392out:
393 return ret;
394}
395
396int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
397{
398 int ret = 0;
399 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
400
401 if (phy->is_ref_clk_enabled)
402 goto out;
403
404 /*
405 * reference clock is propagated in a daisy-chained manner from
406 * source to the phy, so ungate the clocks at each stage.
407 */
408 ret = clk_prepare_enable(phy->ref_clk_src);
409 if (ret) {
410 dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
411 __func__, ret);
412 goto out;
413 }
414
415 /*
416 * "ref_clk_parent" is optional clock hence make sure that clk reference
417 * is available before trying to enable the clock.
418 */
419 if (phy->ref_clk_parent) {
420 ret = clk_prepare_enable(phy->ref_clk_parent);
421 if (ret) {
422 dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
423 __func__, ret);
424 goto out_disable_src;
425 }
426 }
427
428 ret = clk_prepare_enable(phy->ref_clk);
429 if (ret) {
430 dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
431 __func__, ret);
432 goto out_disable_parent;
433 }
434
435 phy->is_ref_clk_enabled = true;
436 goto out;
437
438out_disable_parent:
439 if (phy->ref_clk_parent)
440 clk_disable_unprepare(phy->ref_clk_parent);
441out_disable_src:
442 clk_disable_unprepare(phy->ref_clk_src);
443out:
444 return ret;
445}
446
447static
448int ufs_qcom_phy_disable_vreg(struct phy *phy,
449 struct ufs_qcom_phy_vreg *vreg)
450{
451 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
452 struct device *dev = ufs_qcom_phy->dev;
453 int ret = 0;
454
455 if (!vreg || !vreg->enabled || vreg->is_always_on)
456 goto out;
457
458 ret = regulator_disable(vreg->reg);
459
460 if (!ret) {
461 /* ignore errors on applying disable config */
462 ufs_qcom_phy_cfg_vreg(phy, vreg, false);
463 vreg->enabled = false;
464 } else {
465 dev_err(dev, "%s: %s disable failed, err=%d\n",
466 __func__, vreg->name, ret);
467 }
468out:
469 return ret;
470}
471
472void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
473{
474 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
475
476 if (phy->is_ref_clk_enabled) {
477 clk_disable_unprepare(phy->ref_clk);
478 /*
479 * "ref_clk_parent" is optional clock hence make sure that clk
480 * reference is available before trying to disable the clock.
481 */
482 if (phy->ref_clk_parent)
483 clk_disable_unprepare(phy->ref_clk_parent);
484 clk_disable_unprepare(phy->ref_clk_src);
485 phy->is_ref_clk_enabled = false;
486 }
487}
488
489#define UFS_REF_CLK_EN (1 << 5)
490
491static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
492{
493 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
494
495 if (phy->dev_ref_clk_ctrl_mmio &&
496 (enable ^ phy->is_dev_ref_clk_enabled)) {
497 u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);
498
499 if (enable)
500 temp |= UFS_REF_CLK_EN;
501 else
502 temp &= ~UFS_REF_CLK_EN;
503
504 /*
505 * If we are here to disable this clock immediately after
506 * entering into hibern8, we need to make sure that device
507 * ref_clk is active for at least 1us after the hibern8 enter.
508 */
509 if (!enable)
510 udelay(1);
511
512 writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
513 /* ensure that ref_clk is enabled/disabled before we return */
514 wmb();
515 /*
516 * If we call hibern8 exit after this, we need to make sure that
517 * device ref_clk is stable for at least 1us before the hibern8
518 * exit command.
519 */
520 if (enable)
521 udelay(1);
522
523 phy->is_dev_ref_clk_enabled = enable;
524 }
525}
526
527void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
528{
529 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
530}
531
532void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
533{
534 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
535}
536
537/* Turn ON M-PHY RMMI interface clocks */
538int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
539{
540 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
541 int ret = 0;
542
543 if (phy->is_iface_clk_enabled)
544 goto out;
545
546 ret = clk_prepare_enable(phy->tx_iface_clk);
547 if (ret) {
548 dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
549 __func__, ret);
550 goto out;
551 }
552 ret = clk_prepare_enable(phy->rx_iface_clk);
553 if (ret) {
554 clk_disable_unprepare(phy->tx_iface_clk);
555 dev_err(phy->dev, "%s: rx_iface_clk enable failed %d, disabling tx_iface_clk as well\n",
556 __func__, ret);
557 goto out;
558 }
559 phy->is_iface_clk_enabled = true;
560
561out:
562 return ret;
563}
564
565/* Turn OFF M-PHY RMMI interface clocks */
566void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
567{
568 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
569
570 if (phy->is_iface_clk_enabled) {
571 clk_disable_unprepare(phy->tx_iface_clk);
572 clk_disable_unprepare(phy->rx_iface_clk);
573 phy->is_iface_clk_enabled = false;
574 }
575}
576
577int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
578{
579 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
580 int ret = 0;
581
582 if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
583 dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
584 __func__);
585 ret = -ENOTSUPP;
586 } else {
587 ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
588 }
589
590 return ret;
591}
592
593int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
594{
595 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
596 int ret = 0;
597
598 if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
599 dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
600 __func__);
601 ret = -ENOTSUPP;
602 } else {
603 ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
604 tx_lanes);
605 }
606
607 return ret;
608}
609
610void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
611 u8 major, u16 minor, u16 step)
612{
613 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
614
615 ufs_qcom_phy->host_ctrl_rev_major = major;
616 ufs_qcom_phy->host_ctrl_rev_minor = minor;
617 ufs_qcom_phy->host_ctrl_rev_step = step;
618}
619
620int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
621{
622 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
623 int ret = 0;
624
625 if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
626 dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
627 __func__);
628 ret = -ENOTSUPP;
629 } else {
630 ret = ufs_qcom_phy->phy_spec_ops->
631 calibrate_phy(ufs_qcom_phy, is_rate_B);
632 if (ret)
633 dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
634 __func__, ret);
635 }
636
637 return ret;
638}
639
640int ufs_qcom_phy_remove(struct phy *generic_phy,
641 struct ufs_qcom_phy *ufs_qcom_phy)
642{
643 phy_power_off(generic_phy);
644
645 kfree(ufs_qcom_phy->vdda_pll.name);
646 kfree(ufs_qcom_phy->vdda_phy.name);
647
648 return 0;
649}
650
651int ufs_qcom_phy_exit(struct phy *generic_phy)
652{
653 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
654
655 if (ufs_qcom_phy->is_powered_on)
656 phy_power_off(generic_phy);
657
658 return 0;
659}
660
661int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
662{
663 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
664
665 if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
666 dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
667 __func__);
668 return -ENOTSUPP;
669 }
670
671 return ufs_qcom_phy->phy_spec_ops->
672 is_physical_coding_sublayer_ready(ufs_qcom_phy);
673}
674
675int ufs_qcom_phy_power_on(struct phy *generic_phy)
676{
677 struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
678 struct device *dev = phy_common->dev;
679 int err;
680
681 err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
682 if (err) {
683 dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
684 __func__, err);
685 goto out;
686 }
687
688 phy_common->phy_spec_ops->power_control(phy_common, true);
689
690 /* vdda_pll also enables ref clock LDOs so enable it first */
691 err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
692 if (err) {
693 dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
694 __func__, err);
695 goto out_disable_phy;
696 }
697
698 err = ufs_qcom_phy_enable_ref_clk(generic_phy);
699 if (err) {
700 dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
701 __func__, err);
702 goto out_disable_pll;
703 }
704
705 /* enable device PHY ref_clk pad rail */
706 if (phy_common->vddp_ref_clk.reg) {
707 err = ufs_qcom_phy_enable_vreg(generic_phy,
708 &phy_common->vddp_ref_clk);
709 if (err) {
710 dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
711 __func__, err);
712 goto out_disable_ref_clk;
713 }
714 }
715
716 phy_common->is_powered_on = true;
717 goto out;
718
719out_disable_ref_clk:
720 ufs_qcom_phy_disable_ref_clk(generic_phy);
721out_disable_pll:
722 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
723out_disable_phy:
724 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
725out:
726 return err;
727}
728
729int ufs_qcom_phy_power_off(struct phy *generic_phy)
730{
731 struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
732
733 phy_common->phy_spec_ops->power_control(phy_common, false);
734
735 if (phy_common->vddp_ref_clk.reg)
736 ufs_qcom_phy_disable_vreg(generic_phy,
737 &phy_common->vddp_ref_clk);
738 ufs_qcom_phy_disable_ref_clk(generic_phy);
739
740 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
741 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
742 phy_common->is_powered_on = false;
743
744 return 0;
745}
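From the host controller side, a plausible PHY bring-up order using the helpers above looks as follows. This is a hedged sketch: the exact sequence used by the UFS host driver, and the host-facing header that declares ufs_qcom_phy_calibrate_phy() and friends, are outside the scope of this patch.

/*
 * Host-side sketch. Assumes the host driver has already obtained
 * generic_phy (e.g. via phy_get()) and that the ufs_qcom_phy_* helpers
 * below are declared in a host-facing header not shown in this diff.
 */
static int example_ufs_phy_power_up(struct phy *generic_phy, bool is_rate_B)
{
	int err;

	/* enables vdda_phy/vdda_pll, the analog rail and the ref clock chain */
	err = phy_power_on(generic_phy);
	if (err)
		return err;

	err = ufs_qcom_phy_calibrate_phy(generic_phy, is_rate_B);
	if (err)
		goto out_power_off;

	err = ufs_qcom_phy_start_serdes(generic_phy);
	if (err)
		goto out_power_off;

	err = ufs_qcom_phy_is_pcs_ready(generic_phy);
	if (err)
		goto out_power_off;

	return 0;

out_power_off:
	phy_power_off(generic_phy);
	return err;
}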