 drivers/scsi/ufs/Kconfig    |   13 +
 drivers/scsi/ufs/Makefile   |    1 +
 drivers/scsi/ufs/ufs-qcom.c | 1004 +
 drivers/scsi/ufs/ufs-qcom.h |  170 +
 4 files changed, 1188 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 6e07b2afddeb..8a1f4b355416 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -70,3 +70,16 @@ config SCSI_UFSHCD_PLATFORM
 	  If you have a controller with this interface, say Y or M here.
 
 	  If unsure, say N.
+
+config SCSI_UFS_QCOM
+	bool "QCOM specific hooks to UFS controller platform driver"
+	depends on SCSI_UFSHCD_PLATFORM && ARCH_MSM
+	select PHY_QCOM_UFS
+	help
+	  This selects the QCOM specific additions to the UFSHCD platform driver.
+	  The UFS host on QCOM needs some vendor specific configuration before
+	  accessing the hardware, which includes PHY configuration and vendor
+	  specific registers.
+
+	  Select this if you have a UFS controller on a QCOM chipset.
+	  If unsure, say N.
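# Usage sketch (illustrative): a board configuration that enables this driver
# also needs the options it depends on, e.g.
#   CONFIG_SCSI_UFSHCD=y
#   CONFIG_SCSI_UFSHCD_PLATFORM=y
#   CONFIG_SCSI_UFS_QCOM=y
# (ARCH_MSM must already be set; PHY_QCOM_UFS is selected automatically.)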
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 1e5bd48457d6..8303bcce7a23 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,4 +1,5 @@
 # UFSHCD makefile
+obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
 obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
new file mode 100644
index 000000000000..9217af9bf734
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -0,0 +1,1004 @@
/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>

#include <linux/phy/phy-qcom-ufs.h>
#include "ufshcd.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result);
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode);
static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		dev_err(dev, "%s: failed to get %s err %d",
				__func__, name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
			host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
			host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
			host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
			&host->tx_l1_sync_clk);
out:
	return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);

out:
	return err;
}

static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * we might have scheduled out for long during polling so
	 * check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	int ret = 0;
	u8 major;
	u16 minor, step;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
			? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ufs_qcom_get_controller_revision(hba, &major, &minor, &step);
	ufs_qcom_phy_save_controller_version(phy, major, minor, step);
	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. The UTP controller CGCs are disabled by default,
 * and this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
{
	struct ufs_qcom_host *host = hba->priv;
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);

		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/**
 * Returns the core_clk rate (non-zero) on success and 0
 * in case of a failure.
 */
static unsigned long
ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
{
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
	};

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	/* this register's two fields must be written at once */
	ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
		      REG_UFS_TX_SYMBOL_CLK_NS_US);
	goto out;

out_error:
	core_clk_rate = 0;
out:
	return core_clk_rate;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status)
{
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_100ms;

	switch (status) {
	case PRE_CHANGE:
		core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1,
						    SLOWAUTO_MODE, 0);
		if (!core_clk_rate) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			return -EINVAL;
		}
		core_clk_cycles_per_100ms =
			(core_clk_rate / MSEC_PER_SEC) * 100;
		ufshcd_writel(hba, core_clk_cycles_per_100ms,
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return 0;
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	int ret = 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* Assert PHY soft reset */
		ufs_qcom_assert_reset(hba);
		goto out;
	}

	/*
	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
	 * rail and low noise analog power rail for PLL can be switched off.
	 */
	if (!ufs_qcom_is_link_active(hba))
		phy_power_off(phy);

out:
	return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	hba->is_sys_suspended = false;

out:
	return err;
}

struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};

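/*
 * ufs_qcom_get_pwr_dev_param() - negotiate the power mode parameters.
 * @qcom_param: vendor preferred limits (gears, lanes, power modes, HS rate)
 * @dev_max: maximum capabilities reported by the attached device
 * @agreed_pwr: output - the power mode parameters to actually request
 *
 * Picks the lower of the device capabilities and the vendor limits for the
 * number of lanes and the gear, and resolves HS vs. PWM mode as explained in
 * the inline comments below. Returns 0 on success or -ENOTSUPP if the device
 * and the vendor limits cannot be reconciled.
 */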
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if the device capabilities and the vendor pre-defined preferences
	 * are both HS or both PWM, then set the minimum gear as the chosen
	 * working gear.
	 * if one is PWM and one is HS, then the one that is PWM gets to decide
	 * the gear, as it is the one that also decided previously what
	 * pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}

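/*
 * Translate the currently agreed power mode (host->dev_req_params) into a
 * bus bandwidth vote ("MIN", "PWM_G*_L*" or "HS_R*_G*_L*") and apply it.
 */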
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				      bool status,
				      struct ufs_pa_layer_attr *dev_max_params,
				      struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		break;
	case POST_CHANGE:
		if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					 dev_req_params->pwr_rx,
					 dev_req_params->hs_rate)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
		       dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller may have some non-standard behaviours (quirks)
 * compared with what the UFSHCI specification defines. Advertise all such
 * quirks to the standard UFS host controller driver so that it takes them
 * into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	u8 major;
	u16 minor, step;

	ufs_qcom_get_controller_revision(hba, &major, &minor, &step);

	/*
	 * TBD
	 * here we should be advertising controller quirks according to
	 * controller version.
	 */
}

static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote)
		host->bus_vote.curr_vote = vote;

	return err;
}

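/*
 * Build the bus vector name for the given power mode. The generated string
 * must match one of the "qcom,bus-vector-names" DT entries, e.g. "MIN" when
 * no power mode has been negotiated yet, "PWM_G1_L1" for PWM gear 1 on one
 * lane, or "HS_RB_G2_L2" for HS rate B, gear 2, two lanes.
 */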
static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}

static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = hba->priv;
	int err = 0;
	int vote = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * ufs_qcom_setup_clocks() will be called again from
	 * ufs_qcom_init() once initialization is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
				__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		/* enable the device ref clock */
		ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy);
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);
	} else {
		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		if (!ufs_qcom_is_link_active(hba)) {
			/* turn off UFS local PHY ref_clk */
			ufs_qcom_phy_disable_ref_clk(host->generic_phy);
			/* disable device ref_clk */
			ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy);
		}
		vote = host->bus_vote.min_bw_vote;
	}

	err = ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

out:
	return err;
}

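/*
 * The "max_bus_bw" sysfs attribute (registered in ufs_qcom_bus_register())
 * lets user space influence the bus vote: writing a non-zero value sets
 * is_max_bw_needed and re-evaluates the vote, writing zero clears it.
 */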
static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = hba->priv;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = hba->priv;
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
			__func__, err);
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}

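/*
 * "androidboot.bootdevice=" on the kernel command line names the boot device;
 * if it is set and does not match this controller, ufs_qcom_init() returns
 * -ENODEV so that only the boot UFS controller is probed.
 */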
#define	ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
static int get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on
 * PHY power up failure, and zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct ufs_qcom_host *host;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	hba->priv = (void *)host;

	host->generic_phy = devm_phy_get(dev, "ufsphy");

	if (IS_ERR(host->generic_phy)) {
		err = PTR_ERR(host->generic_phy);
		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
		goto out;
	}

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_host_free;

	phy_init(host->generic_phy);
	err = phy_power_on(host->generic_phy);
	if (err)
		goto out_unregister_bus;

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_disable_phy;

	ufs_qcom_advertise_quirks(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	ufs_qcom_setup_clocks(hba, true);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	goto out;

out_disable_phy:
	phy_power_off(host->generic_phy);
out_unregister_bus:
	phy_exit(host->generic_phy);
out_host_free:
	devm_kfree(dev, host);
	hba->priv = NULL;
out:
	return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
}

static
void ufs_qcom_clk_scale_notify(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;

	if (!dev_req_params)
		return;

	ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
			    dev_req_params->pwr_rx,
			    dev_req_params->hs_rate);
}

/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
};
EXPORT_SYMBOL(ufs_hba_qcom_vops);
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
new file mode 100644
index 000000000000..9a6febd007df
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -0,0 +1,170 @@
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef UFS_QCOM_H_
#define UFS_QCOM_H_

#define MAX_UFS_QCOM_HOSTS	1
#define MAX_U32			(~(u32)0)
#define MPHY_TX_FSM_STATE	0x41
#define TX_FSM_HIBERN8		0x1
#define HBRN8_POLL_TOUT_MS	100
#define DEFAULT_CLK_RATE_HZ	1000000
#define BUS_VECTOR_NAME_LEN	32

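/* REG_UFS_HW_VERSION layout: major [31:28], minor [27:16], step [15:0] */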
#define UFS_HW_VER_MAJOR_SHFT	(28)
#define UFS_HW_VER_MAJOR_MASK	(0x000F << UFS_HW_VER_MAJOR_SHFT)
#define UFS_HW_VER_MINOR_SHFT	(16)
#define UFS_HW_VER_MINOR_MASK	(0x0FFF << UFS_HW_VER_MINOR_SHFT)
#define UFS_HW_VER_STEP_SHFT	(0)
#define UFS_HW_VER_STEP_MASK	(0xFFFF << UFS_HW_VER_STEP_SHFT)

/* vendor specific pre-defined parameters */
#define SLOW	1
#define FAST	2

#define UFS_QCOM_LIMIT_NUM_LANES_RX	2
#define UFS_QCOM_LIMIT_NUM_LANES_TX	2
#define UFS_QCOM_LIMIT_HSGEAR_RX	UFS_HS_G2
#define UFS_QCOM_LIMIT_HSGEAR_TX	UFS_HS_G2
#define UFS_QCOM_LIMIT_PWMGEAR_RX	UFS_PWM_G4
#define UFS_QCOM_LIMIT_PWMGEAR_TX	UFS_PWM_G4
#define UFS_QCOM_LIMIT_RX_PWR_PWM	SLOW_MODE
#define UFS_QCOM_LIMIT_TX_PWR_PWM	SLOW_MODE
#define UFS_QCOM_LIMIT_RX_PWR_HS	FAST_MODE
#define UFS_QCOM_LIMIT_TX_PWR_HS	FAST_MODE
#define UFS_QCOM_LIMIT_HS_RATE		PA_HS_MODE_B
#define UFS_QCOM_LIMIT_DESIRED_MODE	FAST

/* QCOM UFS host controller vendor specific registers */
enum {
	REG_UFS_SYS1CLK_1US			= 0xC0,
	REG_UFS_TX_SYMBOL_CLK_NS_US		= 0xC4,
	REG_UFS_LOCAL_PORT_ID_REG		= 0xC8,
	REG_UFS_PA_ERR_CODE			= 0xCC,
	REG_UFS_RETRY_TIMER_REG			= 0xD0,
	REG_UFS_PA_LINK_STARTUP_TIMER		= 0xD8,
	REG_UFS_CFG1				= 0xDC,
	REG_UFS_CFG2				= 0xE0,
	REG_UFS_HW_VERSION			= 0xE4,

	UFS_DBG_RD_REG_UAWM			= 0x100,
	UFS_DBG_RD_REG_UARM			= 0x200,
	UFS_DBG_RD_REG_TXUC			= 0x300,
	UFS_DBG_RD_REG_RXUC			= 0x400,
	UFS_DBG_RD_REG_DFC			= 0x500,
	UFS_DBG_RD_REG_TRLUT			= 0x600,
	UFS_DBG_RD_REG_TMRLUT			= 0x700,
	UFS_UFS_DBG_RD_REG_OCSC			= 0x800,

	UFS_UFS_DBG_RD_DESC_RAM			= 0x1500,
	UFS_UFS_DBG_RD_PRDT_RAM			= 0x1700,
	UFS_UFS_DBG_RD_RESP_RAM			= 0x1800,
	UFS_UFS_DBG_RD_EDTL_RAM			= 0x1900,
};

/* bit definitions for REG_UFS_CFG2 register */
#define UAWM_HW_CGC_EN		(1 << 0)
#define UARM_HW_CGC_EN		(1 << 1)
#define TXUC_HW_CGC_EN		(1 << 2)
#define RXUC_HW_CGC_EN		(1 << 3)
#define DFC_HW_CGC_EN		(1 << 4)
#define TRLUT_HW_CGC_EN		(1 << 5)
#define TMRLUT_HW_CGC_EN	(1 << 6)
#define OCSC_HW_CGC_EN		(1 << 7)

#define REG_UFS_CFG2_CGC_EN_ALL	(UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
				 TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
				 DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
				 TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)

/* bit offset */
enum {
	OFFSET_UFS_PHY_SOFT_RESET	= 1,
	OFFSET_CLK_NS_REG		= 10,
};

/* bit masks */
enum {
	MASK_UFS_PHY_SOFT_RESET		= 0x2,
	MASK_TX_SYMBOL_CLK_1US_REG	= 0x3FF,
	MASK_CLK_NS_REG			= 0xFFFC00,
};

enum ufs_qcom_phy_init_type {
	UFS_PHY_INIT_FULL,
	UFS_PHY_INIT_CFG_RESTORE,
};

static inline void
ufs_qcom_get_controller_revision(struct ufs_hba *hba,
				 u8 *major, u16 *minor, u16 *step)
{
	u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);

	*major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
	*minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
	*step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
};

static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
{
	ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
			1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);

	/*
	 * Make sure assertion of ufs phy reset is written to
	 * register before returning
	 */
	mb();
}

static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
{
	ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
			0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);

	/*
	 * Make sure de-assertion of ufs phy reset is written to
	 * register before returning
	 */
	mb();
}

struct ufs_qcom_bus_vote {
	uint32_t client_handle;
	uint32_t curr_vote;
	int min_bw_vote;
	int max_bw_vote;
	int saved_vote;
	bool is_max_bw_needed;
	struct device_attribute max_bus_bw;
};

struct ufs_qcom_host {
	struct phy *generic_phy;
	struct ufs_hba *hba;
	struct ufs_qcom_bus_vote bus_vote;
	struct ufs_pa_layer_attr dev_req_params;
	struct clk *rx_l0_sync_clk;
	struct clk *tx_l0_sync_clk;
	struct clk *rx_l1_sync_clk;
	struct clk *tx_l1_sync_clk;
	bool is_lane_clks_enabled;
};

#define ufs_qcom_is_link_off(hba)	ufshcd_is_link_off(hba)
#define ufs_qcom_is_link_active(hba)	ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba)	ufshcd_is_link_hibern8(hba)

#endif /* UFS_QCOM_H_ */