-rw-r--r--   drivers/scsi/ufs/ufs.h           |  36
-rw-r--r--   drivers/scsi/ufs/ufshcd-pci.c    |  45
-rw-r--r--   drivers/scsi/ufs/ufshcd-pltfrm.c |  60
-rw-r--r--   drivers/scsi/ufs/ufshcd.c        | 945
-rw-r--r--   drivers/scsi/ufs/ufshcd.h        |  76
-rw-r--r--   drivers/scsi/ufs/ufshci.h        |   9
6 files changed, 989 insertions, 182 deletions
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 37d64c1fb8da..42c459a9d3fe 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -129,6 +129,7 @@ enum { | |||
129 | /* Flag idn for Query Requests*/ | 129 | /* Flag idn for Query Requests*/ |
130 | enum flag_idn { | 130 | enum flag_idn { |
131 | QUERY_FLAG_IDN_FDEVICEINIT = 0x01, | 131 | QUERY_FLAG_IDN_FDEVICEINIT = 0x01, |
132 | QUERY_FLAG_IDN_PWR_ON_WPE = 0x03, | ||
132 | QUERY_FLAG_IDN_BKOPS_EN = 0x04, | 133 | QUERY_FLAG_IDN_BKOPS_EN = 0x04, |
133 | }; | 134 | }; |
134 | 135 | ||
@@ -194,6 +195,18 @@ enum unit_desc_param { | |||
194 | UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, | 195 | UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, |
195 | }; | 196 | }; |
196 | 197 | ||
198 | /* | ||
199 | * Logical Unit Write Protect | ||
200 | * 00h: LU not write protected | ||
201 | * 01h: LU write protected when fPowerOnWPEn =1 | ||
202 | * 02h: LU permanently write protected when fPermanentWPEn =1 | ||
203 | */ | ||
204 | enum ufs_lu_wp_type { | ||
205 | UFS_LU_NO_WP = 0x00, | ||
206 | UFS_LU_POWER_ON_WP = 0x01, | ||
207 | UFS_LU_PERM_WP = 0x02, | ||
208 | }; | ||
209 | |||
197 | /* bActiveICCLevel parameter current units */ | 210 | /* bActiveICCLevel parameter current units */ |
198 | enum { | 211 | enum { |
199 | UFSHCD_NANO_AMP = 0, | 212 | UFSHCD_NANO_AMP = 0, |
@@ -226,11 +239,12 @@ enum { | |||
226 | }; | 239 | }; |
227 | 240 | ||
228 | /* Background operation status */ | 241 | /* Background operation status */ |
229 | enum { | 242 | enum bkops_status { |
230 | BKOPS_STATUS_NO_OP = 0x0, | 243 | BKOPS_STATUS_NO_OP = 0x0, |
231 | BKOPS_STATUS_NON_CRITICAL = 0x1, | 244 | BKOPS_STATUS_NON_CRITICAL = 0x1, |
232 | BKOPS_STATUS_PERF_IMPACT = 0x2, | 245 | BKOPS_STATUS_PERF_IMPACT = 0x2, |
233 | BKOPS_STATUS_CRITICAL = 0x3, | 246 | BKOPS_STATUS_CRITICAL = 0x3, |
247 | BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL, | ||
234 | }; | 248 | }; |
235 | 249 | ||
236 | /* UTP QUERY Transaction Specific Fields OpCode */ | 250 | /* UTP QUERY Transaction Specific Fields OpCode */ |
@@ -291,6 +305,14 @@ enum { | |||
291 | UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05, | 305 | UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05, |
292 | UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09, | 306 | UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09, |
293 | }; | 307 | }; |
308 | |||
309 | /* UFS device power modes */ | ||
310 | enum ufs_dev_pwr_mode { | ||
311 | UFS_ACTIVE_PWR_MODE = 1, | ||
312 | UFS_SLEEP_PWR_MODE = 2, | ||
313 | UFS_POWERDOWN_PWR_MODE = 3, | ||
314 | }; | ||
315 | |||
294 | /** | 316 | /** |
295 | * struct utp_upiu_header - UPIU header structure | 317 | * struct utp_upiu_header - UPIU header structure |
296 | * @dword_0: UPIU header DW-0 | 318 | * @dword_0: UPIU header DW-0 |
@@ -437,6 +459,12 @@ struct ufs_query_res { | |||
437 | #define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */ | 459 | #define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */ |
438 | #define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */ | 460 | #define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */ |
439 | 461 | ||
462 | /* | ||
463 | * VCCQ & VCCQ2 current requirement when UFS device is in sleep state | ||
464 | * and link is in Hibern8 state. | ||
465 | */ | ||
466 | #define UFS_VREG_LPM_LOAD_UA 1000 /* uA */ | ||
467 | |||
440 | struct ufs_vreg { | 468 | struct ufs_vreg { |
441 | struct regulator *reg; | 469 | struct regulator *reg; |
442 | const char *name; | 470 | const char *name; |
@@ -454,4 +482,10 @@ struct ufs_vreg_info { | |||
454 | struct ufs_vreg *vdd_hba; | 482 | struct ufs_vreg *vdd_hba; |
455 | }; | 483 | }; |
456 | 484 | ||
485 | struct ufs_dev_info { | ||
486 | bool f_power_on_wp_en; | ||
487 | /* Keeps information if any of the LU is power on write protected */ | ||
488 | bool is_lu_power_on_wp; | ||
489 | }; | ||
490 | |||
457 | #endif /* End of Header */ | 491 | #endif /* End of Header */ |
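The ufs.h additions fit together as follows: fPowerOnWPEn (QUERY_FLAG_IDN_PWR_ON_WPE) is a device-wide flag, bLUWriteProtect in the unit descriptor carries the per-LU value from enum ufs_lu_wp_type, and struct ufs_dev_info caches the combined result. A minimal sketch of the check the core driver performs later in this patch (the helper name is illustrative, not part of the patch):

	/*
	 * A LU counts as power-on write protected only when both the
	 * device-wide fPowerOnWPEn flag and the LU's bLUWriteProtect
	 * value (UFS_LU_POWER_ON_WP, 0x01) agree.
	 */
	static bool ufs_lu_is_power_on_wp(struct ufs_dev_info *dev_info,
					  enum ufs_lu_wp_type lu_wp)
	{
		return dev_info->f_power_on_wp_en && lu_wp == UFS_LU_POWER_ON_WP;
	}
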
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 2a26faa95b77..955ed5587011 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -43,34 +43,24 @@ | |||
43 | * @pdev: pointer to PCI device handle | 43 | * @pdev: pointer to PCI device handle |
44 | * @state: power state | 44 | * @state: power state |
45 | * | 45 | * |
46 | * Returns -ENOSYS | 46 | * Returns 0 if successful |
47 | * Returns non-zero otherwise | ||
47 | */ | 48 | */ |
48 | static int ufshcd_pci_suspend(struct device *dev) | 49 | static int ufshcd_pci_suspend(struct device *dev) |
49 | { | 50 | { |
50 | /* | 51 | return ufshcd_system_suspend(dev_get_drvdata(dev)); |
51 | * TODO: | ||
52 | * 1. Call ufshcd_suspend | ||
53 | * 2. Do bus specific power management | ||
54 | */ | ||
55 | |||
56 | return -ENOSYS; | ||
57 | } | 52 | } |
58 | 53 | ||
59 | /** | 54 | /** |
60 | * ufshcd_pci_resume - resume power management function | 55 | * ufshcd_pci_resume - resume power management function |
61 | * @pdev: pointer to PCI device handle | 56 | * @pdev: pointer to PCI device handle |
62 | * | 57 | * |
63 | * Returns -ENOSYS | 58 | * Returns 0 if successful |
59 | * Returns non-zero otherwise | ||
64 | */ | 60 | */ |
65 | static int ufshcd_pci_resume(struct device *dev) | 61 | static int ufshcd_pci_resume(struct device *dev) |
66 | { | 62 | { |
67 | /* | 63 | return ufshcd_system_resume(dev_get_drvdata(dev)); |
68 | * TODO: | ||
69 | * 1. Call ufshcd_resume. | ||
70 | * 2. Do bus specific wake up | ||
71 | */ | ||
72 | |||
73 | return -ENOSYS; | ||
74 | } | 64 | } |
75 | #else | 65 | #else |
76 | #define ufshcd_pci_suspend NULL | 66 | #define ufshcd_pci_suspend NULL |
@@ -80,30 +70,15 @@ static int ufshcd_pci_resume(struct device *dev) | |||
80 | #ifdef CONFIG_PM_RUNTIME | 70 | #ifdef CONFIG_PM_RUNTIME |
81 | static int ufshcd_pci_runtime_suspend(struct device *dev) | 71 | static int ufshcd_pci_runtime_suspend(struct device *dev) |
82 | { | 72 | { |
83 | struct ufs_hba *hba = dev_get_drvdata(dev); | 73 | return ufshcd_runtime_suspend(dev_get_drvdata(dev)); |
84 | |||
85 | if (!hba) | ||
86 | return 0; | ||
87 | |||
88 | return ufshcd_runtime_suspend(hba); | ||
89 | } | 74 | } |
90 | static int ufshcd_pci_runtime_resume(struct device *dev) | 75 | static int ufshcd_pci_runtime_resume(struct device *dev) |
91 | { | 76 | { |
92 | struct ufs_hba *hba = dev_get_drvdata(dev); | 77 | return ufshcd_runtime_resume(dev_get_drvdata(dev)); |
93 | |||
94 | if (!hba) | ||
95 | return 0; | ||
96 | |||
97 | return ufshcd_runtime_resume(hba); | ||
98 | } | 78 | } |
99 | static int ufshcd_pci_runtime_idle(struct device *dev) | 79 | static int ufshcd_pci_runtime_idle(struct device *dev) |
100 | { | 80 | { |
101 | struct ufs_hba *hba = dev_get_drvdata(dev); | 81 | return ufshcd_runtime_idle(dev_get_drvdata(dev)); |
102 | |||
103 | if (!hba) | ||
104 | return 0; | ||
105 | |||
106 | return ufshcd_runtime_idle(hba); | ||
107 | } | 82 | } |
108 | #else /* !CONFIG_PM_RUNTIME */ | 83 | #else /* !CONFIG_PM_RUNTIME */ |
109 | #define ufshcd_pci_runtime_suspend NULL | 84 | #define ufshcd_pci_runtime_suspend NULL |
@@ -117,7 +92,7 @@ static int ufshcd_pci_runtime_idle(struct device *dev) | |||
117 | */ | 92 | */ |
118 | static void ufshcd_pci_shutdown(struct pci_dev *pdev) | 93 | static void ufshcd_pci_shutdown(struct pci_dev *pdev) |
119 | { | 94 | { |
120 | ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev)); | 95 | ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev)); |
121 | } | 96 | } |
122 | 97 | ||
123 | /** | 98 | /** |
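With the TODO stubs gone, the PCI glue is now just a forwarding layer: each dev_pm_ops hook hands its struct device straight to the corresponding ufshcd core helper. The pm ops table itself is outside the hunks shown here; the expected wiring looks roughly like the sketch below (the table name and exact member set are assumptions):

	static const struct dev_pm_ops ufshcd_pci_pm_ops = {
		.suspend	 = ufshcd_pci_suspend,
		.resume		 = ufshcd_pci_resume,
		.runtime_suspend = ufshcd_pci_runtime_suspend,
		.runtime_resume	 = ufshcd_pci_runtime_resume,
		.runtime_idle	 = ufshcd_pci_runtime_idle,
	};
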
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index dde4e6e3be70..2482bbac3681 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -225,45 +225,24 @@ out: | |||
225 | * ufshcd_pltfrm_suspend - suspend power management function | 225 | * ufshcd_pltfrm_suspend - suspend power management function |
226 | * @dev: pointer to device handle | 226 | * @dev: pointer to device handle |
227 | * | 227 | * |
228 | * | 228 | * Returns 0 if successful |
229 | * Returns 0 | 229 | * Returns non-zero otherwise |
230 | */ | 230 | */ |
231 | static int ufshcd_pltfrm_suspend(struct device *dev) | 231 | static int ufshcd_pltfrm_suspend(struct device *dev) |
232 | { | 232 | { |
233 | struct platform_device *pdev = to_platform_device(dev); | 233 | return ufshcd_system_suspend(dev_get_drvdata(dev)); |
234 | struct ufs_hba *hba = platform_get_drvdata(pdev); | ||
235 | |||
236 | /* | ||
237 | * TODO: | ||
238 | * 1. Call ufshcd_suspend | ||
239 | * 2. Do bus specific power management | ||
240 | */ | ||
241 | |||
242 | disable_irq(hba->irq); | ||
243 | |||
244 | return 0; | ||
245 | } | 234 | } |
246 | 235 | ||
247 | /** | 236 | /** |
248 | * ufshcd_pltfrm_resume - resume power management function | 237 | * ufshcd_pltfrm_resume - resume power management function |
249 | * @dev: pointer to device handle | 238 | * @dev: pointer to device handle |
250 | * | 239 | * |
251 | * Returns 0 | 240 | * Returns 0 if successful |
241 | * Returns non-zero otherwise | ||
252 | */ | 242 | */ |
253 | static int ufshcd_pltfrm_resume(struct device *dev) | 243 | static int ufshcd_pltfrm_resume(struct device *dev) |
254 | { | 244 | { |
255 | struct platform_device *pdev = to_platform_device(dev); | 245 | return ufshcd_system_resume(dev_get_drvdata(dev)); |
256 | struct ufs_hba *hba = platform_get_drvdata(pdev); | ||
257 | |||
258 | /* | ||
259 | * TODO: | ||
260 | * 1. Call ufshcd_resume. | ||
261 | * 2. Do bus specific wake up | ||
262 | */ | ||
263 | |||
264 | enable_irq(hba->irq); | ||
265 | |||
266 | return 0; | ||
267 | } | 246 | } |
268 | #else | 247 | #else |
269 | #define ufshcd_pltfrm_suspend NULL | 248 | #define ufshcd_pltfrm_suspend NULL |
@@ -273,30 +252,15 @@ static int ufshcd_pltfrm_resume(struct device *dev) | |||
273 | #ifdef CONFIG_PM_RUNTIME | 252 | #ifdef CONFIG_PM_RUNTIME |
274 | static int ufshcd_pltfrm_runtime_suspend(struct device *dev) | 253 | static int ufshcd_pltfrm_runtime_suspend(struct device *dev) |
275 | { | 254 | { |
276 | struct ufs_hba *hba = dev_get_drvdata(dev); | 255 | return ufshcd_runtime_suspend(dev_get_drvdata(dev)); |
277 | |||
278 | if (!hba) | ||
279 | return 0; | ||
280 | |||
281 | return ufshcd_runtime_suspend(hba); | ||
282 | } | 256 | } |
283 | static int ufshcd_pltfrm_runtime_resume(struct device *dev) | 257 | static int ufshcd_pltfrm_runtime_resume(struct device *dev) |
284 | { | 258 | { |
285 | struct ufs_hba *hba = dev_get_drvdata(dev); | 259 | return ufshcd_runtime_resume(dev_get_drvdata(dev)); |
286 | |||
287 | if (!hba) | ||
288 | return 0; | ||
289 | |||
290 | return ufshcd_runtime_resume(hba); | ||
291 | } | 260 | } |
292 | static int ufshcd_pltfrm_runtime_idle(struct device *dev) | 261 | static int ufshcd_pltfrm_runtime_idle(struct device *dev) |
293 | { | 262 | { |
294 | struct ufs_hba *hba = dev_get_drvdata(dev); | 263 | return ufshcd_runtime_idle(dev_get_drvdata(dev)); |
295 | |||
296 | if (!hba) | ||
297 | return 0; | ||
298 | |||
299 | return ufshcd_runtime_idle(hba); | ||
300 | } | 264 | } |
301 | #else /* !CONFIG_PM_RUNTIME */ | 265 | #else /* !CONFIG_PM_RUNTIME */ |
302 | #define ufshcd_pltfrm_runtime_suspend NULL | 266 | #define ufshcd_pltfrm_runtime_suspend NULL |
@@ -304,6 +268,11 @@ static int ufshcd_pltfrm_runtime_idle(struct device *dev) | |||
304 | #define ufshcd_pltfrm_runtime_idle NULL | 268 | #define ufshcd_pltfrm_runtime_idle NULL |
305 | #endif /* CONFIG_PM_RUNTIME */ | 269 | #endif /* CONFIG_PM_RUNTIME */ |
306 | 270 | ||
271 | static void ufshcd_pltfrm_shutdown(struct platform_device *pdev) | ||
272 | { | ||
273 | ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev)); | ||
274 | } | ||
275 | |||
307 | /** | 276 | /** |
308 | * ufshcd_pltfrm_probe - probe routine of the driver | 277 | * ufshcd_pltfrm_probe - probe routine of the driver |
309 | * @pdev: pointer to Platform device handle | 278 | * @pdev: pointer to Platform device handle |
@@ -404,6 +373,7 @@ static const struct dev_pm_ops ufshcd_dev_pm_ops = { | |||
404 | static struct platform_driver ufshcd_pltfrm_driver = { | 373 | static struct platform_driver ufshcd_pltfrm_driver = { |
405 | .probe = ufshcd_pltfrm_probe, | 374 | .probe = ufshcd_pltfrm_probe, |
406 | .remove = ufshcd_pltfrm_remove, | 375 | .remove = ufshcd_pltfrm_remove, |
376 | .shutdown = ufshcd_pltfrm_shutdown, | ||
407 | .driver = { | 377 | .driver = { |
408 | .name = "ufshcd", | 378 | .name = "ufshcd", |
409 | .owner = THIS_MODULE, | 379 | .owner = THIS_MODULE, |
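Both glue drivers now route shutdown through ufshcd_shutdown() rather than stopping the host controller directly. ufshcd_shutdown() is added in ufshcd.c outside the hunks shown in this section; judging from ufshcd_is_shutdown_pm() and the shutdown branch in ufshcd_suspend() below, its core is expected to be roughly as follows (sketch only, with UFS_SHUTDOWN_PM inferred from the pm_op helpers):

	int ufshcd_shutdown(struct ufs_hba *hba)
	{
		/* Shutdown forces the deepest state: device power-down, link off */
		return ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
	}
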
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a1eae495fa67..f2b50bc13ac9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -44,7 +44,6 @@ | |||
44 | 44 | ||
45 | #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ | 45 | #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ |
46 | UTP_TASK_REQ_COMPL |\ | 46 | UTP_TASK_REQ_COMPL |\ |
47 | UIC_POWER_MODE |\ | ||
48 | UFSHCD_ERROR_MASK) | 47 | UFSHCD_ERROR_MASK) |
49 | /* UIC command timeout, unit: ms */ | 48 | /* UIC command timeout, unit: ms */ |
50 | #define UIC_CMD_TIMEOUT 500 | 49 | #define UIC_CMD_TIMEOUT 500 |
@@ -138,12 +137,72 @@ enum { | |||
138 | #define ufshcd_clear_eh_in_progress(h) \ | 137 | #define ufshcd_clear_eh_in_progress(h) \ |
139 | (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) | 138 | (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) |
140 | 139 | ||
140 | #define ufshcd_set_ufs_dev_active(h) \ | ||
141 | ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE) | ||
142 | #define ufshcd_set_ufs_dev_sleep(h) \ | ||
143 | ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE) | ||
144 | #define ufshcd_set_ufs_dev_poweroff(h) \ | ||
145 | ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE) | ||
146 | #define ufshcd_is_ufs_dev_active(h) \ | ||
147 | ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE) | ||
148 | #define ufshcd_is_ufs_dev_sleep(h) \ | ||
149 | ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE) | ||
150 | #define ufshcd_is_ufs_dev_poweroff(h) \ | ||
151 | ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE) | ||
152 | |||
153 | static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = { | ||
154 | {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE}, | ||
155 | {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE}, | ||
156 | {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE}, | ||
157 | {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE}, | ||
158 | {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE}, | ||
159 | {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE}, | ||
160 | }; | ||
161 | |||
162 | static inline enum ufs_dev_pwr_mode | ||
163 | ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl) | ||
164 | { | ||
165 | return ufs_pm_lvl_states[lvl].dev_state; | ||
166 | } | ||
167 | |||
168 | static inline enum uic_link_state | ||
169 | ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl) | ||
170 | { | ||
171 | return ufs_pm_lvl_states[lvl].link_state; | ||
172 | } | ||
173 | |||
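	/*
	 * Illustration (not in the patch): hba->rpm_lvl and hba->spm_lvl
	 * index directly into ufs_pm_lvl_states[], so each PM level is just
	 * a (device power mode, link state) pair.  For example, with
	 * spm_lvl == 5 the suspend path resolves to:
	 *
	 *	ufs_get_pm_lvl_to_dev_pwr_mode(5)   -> UFS_POWERDOWN_PWR_MODE
	 *	ufs_get_pm_lvl_to_link_pwr_state(5) -> UIC_LINK_OFF_STATE
	 *
	 * i.e. the deepest state: device powered down and link turned off.
	 */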
141 | static void ufshcd_tmc_handler(struct ufs_hba *hba); | 174 | static void ufshcd_tmc_handler(struct ufs_hba *hba); |
142 | static void ufshcd_async_scan(void *data, async_cookie_t cookie); | 175 | static void ufshcd_async_scan(void *data, async_cookie_t cookie); |
143 | static int ufshcd_reset_and_restore(struct ufs_hba *hba); | 176 | static int ufshcd_reset_and_restore(struct ufs_hba *hba); |
144 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); | 177 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); |
145 | static void ufshcd_hba_exit(struct ufs_hba *hba); | 178 | static void ufshcd_hba_exit(struct ufs_hba *hba); |
146 | static int ufshcd_probe_hba(struct ufs_hba *hba); | 179 | static int ufshcd_probe_hba(struct ufs_hba *hba); |
180 | static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); | ||
181 | static irqreturn_t ufshcd_intr(int irq, void *__hba); | ||
182 | |||
183 | static inline int ufshcd_enable_irq(struct ufs_hba *hba) | ||
184 | { | ||
185 | int ret = 0; | ||
186 | |||
187 | if (!hba->is_irq_enabled) { | ||
188 | ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, | ||
189 | hba); | ||
190 | if (ret) | ||
191 | dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", | ||
192 | __func__, ret); | ||
193 | hba->is_irq_enabled = true; | ||
194 | } | ||
195 | |||
196 | return ret; | ||
197 | } | ||
198 | |||
199 | static inline void ufshcd_disable_irq(struct ufs_hba *hba) | ||
200 | { | ||
201 | if (hba->is_irq_enabled) { | ||
202 | free_irq(hba->irq, hba); | ||
203 | hba->is_irq_enabled = false; | ||
204 | } | ||
205 | } | ||
147 | 206 | ||
148 | /* | 207 | /* |
149 | * ufshcd_wait_for_register - wait for register value to change | 208 | * ufshcd_wait_for_register - wait for register value to change |
@@ -609,15 +668,12 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) | |||
609 | * @uic_cmd: UIC command | 668 | * @uic_cmd: UIC command |
610 | * | 669 | * |
611 | * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called | 670 | * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called |
612 | * with mutex held. | 671 | * with mutex held and host_lock locked. |
613 | * Returns 0 only if success. | 672 | * Returns 0 only if success. |
614 | */ | 673 | */ |
615 | static int | 674 | static int |
616 | __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) | 675 | __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) |
617 | { | 676 | { |
618 | int ret; | ||
619 | unsigned long flags; | ||
620 | |||
621 | if (!ufshcd_ready_for_uic_cmd(hba)) { | 677 | if (!ufshcd_ready_for_uic_cmd(hba)) { |
622 | dev_err(hba->dev, | 678 | dev_err(hba->dev, |
623 | "Controller not ready to accept UIC commands\n"); | 679 | "Controller not ready to accept UIC commands\n"); |
@@ -626,13 +682,9 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) | |||
626 | 682 | ||
627 | init_completion(&uic_cmd->done); | 683 | init_completion(&uic_cmd->done); |
628 | 684 | ||
629 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
630 | ufshcd_dispatch_uic_cmd(hba, uic_cmd); | 685 | ufshcd_dispatch_uic_cmd(hba, uic_cmd); |
631 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
632 | 686 | ||
633 | ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); | 687 | return 0; |
634 | |||
635 | return ret; | ||
636 | } | 688 | } |
637 | 689 | ||
638 | /** | 690 | /** |
@@ -646,9 +698,15 @@ static int | |||
646 | ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) | 698 | ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) |
647 | { | 699 | { |
648 | int ret; | 700 | int ret; |
701 | unsigned long flags; | ||
649 | 702 | ||
650 | mutex_lock(&hba->uic_cmd_mutex); | 703 | mutex_lock(&hba->uic_cmd_mutex); |
704 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
651 | ret = __ufshcd_send_uic_cmd(hba, uic_cmd); | 705 | ret = __ufshcd_send_uic_cmd(hba, uic_cmd); |
706 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
707 | if (!ret) | ||
708 | ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); | ||
709 | |||
652 | mutex_unlock(&hba->uic_cmd_mutex); | 710 | mutex_unlock(&hba->uic_cmd_mutex); |
653 | 711 | ||
654 | return ret; | 712 | return ret; |
@@ -1789,44 +1847,54 @@ out: | |||
1789 | EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); | 1847 | EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); |
1790 | 1848 | ||
1791 | /** | 1849 | /** |
1792 | * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage | 1850 | * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power |
1793 | * using DME_SET primitives. | 1851 | * state) and waits for it to take effect. |
1852 | * | ||
1794 | * @hba: per adapter instance | 1853 | * @hba: per adapter instance |
1795 | * @mode: powr mode value | 1854 | * @cmd: UIC command to execute |
1855 | * | ||
1856 | * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER & | ||
1857 | * DME_HIBERNATE_EXIT commands take some time to take its effect on both host | ||
1858 | * and device UniPro link and hence it's final completion would be indicated by | ||
1859 | * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in | ||
1860 | * addition to normal UIC command completion Status (UCCS). This function only | ||
1861 | * returns after the relevant status bits indicate the completion. | ||
1796 | * | 1862 | * |
1797 | * Returns 0 on success, non-zero value on failure | 1863 | * Returns 0 on success, non-zero value on failure |
1798 | */ | 1864 | */ |
1799 | static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) | 1865 | static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) |
1800 | { | 1866 | { |
1801 | struct uic_command uic_cmd = {0}; | 1867 | struct completion uic_async_done; |
1802 | struct completion pwr_done; | ||
1803 | unsigned long flags; | 1868 | unsigned long flags; |
1804 | u8 status; | 1869 | u8 status; |
1805 | int ret; | 1870 | int ret; |
1806 | 1871 | ||
1807 | uic_cmd.command = UIC_CMD_DME_SET; | ||
1808 | uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); | ||
1809 | uic_cmd.argument3 = mode; | ||
1810 | init_completion(&pwr_done); | ||
1811 | |||
1812 | mutex_lock(&hba->uic_cmd_mutex); | 1872 | mutex_lock(&hba->uic_cmd_mutex); |
1873 | init_completion(&uic_async_done); | ||
1813 | 1874 | ||
1814 | spin_lock_irqsave(hba->host->host_lock, flags); | 1875 | spin_lock_irqsave(hba->host->host_lock, flags); |
1815 | hba->pwr_done = &pwr_done; | 1876 | hba->uic_async_done = &uic_async_done; |
1877 | ret = __ufshcd_send_uic_cmd(hba, cmd); | ||
1816 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 1878 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
1817 | ret = __ufshcd_send_uic_cmd(hba, &uic_cmd); | ||
1818 | if (ret) { | 1879 | if (ret) { |
1819 | dev_err(hba->dev, | 1880 | dev_err(hba->dev, |
1820 | "pwr mode change with mode 0x%x uic error %d\n", | 1881 | "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", |
1821 | mode, ret); | 1882 | cmd->command, cmd->argument3, ret); |
1883 | goto out; | ||
1884 | } | ||
1885 | ret = ufshcd_wait_for_uic_cmd(hba, cmd); | ||
1886 | if (ret) { | ||
1887 | dev_err(hba->dev, | ||
1888 | "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", | ||
1889 | cmd->command, cmd->argument3, ret); | ||
1822 | goto out; | 1890 | goto out; |
1823 | } | 1891 | } |
1824 | 1892 | ||
1825 | if (!wait_for_completion_timeout(hba->pwr_done, | 1893 | if (!wait_for_completion_timeout(hba->uic_async_done, |
1826 | msecs_to_jiffies(UIC_CMD_TIMEOUT))) { | 1894 | msecs_to_jiffies(UIC_CMD_TIMEOUT))) { |
1827 | dev_err(hba->dev, | 1895 | dev_err(hba->dev, |
1828 | "pwr mode change with mode 0x%x completion timeout\n", | 1896 | "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", |
1829 | mode); | 1897 | cmd->command, cmd->argument3); |
1830 | ret = -ETIMEDOUT; | 1898 | ret = -ETIMEDOUT; |
1831 | goto out; | 1899 | goto out; |
1832 | } | 1900 | } |
@@ -1834,19 +1902,62 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) | |||
1834 | status = ufshcd_get_upmcrs(hba); | 1902 | status = ufshcd_get_upmcrs(hba); |
1835 | if (status != PWR_LOCAL) { | 1903 | if (status != PWR_LOCAL) { |
1836 | dev_err(hba->dev, | 1904 | dev_err(hba->dev, |
1837 | "pwr mode change failed, host umpcrs:0x%x\n", | 1905 | "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n", |
1838 | status); | 1906 | cmd->command, status); |
1839 | ret = (status != PWR_OK) ? status : -1; | 1907 | ret = (status != PWR_OK) ? status : -1; |
1840 | } | 1908 | } |
1841 | out: | 1909 | out: |
1842 | spin_lock_irqsave(hba->host->host_lock, flags); | 1910 | spin_lock_irqsave(hba->host->host_lock, flags); |
1843 | hba->pwr_done = NULL; | 1911 | hba->uic_async_done = NULL; |
1844 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 1912 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
1845 | mutex_unlock(&hba->uic_cmd_mutex); | 1913 | mutex_unlock(&hba->uic_cmd_mutex); |
1846 | return ret; | 1914 | return ret; |
1847 | } | 1915 | } |
1848 | 1916 | ||
1849 | /** | 1917 | /** |
1918 | * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage | ||
1919 | * using DME_SET primitives. | ||
1920 | * @hba: per adapter instance | ||
1921 | * @mode: powr mode value | ||
1922 | * | ||
1923 | * Returns 0 on success, non-zero value on failure | ||
1924 | */ | ||
1925 | static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) | ||
1926 | { | ||
1927 | struct uic_command uic_cmd = {0}; | ||
1928 | |||
1929 | uic_cmd.command = UIC_CMD_DME_SET; | ||
1930 | uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); | ||
1931 | uic_cmd.argument3 = mode; | ||
1932 | |||
1933 | return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | ||
1934 | } | ||
1935 | |||
1936 | static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) | ||
1937 | { | ||
1938 | struct uic_command uic_cmd = {0}; | ||
1939 | |||
1940 | uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; | ||
1941 | |||
1942 | return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | ||
1943 | } | ||
1944 | |||
1945 | static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) | ||
1946 | { | ||
1947 | struct uic_command uic_cmd = {0}; | ||
1948 | int ret; | ||
1949 | |||
1950 | uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; | ||
1951 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | ||
1952 | if (ret) { | ||
1953 | ufshcd_set_link_off(hba); | ||
1954 | ret = ufshcd_host_reset_and_restore(hba); | ||
1955 | } | ||
1956 | |||
1957 | return ret; | ||
1958 | } | ||
1959 | |||
1960 | /** | ||
1850 | * ufshcd_config_max_pwr_mode - Set & Change power mode with | 1961 | * ufshcd_config_max_pwr_mode - Set & Change power mode with |
1851 | * maximum capability attribute information. | 1962 | * maximum capability attribute information. |
1852 | * @hba: per adapter instance | 1963 | * @hba: per adapter instance |
@@ -2045,6 +2156,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) | |||
2045 | msleep(5); | 2156 | msleep(5); |
2046 | } | 2157 | } |
2047 | 2158 | ||
2159 | /* UniPro link is disabled at this point */ | ||
2160 | ufshcd_set_link_off(hba); | ||
2161 | |||
2048 | if (hba->vops && hba->vops->hce_enable_notify) | 2162 | if (hba->vops && hba->vops->hce_enable_notify) |
2049 | hba->vops->hce_enable_notify(hba, PRE_CHANGE); | 2163 | hba->vops->hce_enable_notify(hba, PRE_CHANGE); |
2050 | 2164 | ||
@@ -2077,7 +2191,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) | |||
2077 | } | 2191 | } |
2078 | 2192 | ||
2079 | /* enable UIC related interrupts */ | 2193 | /* enable UIC related interrupts */ |
2080 | ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); | 2194 | ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); |
2081 | 2195 | ||
2082 | if (hba->vops && hba->vops->hce_enable_notify) | 2196 | if (hba->vops && hba->vops->hce_enable_notify) |
2083 | hba->vops->hce_enable_notify(hba, POST_CHANGE); | 2197 | hba->vops->hce_enable_notify(hba, POST_CHANGE); |
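Enabling UFSHCD_UIC_MASK instead of the bare UIC_COMMAND_COMPL is what lets ufshcd_uic_pwr_ctrl() see the dedicated power-mode/hibernate completion bits (UPMS, UHES, UHXS) described in its comment above. The mask is defined in the headers touched by this patch but not shown in this section; based on the interrupt bits named there and the UIC_HIBERNATE_ENTER/UIC_HIBERNATE_EXIT bits from ufshci.h, its composition is assumed to be along these lines:

	/* Assumed composition -- the authoritative definitions are in the headers */
	#define UFSHCD_UIC_PWR_MASK	(UIC_HIBERNATE_ENTER |\
					 UIC_HIBERNATE_EXIT |\
					 UIC_POWER_MODE)
	#define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
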
@@ -2206,6 +2320,62 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev) | |||
2206 | scsi_activate_tcq(sdev, lun_qdepth); | 2320 | scsi_activate_tcq(sdev, lun_qdepth); |
2207 | } | 2321 | } |
2208 | 2322 | ||
2323 | /* | ||
2324 | * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR | ||
2325 | * @hba: per-adapter instance | ||
2326 | * @lun: UFS device lun id | ||
2327 | * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info | ||
2328 | * | ||
2329 | * Returns 0 in case of success and b_lu_write_protect status would be returned | ||
2330 | * @b_lu_write_protect parameter. | ||
2331 | * Returns -ENOTSUPP if reading b_lu_write_protect is not supported. | ||
2332 | * Returns -EINVAL in case of invalid parameters passed to this function. | ||
2333 | */ | ||
2334 | static int ufshcd_get_lu_wp(struct ufs_hba *hba, | ||
2335 | u8 lun, | ||
2336 | u8 *b_lu_write_protect) | ||
2337 | { | ||
2338 | int ret; | ||
2339 | |||
2340 | if (!b_lu_write_protect) | ||
2341 | ret = -EINVAL; | ||
2342 | /* | ||
2343 | * According to UFS device spec, RPMB LU can't be write | ||
2344 | * protected so skip reading bLUWriteProtect parameter for | ||
2345 | * it. For other W-LUs, UNIT DESCRIPTOR is not available. | ||
2346 | */ | ||
2347 | else if (lun >= UFS_UPIU_MAX_GENERAL_LUN) | ||
2348 | ret = -ENOTSUPP; | ||
2349 | else | ||
2350 | ret = ufshcd_read_unit_desc_param(hba, | ||
2351 | lun, | ||
2352 | UNIT_DESC_PARAM_LU_WR_PROTECT, | ||
2353 | b_lu_write_protect, | ||
2354 | sizeof(*b_lu_write_protect)); | ||
2355 | return ret; | ||
2356 | } | ||
2357 | |||
2358 | /** | ||
2359 | * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect | ||
2360 | * status | ||
2361 | * @hba: per-adapter instance | ||
2362 | * @sdev: pointer to SCSI device | ||
2363 | * | ||
2364 | */ | ||
2365 | static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, | ||
2366 | struct scsi_device *sdev) | ||
2367 | { | ||
2368 | if (hba->dev_info.f_power_on_wp_en && | ||
2369 | !hba->dev_info.is_lu_power_on_wp) { | ||
2370 | u8 b_lu_write_protect; | ||
2371 | |||
2372 | if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), | ||
2373 | &b_lu_write_protect) && | ||
2374 | (b_lu_write_protect == UFS_LU_POWER_ON_WP)) | ||
2375 | hba->dev_info.is_lu_power_on_wp = true; | ||
2376 | } | ||
2377 | } | ||
2378 | |||
2209 | /** | 2379 | /** |
2210 | * ufshcd_slave_alloc - handle initial SCSI device configurations | 2380 | * ufshcd_slave_alloc - handle initial SCSI device configurations |
2211 | * @sdev: pointer to SCSI device | 2381 | * @sdev: pointer to SCSI device |
@@ -2232,6 +2402,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) | |||
2232 | 2402 | ||
2233 | ufshcd_set_queue_depth(sdev); | 2403 | ufshcd_set_queue_depth(sdev); |
2234 | 2404 | ||
2405 | ufshcd_get_lu_power_on_wp_status(hba, sdev); | ||
2406 | |||
2235 | return 0; | 2407 | return 0; |
2236 | } | 2408 | } |
2237 | 2409 | ||
@@ -2462,8 +2634,8 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) | |||
2462 | complete(&hba->active_uic_cmd->done); | 2634 | complete(&hba->active_uic_cmd->done); |
2463 | } | 2635 | } |
2464 | 2636 | ||
2465 | if ((intr_status & UIC_POWER_MODE) && hba->pwr_done) | 2637 | if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) |
2466 | complete(hba->pwr_done); | 2638 | complete(hba->uic_async_done); |
2467 | } | 2639 | } |
2468 | 2640 | ||
2469 | /** | 2641 | /** |
@@ -2675,33 +2847,62 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) | |||
2675 | } | 2847 | } |
2676 | 2848 | ||
2677 | /** | 2849 | /** |
2678 | * ufshcd_urgent_bkops - handle urgent bkops exception event | 2850 | * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status |
2679 | * @hba: per-adapter instance | 2851 | * @hba: per-adapter instance |
2852 | * @status: bkops_status value | ||
2680 | * | 2853 | * |
2681 | * Enable fBackgroundOpsEn flag in the device to permit background | 2854 | * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn |
2682 | * operations. | 2855 | * flag in the device to permit background operations if the device |
2856 | * bkops_status is greater than or equal to "status" argument passed to | ||
2857 | * this function, disable otherwise. | ||
2858 | * | ||
2859 | * Returns 0 for success, non-zero in case of failure. | ||
2860 | * | ||
2861 | * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag | ||
2862 | * to know whether auto bkops is enabled or disabled after this function | ||
2863 | * returns control to it. | ||
2683 | */ | 2864 | */ |
2684 | static int ufshcd_urgent_bkops(struct ufs_hba *hba) | 2865 | static int ufshcd_bkops_ctrl(struct ufs_hba *hba, |
2866 | enum bkops_status status) | ||
2685 | { | 2867 | { |
2686 | int err; | 2868 | int err; |
2687 | u32 status = 0; | 2869 | u32 curr_status = 0; |
2688 | 2870 | ||
2689 | err = ufshcd_get_bkops_status(hba, &status); | 2871 | err = ufshcd_get_bkops_status(hba, &curr_status); |
2690 | if (err) { | 2872 | if (err) { |
2691 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", | 2873 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", |
2692 | __func__, err); | 2874 | __func__, err); |
2693 | goto out; | 2875 | goto out; |
2876 | } else if (curr_status > BKOPS_STATUS_MAX) { | ||
2877 | dev_err(hba->dev, "%s: invalid BKOPS status %d\n", | ||
2878 | __func__, curr_status); | ||
2879 | err = -EINVAL; | ||
2880 | goto out; | ||
2694 | } | 2881 | } |
2695 | 2882 | ||
2696 | status = status & 0xF; | 2883 | if (curr_status >= status) |
2697 | |||
2698 | /* handle only if status indicates performance impact or critical */ | ||
2699 | if (status >= BKOPS_STATUS_PERF_IMPACT) | ||
2700 | err = ufshcd_enable_auto_bkops(hba); | 2884 | err = ufshcd_enable_auto_bkops(hba); |
2885 | else | ||
2886 | err = ufshcd_disable_auto_bkops(hba); | ||
2701 | out: | 2887 | out: |
2702 | return err; | 2888 | return err; |
2703 | } | 2889 | } |
2704 | 2890 | ||
2891 | /** | ||
2892 | * ufshcd_urgent_bkops - handle urgent bkops exception event | ||
2893 | * @hba: per-adapter instance | ||
2894 | * | ||
2895 | * Enable fBackgroundOpsEn flag in the device to permit background | ||
2896 | * operations. | ||
2897 | * | ||
2898 | * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled | ||
2899 | * and negative error value for any other failure. | ||
2900 | */ | ||
2901 | static int ufshcd_urgent_bkops(struct ufs_hba *hba) | ||
2902 | { | ||
2903 | return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); | ||
2904 | } | ||
2905 | |||
2705 | static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) | 2906 | static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) |
2706 | { | 2907 | { |
2707 | return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, | 2908 | return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, |
@@ -2733,7 +2934,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) | |||
2733 | status &= hba->ee_ctrl_mask; | 2934 | status &= hba->ee_ctrl_mask; |
2734 | if (status & MASK_EE_URGENT_BKOPS) { | 2935 | if (status & MASK_EE_URGENT_BKOPS) { |
2735 | err = ufshcd_urgent_bkops(hba); | 2936 | err = ufshcd_urgent_bkops(hba); |
2736 | if (err) | 2937 | if (err < 0) |
2737 | dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", | 2938 | dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", |
2738 | __func__, err); | 2939 | __func__, err); |
2739 | } | 2940 | } |
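ufshcd_bkops_ctrl() turns the old hard-coded check into a threshold: auto-BKOPS is enabled when the device reports a bkops_status at or above the level the caller passes, and disabled otherwise. The two callers introduced by this patch show the intended split:

	/* Urgent-BKOPS exception event: act only on PERF_IMPACT or CRITICAL */
	ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);

	/*
	 * Runtime suspend (see ufshcd_suspend() below): the device is idle,
	 * so even NON_CRITICAL background work may be allowed to run.
	 */
	ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL);
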
@@ -3539,7 +3740,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) | |||
3539 | if (ret) | 3740 | if (ret) |
3540 | goto out; | 3741 | goto out; |
3541 | 3742 | ||
3542 | ufshcd_config_max_pwr_mode(hba); | 3743 | /* UniPro link is active now */ |
3744 | ufshcd_set_link_active(hba); | ||
3543 | 3745 | ||
3544 | ret = ufshcd_verify_dev_init(hba); | 3746 | ret = ufshcd_verify_dev_init(hba); |
3545 | if (ret) | 3747 | if (ret) |
@@ -3549,11 +3751,27 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) | |||
3549 | if (ret) | 3751 | if (ret) |
3550 | goto out; | 3752 | goto out; |
3551 | 3753 | ||
3754 | /* UFS device is also active now */ | ||
3755 | ufshcd_set_ufs_dev_active(hba); | ||
3552 | ufshcd_force_reset_auto_bkops(hba); | 3756 | ufshcd_force_reset_auto_bkops(hba); |
3553 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | 3757 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; |
3758 | hba->wlun_dev_clr_ua = true; | ||
3759 | |||
3760 | ufshcd_config_max_pwr_mode(hba); | ||
3761 | |||
3762 | /* | ||
3763 | * If we are in error handling context or in power management callbacks | ||
3764 | * context, no need to scan the host | ||
3765 | */ | ||
3766 | if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { | ||
3767 | bool flag; | ||
3768 | |||
3769 | /* clear any previous UFS device information */ | ||
3770 | memset(&hba->dev_info, 0, sizeof(hba->dev_info)); | ||
3771 | if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, | ||
3772 | QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) | ||
3773 | hba->dev_info.f_power_on_wp_en = flag; | ||
3554 | 3774 | ||
3555 | /* If we are in error handling context no need to scan the host */ | ||
3556 | if (!ufshcd_eh_in_progress(hba)) { | ||
3557 | if (!hba->is_init_prefetch) | 3775 | if (!hba->is_init_prefetch) |
3558 | ufshcd_init_icc_levels(hba); | 3776 | ufshcd_init_icc_levels(hba); |
3559 | 3777 | ||
@@ -3573,8 +3791,10 @@ out: | |||
3573 | * If we failed to initialize the device or the device is not | 3791 | * If we failed to initialize the device or the device is not |
3574 | * present, turn off the power/clocks etc. | 3792 | * present, turn off the power/clocks etc. |
3575 | */ | 3793 | */ |
3576 | if (ret && !ufshcd_eh_in_progress(hba)) | 3794 | if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { |
3795 | pm_runtime_put_sync(hba->dev); | ||
3577 | ufshcd_hba_exit(hba); | 3796 | ufshcd_hba_exit(hba); |
3797 | } | ||
3578 | 3798 | ||
3579 | return ret; | 3799 | return ret; |
3580 | } | 3800 | } |
@@ -3609,6 +3829,42 @@ static struct scsi_host_template ufshcd_driver_template = { | |||
3609 | .can_queue = UFSHCD_CAN_QUEUE, | 3829 | .can_queue = UFSHCD_CAN_QUEUE, |
3610 | }; | 3830 | }; |
3611 | 3831 | ||
3832 | static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, | ||
3833 | int ua) | ||
3834 | { | ||
3835 | int ret = 0; | ||
3836 | struct regulator *reg = vreg->reg; | ||
3837 | const char *name = vreg->name; | ||
3838 | |||
3839 | BUG_ON(!vreg); | ||
3840 | |||
3841 | ret = regulator_set_optimum_mode(reg, ua); | ||
3842 | if (ret >= 0) { | ||
3843 | /* | ||
3844 | * regulator_set_optimum_mode() returns new regulator | ||
3845 | * mode upon success. | ||
3846 | */ | ||
3847 | ret = 0; | ||
3848 | } else { | ||
3849 | dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n", | ||
3850 | __func__, name, ua, ret); | ||
3851 | } | ||
3852 | |||
3853 | return ret; | ||
3854 | } | ||
3855 | |||
3856 | static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, | ||
3857 | struct ufs_vreg *vreg) | ||
3858 | { | ||
3859 | return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); | ||
3860 | } | ||
3861 | |||
3862 | static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, | ||
3863 | struct ufs_vreg *vreg) | ||
3864 | { | ||
3865 | return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); | ||
3866 | } | ||
3867 | |||
3612 | static int ufshcd_config_vreg(struct device *dev, | 3868 | static int ufshcd_config_vreg(struct device *dev, |
3613 | struct ufs_vreg *vreg, bool on) | 3869 | struct ufs_vreg *vreg, bool on) |
3614 | { | 3870 | { |
@@ -3629,18 +3885,9 @@ static int ufshcd_config_vreg(struct device *dev, | |||
3629 | } | 3885 | } |
3630 | 3886 | ||
3631 | uA_load = on ? vreg->max_uA : 0; | 3887 | uA_load = on ? vreg->max_uA : 0; |
3632 | ret = regulator_set_optimum_mode(reg, uA_load); | 3888 | ret = ufshcd_config_vreg_load(dev, vreg, uA_load); |
3633 | if (ret >= 0) { | 3889 | if (ret) |
3634 | /* | ||
3635 | * regulator_set_optimum_mode() returns new regulator | ||
3636 | * mode upon success. | ||
3637 | */ | ||
3638 | ret = 0; | ||
3639 | } else { | ||
3640 | dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n", | ||
3641 | __func__, name, uA_load, ret); | ||
3642 | goto out; | 3890 | goto out; |
3643 | } | ||
3644 | } | 3891 | } |
3645 | out: | 3892 | out: |
3646 | return ret; | 3893 | return ret; |
@@ -3776,7 +4023,8 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba) | |||
3776 | return 0; | 4023 | return 0; |
3777 | } | 4024 | } |
3778 | 4025 | ||
3779 | static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) | 4026 | static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, |
4027 | bool skip_ref_clk) | ||
3780 | { | 4028 | { |
3781 | int ret = 0; | 4029 | int ret = 0; |
3782 | struct ufs_clk_info *clki; | 4030 | struct ufs_clk_info *clki; |
@@ -3787,6 +4035,9 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) | |||
3787 | 4035 | ||
3788 | list_for_each_entry(clki, head, list) { | 4036 | list_for_each_entry(clki, head, list) { |
3789 | if (!IS_ERR_OR_NULL(clki->clk)) { | 4037 | if (!IS_ERR_OR_NULL(clki->clk)) { |
4038 | if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) | ||
4039 | continue; | ||
4040 | |||
3790 | if (on && !clki->enabled) { | 4041 | if (on && !clki->enabled) { |
3791 | ret = clk_prepare_enable(clki->clk); | 4042 | ret = clk_prepare_enable(clki->clk); |
3792 | if (ret) { | 4043 | if (ret) { |
@@ -3812,6 +4063,11 @@ out: | |||
3812 | return ret; | 4063 | return ret; |
3813 | } | 4064 | } |
3814 | 4065 | ||
4066 | static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) | ||
4067 | { | ||
4068 | return __ufshcd_setup_clocks(hba, on, false); | ||
4069 | } | ||
4070 | |||
3815 | static int ufshcd_init_clocks(struct ufs_hba *hba) | 4071 | static int ufshcd_init_clocks(struct ufs_hba *hba) |
3816 | { | 4072 | { |
3817 | int ret = 0; | 4073 | int ret = 0; |
@@ -3968,68 +4224,532 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) | |||
3968 | } | 4224 | } |
3969 | } | 4225 | } |
3970 | 4226 | ||
4227 | static int | ||
4228 | ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) | ||
4229 | { | ||
4230 | unsigned char cmd[6] = {REQUEST_SENSE, | ||
4231 | 0, | ||
4232 | 0, | ||
4233 | 0, | ||
4234 | SCSI_SENSE_BUFFERSIZE, | ||
4235 | 0}; | ||
4236 | char *buffer; | ||
4237 | int ret; | ||
4238 | |||
4239 | buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); | ||
4240 | if (!buffer) { | ||
4241 | ret = -ENOMEM; | ||
4242 | goto out; | ||
4243 | } | ||
4244 | |||
4245 | ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, | ||
4246 | SCSI_SENSE_BUFFERSIZE, NULL, | ||
4247 | msecs_to_jiffies(1000), 3, NULL, REQ_PM); | ||
4248 | if (ret) | ||
4249 | pr_err("%s: failed with err %d\n", __func__, ret); | ||
4250 | |||
4251 | kfree(buffer); | ||
4252 | out: | ||
4253 | return ret; | ||
4254 | } | ||
4255 | |||
4256 | /** | ||
4257 | * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device | ||
4258 | * power mode | ||
4259 | * @hba: per adapter instance | ||
4260 | * @pwr_mode: device power mode to set | ||
4261 | * | ||
4262 | * Returns 0 if requested power mode is set successfully | ||
4263 | * Returns non-zero if failed to set the requested power mode | ||
4264 | */ | ||
4265 | static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, | ||
4266 | enum ufs_dev_pwr_mode pwr_mode) | ||
4267 | { | ||
4268 | unsigned char cmd[6] = { START_STOP }; | ||
4269 | struct scsi_sense_hdr sshdr; | ||
4270 | struct scsi_device *sdp = hba->sdev_ufs_device; | ||
4271 | int ret; | ||
4272 | |||
4273 | if (!sdp || !scsi_device_online(sdp)) | ||
4274 | return -ENODEV; | ||
4275 | |||
4276 | /* | ||
4277 | * If scsi commands fail, the scsi mid-layer schedules scsi error- | ||
4278 | * handling, which would wait for host to be resumed. Since we know | ||
4279 | * we are functional while we are here, skip host resume in error | ||
4280 | * handling context. | ||
4281 | */ | ||
4282 | hba->host->eh_noresume = 1; | ||
4283 | if (hba->wlun_dev_clr_ua) { | ||
4284 | ret = ufshcd_send_request_sense(hba, sdp); | ||
4285 | if (ret) | ||
4286 | goto out; | ||
4287 | /* Unit attention condition is cleared now */ | ||
4288 | hba->wlun_dev_clr_ua = false; | ||
4289 | } | ||
4290 | |||
4291 | cmd[4] = pwr_mode << 4; | ||
4292 | |||
4293 | /* | ||
4294 | * Current function would be generally called from the power management | ||
4295 | * callbacks hence set the REQ_PM flag so that it doesn't resume the | ||
4296 | * already suspended childs. | ||
4297 | */ | ||
4298 | ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, | ||
4299 | START_STOP_TIMEOUT, 0, NULL, REQ_PM); | ||
4300 | if (ret) { | ||
4301 | sdev_printk(KERN_WARNING, sdp, | ||
4302 | "START_STOP failed for power mode: %d\n", pwr_mode); | ||
4303 | scsi_show_result(ret); | ||
4304 | if (driver_byte(ret) & DRIVER_SENSE) { | ||
4305 | scsi_show_sense_hdr(&sshdr); | ||
4306 | scsi_show_extd_sense(sshdr.asc, sshdr.ascq); | ||
4307 | } | ||
4308 | } | ||
4309 | |||
4310 | if (!ret) | ||
4311 | hba->curr_dev_pwr_mode = pwr_mode; | ||
4312 | out: | ||
4313 | hba->host->eh_noresume = 0; | ||
4314 | return ret; | ||
4315 | } | ||
4316 | |||
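	/*
	 * Worked example (not in the patch): START STOP UNIT carries the
	 * power condition in the upper nibble of CDB byte 4, hence the
	 * "cmd[4] = pwr_mode << 4" above.  With enum ufs_dev_pwr_mode:
	 *
	 *	UFS_ACTIVE_PWR_MODE    (1) -> cmd[4] = 0x10
	 *	UFS_SLEEP_PWR_MODE     (2) -> cmd[4] = 0x20
	 *	UFS_POWERDOWN_PWR_MODE (3) -> cmd[4] = 0x30
	 */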
4317 | static int ufshcd_link_state_transition(struct ufs_hba *hba, | ||
4318 | enum uic_link_state req_link_state, | ||
4319 | int check_for_bkops) | ||
4320 | { | ||
4321 | int ret = 0; | ||
4322 | |||
4323 | if (req_link_state == hba->uic_link_state) | ||
4324 | return 0; | ||
4325 | |||
4326 | if (req_link_state == UIC_LINK_HIBERN8_STATE) { | ||
4327 | ret = ufshcd_uic_hibern8_enter(hba); | ||
4328 | if (!ret) | ||
4329 | ufshcd_set_link_hibern8(hba); | ||
4330 | else | ||
4331 | goto out; | ||
4332 | } | ||
4333 | /* | ||
4334 | * If autobkops is enabled, link can't be turned off because | ||
4335 | * turning off the link would also turn off the device. | ||
4336 | */ | ||
4337 | else if ((req_link_state == UIC_LINK_OFF_STATE) && | ||
4338 | (!check_for_bkops || (check_for_bkops && | ||
4339 | !hba->auto_bkops_enabled))) { | ||
4340 | /* | ||
4341 | * Change controller state to "reset state" which | ||
4342 | * should also put the link in off/reset state | ||
4343 | */ | ||
4344 | ufshcd_hba_stop(hba); | ||
4345 | /* | ||
4346 | * TODO: Check if we need any delay to make sure that | ||
4347 | * controller is reset | ||
4348 | */ | ||
4349 | ufshcd_set_link_off(hba); | ||
4350 | } | ||
4351 | |||
4352 | out: | ||
4353 | return ret; | ||
4354 | } | ||
4355 | |||
4356 | static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) | ||
4357 | { | ||
4358 | /* | ||
4359 | * If UFS device is either in UFS_Sleep turn off VCC rail to save some | ||
4360 | * power. | ||
4361 | * | ||
4362 | * If UFS device and link is in OFF state, all power supplies (VCC, | ||
4363 | * VCCQ, VCCQ2) can be turned off if power on write protect is not | ||
4364 | * required. If UFS link is inactive (Hibern8 or OFF state) and device | ||
4365 | * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode. | ||
4366 | * | ||
4367 | * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway | ||
4368 | * in low power state which would save some power. | ||
4369 | */ | ||
4370 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | ||
4371 | !hba->dev_info.is_lu_power_on_wp) { | ||
4372 | ufshcd_setup_vreg(hba, false); | ||
4373 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | ||
4374 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | ||
4375 | if (!ufshcd_is_link_active(hba)) { | ||
4376 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | ||
4377 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); | ||
4378 | } | ||
4379 | } | ||
4380 | } | ||
4381 | |||
4382 | static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) | ||
4383 | { | ||
4384 | int ret = 0; | ||
4385 | |||
4386 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | ||
4387 | !hba->dev_info.is_lu_power_on_wp) { | ||
4388 | ret = ufshcd_setup_vreg(hba, true); | ||
4389 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | ||
4390 | ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); | ||
4391 | if (!ret && !ufshcd_is_link_active(hba)) { | ||
4392 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); | ||
4393 | if (ret) | ||
4394 | goto vcc_disable; | ||
4395 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); | ||
4396 | if (ret) | ||
4397 | goto vccq_lpm; | ||
4398 | } | ||
4399 | } | ||
4400 | goto out; | ||
4401 | |||
4402 | vccq_lpm: | ||
4403 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | ||
4404 | vcc_disable: | ||
4405 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | ||
4406 | out: | ||
4407 | return ret; | ||
4408 | } | ||
4409 | |||
4410 | static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) | ||
4411 | { | ||
4412 | if (ufshcd_is_link_off(hba)) | ||
4413 | ufshcd_setup_hba_vreg(hba, false); | ||
4414 | } | ||
4415 | |||
4416 | static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) | ||
4417 | { | ||
4418 | if (ufshcd_is_link_off(hba)) | ||
4419 | ufshcd_setup_hba_vreg(hba, true); | ||
4420 | } | ||
4421 | |||
3971 | /** | 4422 | /** |
3972 | * ufshcd_suspend - suspend power management function | 4423 | * ufshcd_suspend - helper function for suspend operations |
3973 | * @hba: per adapter instance | 4424 | * @hba: per adapter instance |
3974 | * @state: power state | 4425 | * @pm_op: desired low power operation type |
4426 | * | ||
4427 | * This function will try to put the UFS device and link into low power | ||
4428 | * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl" | ||
4429 | * (System PM level). | ||
4430 | * | ||
4431 | * If this function is called during shutdown, it will make sure that | ||
4432 | * both UFS device and UFS link is powered off. | ||
3975 | * | 4433 | * |
3976 | * Returns -ENOSYS | 4434 | * NOTE: UFS device & link must be active before we enter in this function. |
4435 | * | ||
4436 | * Returns 0 for success and non-zero for failure | ||
3977 | */ | 4437 | */ |
3978 | int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state) | 4438 | static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
3979 | { | 4439 | { |
4440 | int ret = 0; | ||
4441 | enum ufs_pm_level pm_lvl; | ||
4442 | enum ufs_dev_pwr_mode req_dev_pwr_mode; | ||
4443 | enum uic_link_state req_link_state; | ||
4444 | |||
4445 | hba->pm_op_in_progress = 1; | ||
4446 | if (!ufshcd_is_shutdown_pm(pm_op)) { | ||
4447 | pm_lvl = ufshcd_is_runtime_pm(pm_op) ? | ||
4448 | hba->rpm_lvl : hba->spm_lvl; | ||
4449 | req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); | ||
4450 | req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); | ||
4451 | } else { | ||
4452 | req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; | ||
4453 | req_link_state = UIC_LINK_OFF_STATE; | ||
4454 | } | ||
4455 | |||
3980 | /* | 4456 | /* |
3981 | * TODO: | 4457 | * If we can't transition into any of the low power modes |
3982 | * 1. Block SCSI requests from SCSI midlayer | 4458 | * just gate the clocks. |
3983 | * 2. Change the internal driver state to non operational | ||
3984 | * 3. Set UTRLRSR and UTMRLRSR bits to zero | ||
3985 | * 4. Wait until outstanding commands are completed | ||
3986 | * 5. Set HCE to zero to send the UFS host controller to reset state | ||
3987 | */ | 4459 | */ |
4460 | if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && | ||
4461 | req_link_state == UIC_LINK_ACTIVE_STATE) { | ||
4462 | goto disable_clks; | ||
4463 | } | ||
3988 | 4464 | ||
3989 | return -ENOSYS; | 4465 | if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && |
4466 | (req_link_state == hba->uic_link_state)) | ||
4467 | goto out; | ||
4468 | |||
4469 | /* UFS device & link must be active before we enter in this function */ | ||
4470 | if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { | ||
4471 | ret = -EINVAL; | ||
4472 | goto out; | ||
4473 | } | ||
4474 | |||
4475 | if (ufshcd_is_runtime_pm(pm_op)) { | ||
4476 | /* | ||
4477 | * The device is idle with no requests in the queue, | ||
4478 | * allow background operations if needed. | ||
4479 | */ | ||
4480 | ret = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL); | ||
4481 | if (ret) | ||
4482 | goto out; | ||
4483 | } | ||
4484 | |||
4485 | if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && | ||
4486 | ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || | ||
4487 | !ufshcd_is_runtime_pm(pm_op))) { | ||
4488 | /* ensure that bkops is disabled */ | ||
4489 | ufshcd_disable_auto_bkops(hba); | ||
4490 | ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); | ||
4491 | if (ret) | ||
4492 | goto out; | ||
4493 | } | ||
4494 | |||
4495 | ret = ufshcd_link_state_transition(hba, req_link_state, 1); | ||
4496 | if (ret) | ||
4497 | goto set_dev_active; | ||
4498 | |||
4499 | ufshcd_vreg_set_lpm(hba); | ||
4500 | |||
4501 | disable_clks: | ||
4502 | /* | ||
4503 | * Call vendor specific suspend callback. As these callbacks may access | ||
4504 | * vendor specific host controller register space call them before the | ||
4505 | * host clocks are ON. | ||
4506 | */ | ||
4507 | if (hba->vops && hba->vops->suspend) { | ||
4508 | ret = hba->vops->suspend(hba, pm_op); | ||
4509 | if (ret) | ||
4510 | goto set_link_active; | ||
4511 | } | ||
4512 | |||
4513 | if (hba->vops && hba->vops->setup_clocks) { | ||
4514 | ret = hba->vops->setup_clocks(hba, false); | ||
4515 | if (ret) | ||
4516 | goto vops_resume; | ||
4517 | } | ||
4518 | |||
4519 | if (!ufshcd_is_link_active(hba)) | ||
4520 | ufshcd_setup_clocks(hba, false); | ||
4521 | else | ||
4522 | /* If link is active, device ref_clk can't be switched off */ | ||
4523 | __ufshcd_setup_clocks(hba, false, true); | ||
4524 | |||
4525 | /* | ||
4526 | * Disable the host irq as host controller as there won't be any | ||
4527 | * host controller trasanction expected till resume. | ||
4528 | */ | ||
4529 | ufshcd_disable_irq(hba); | ||
4530 | /* Put the host controller in low power mode if possible */ | ||
4531 | ufshcd_hba_vreg_set_lpm(hba); | ||
4532 | goto out; | ||
4533 | |||
4534 | vops_resume: | ||
4535 | if (hba->vops && hba->vops->resume) | ||
4536 | hba->vops->resume(hba, pm_op); | ||
4537 | set_link_active: | ||
4538 | ufshcd_vreg_set_hpm(hba); | ||
4539 | if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) | ||
4540 | ufshcd_set_link_active(hba); | ||
4541 | else if (ufshcd_is_link_off(hba)) | ||
4542 | ufshcd_host_reset_and_restore(hba); | ||
4543 | set_dev_active: | ||
4544 | if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) | ||
4545 | ufshcd_disable_auto_bkops(hba); | ||
4546 | out: | ||
4547 | hba->pm_op_in_progress = 0; | ||
4548 | return ret; | ||
3990 | } | 4549 | } |
3991 | EXPORT_SYMBOL_GPL(ufshcd_suspend); | ||
3992 | 4550 | ||
3993 | /** | 4551 | /** |
3994 | * ufshcd_resume - resume power management function | 4552 | * ufshcd_resume - helper function for resume operations |
3995 | * @hba: per adapter instance | 4553 | * @hba: per adapter instance |
4554 | * @pm_op: runtime PM or system PM | ||
3996 | * | 4555 | * |
3997 | * Returns -ENOSYS | 4556 | * This function basically brings the UFS device, UniPro link and controller |
4557 | * to active state. | ||
4558 | * | ||
4559 | * Returns 0 for success and non-zero for failure | ||
3998 | */ | 4560 | */ |
3999 | int ufshcd_resume(struct ufs_hba *hba) | 4561 | static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
4000 | { | 4562 | { |
4563 | int ret; | ||
4564 | enum uic_link_state old_link_state; | ||
4565 | |||
4566 | hba->pm_op_in_progress = 1; | ||
4567 | old_link_state = hba->uic_link_state; | ||
4568 | |||
4569 | ufshcd_hba_vreg_set_hpm(hba); | ||
4570 | /* Make sure clocks are enabled before accessing controller */ | ||
4571 | ret = ufshcd_setup_clocks(hba, true); | ||
4572 | if (ret) | ||
4573 | goto out; | ||
4574 | |||
4575 | if (hba->vops && hba->vops->setup_clocks) { | ||
4576 | ret = hba->vops->setup_clocks(hba, true); | ||
4577 | if (ret) | ||
4578 | goto disable_clks; | ||
4579 | } | ||
4580 | |||
4581 | /* enable the host irq as host controller would be active soon */ | ||
4582 | ret = ufshcd_enable_irq(hba); | ||
4583 | if (ret) | ||
4584 | goto disable_irq_and_vops_clks; | ||
4585 | |||
4586 | ret = ufshcd_vreg_set_hpm(hba); | ||
4587 | if (ret) | ||
4588 | goto disable_irq_and_vops_clks; | ||
4589 | |||
4001 | /* | 4590 | /* |
4002 | * TODO: | 4591 | * Call vendor specific resume callback. As these callbacks may access |
4003 | * 1. Set HCE to 1, to start the UFS host controller | 4592 | * vendor specific host controller register space call them when the |
4004 | * initialization process | 4593 | * host clocks are ON. |
4005 | * 2. Set UTRLRSR and UTMRLRSR bits to 1 | ||
4006 | * 3. Change the internal driver state to operational | ||
4007 | * 4. Unblock SCSI requests from SCSI midlayer | ||
4008 | */ | 4594 | */ |
4595 | if (hba->vops && hba->vops->resume) { | ||
4596 | ret = hba->vops->resume(hba, pm_op); | ||
4597 | if (ret) | ||
4598 | goto disable_vreg; | ||
4599 | } | ||
4600 | |||
4601 | if (ufshcd_is_link_hibern8(hba)) { | ||
4602 | ret = ufshcd_uic_hibern8_exit(hba); | ||
4603 | if (!ret) | ||
4604 | ufshcd_set_link_active(hba); | ||
4605 | else | ||
4606 | goto vendor_suspend; | ||
4607 | } else if (ufshcd_is_link_off(hba)) { | ||
4608 | ret = ufshcd_host_reset_and_restore(hba); | ||
4609 | /* | ||
4610 | * ufshcd_host_reset_and_restore() should have already | ||
4611 | * set the link state as active | ||
4612 | */ | ||
4613 | if (ret || !ufshcd_is_link_active(hba)) | ||
4614 | goto vendor_suspend; | ||
4615 | } | ||
4616 | |||
4617 | if (!ufshcd_is_ufs_dev_active(hba)) { | ||
4618 | ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); | ||
4619 | if (ret) | ||
4620 | goto set_old_link_state; | ||
4621 | } | ||
4622 | |||
4623 | ufshcd_disable_auto_bkops(hba); | ||
4624 | goto out; | ||
4625 | |||
4626 | set_old_link_state: | ||
4627 | ufshcd_link_state_transition(hba, old_link_state, 0); | ||
4628 | vendor_suspend: | ||
4629 | if (hba->vops && hba->vops->suspend) | ||
4630 | hba->vops->suspend(hba, pm_op); | ||
4631 | disable_vreg: | ||
4632 | ufshcd_vreg_set_lpm(hba); | ||
4633 | disable_irq_and_vops_clks: | ||
4634 | ufshcd_disable_irq(hba); | ||
4635 | if (hba->vops && hba->vops->setup_clocks) | ||
4636 | ret = hba->vops->setup_clocks(hba, false); | ||
4637 | disable_clks: | ||
4638 | ufshcd_setup_clocks(hba, false); | ||
4639 | out: | ||
4640 | hba->pm_op_in_progress = 0; | ||
4641 | return ret; | ||
4642 | } | ||
4643 | |||
4644 | /** | ||
4645 | * ufshcd_system_suspend - system suspend routine | ||
4646 | * @hba: per adapter instance | ||
4648 | * | ||
4649 | * Check the description of ufshcd_suspend() function for more details. | ||
4650 | * | ||
4651 | * Returns 0 for success and non-zero for failure | ||
4652 | */ | ||
4653 | int ufshcd_system_suspend(struct ufs_hba *hba) | ||
4654 | { | ||
4655 | int ret = 0; | ||
4656 | |||
4657 | if (!hba || !hba->is_powered) | ||
4658 | goto out; | ||
4659 | |||
4660 | if (pm_runtime_suspended(hba->dev)) { | ||
4661 | if (hba->rpm_lvl == hba->spm_lvl) | ||
4662 | /* | ||
4663 | * There is a possibility that the device may still be | ||
4664 | * in active state during runtime suspend. | ||
4665 | */ | ||
4666 | if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == | ||
4667 | hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled) | ||
4668 | goto out; | ||
4669 | |||
4670 | /* | ||
4671 | * UFS device and/or UFS link low power states during runtime | ||
4672 | * suspend may be different from what is expected during | ||
4673 | * system suspend. Hence runtime resume the device & link and | ||
4674 | * let the system suspend low power states take effect. | ||
4675 | * TODO: If resume takes longer, we might optimize it in | ||
4676 | * future by not resuming everything if possible. | ||
4677 | */ | ||
4678 | ret = ufshcd_runtime_resume(hba); | ||
4679 | if (ret) | ||
4680 | goto out; | ||
4681 | } | ||
4682 | |||
4683 | ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); | ||
4684 | out: | ||
4685 | return ret; | ||
4686 | } | ||
4687 | EXPORT_SYMBOL(ufshcd_system_suspend); | ||
4688 | |||
4689 | /** | ||
4690 | * ufshcd_system_resume - system resume routine | ||
4691 | * @hba: per adapter instance | ||
4692 | * | ||
4693 | * Returns 0 for success and non-zero for failure | ||
4694 | */ | ||
4009 | 4695 | ||
4010 | return -ENOSYS; | 4696 | int ufshcd_system_resume(struct ufs_hba *hba) |
4697 | { | ||
4698 | if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) | ||
4699 | /* | ||
4700 | * Let the runtime resume take care of resuming | ||
4701 | * if runtime suspended. | ||
4702 | */ | ||
4703 | return 0; | ||
4704 | |||
4705 | return ufshcd_resume(hba, UFS_SYSTEM_PM); | ||
4011 | } | 4706 | } |
4012 | EXPORT_SYMBOL_GPL(ufshcd_resume); | 4707 | EXPORT_SYMBOL(ufshcd_system_resume); |
4013 | 4708 | ||
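A minimal sketch of how a bus-glue driver (ufshcd-pci or ufshcd-pltfrm) might route the standard dev_pm_ops callbacks to the two helpers exported above; the example_* names and the assumption that the hba pointer was stored with dev_set_drvdata() at probe time are illustrative only, not taken from this patch.

#include <linux/pm.h>

static int example_ufs_pltfrm_suspend(struct device *dev)
{
	/* hba is assumed to have been saved with dev_set_drvdata() at probe */
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return ufshcd_system_suspend(hba);
}

static int example_ufs_pltfrm_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return ufshcd_system_resume(hba);
}

static const struct dev_pm_ops example_ufs_pm_ops = {
	.suspend	= example_ufs_pltfrm_suspend,
	.resume		= example_ufs_pltfrm_resume,
};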
4709 | /** | ||
4710 | * ufshcd_runtime_suspend - runtime suspend routine | ||
4711 | * @hba: per adapter instance | ||
4712 | * | ||
4713 | * Check the description of ufshcd_suspend() function for more details. | ||
4714 | * | ||
4715 | * Returns 0 for success and non-zero for failure | ||
4716 | */ | ||
4014 | int ufshcd_runtime_suspend(struct ufs_hba *hba) | 4717 | int ufshcd_runtime_suspend(struct ufs_hba *hba) |
4015 | { | 4718 | { |
4016 | if (!hba) | 4719 | if (!hba || !hba->is_powered) |
4017 | return 0; | 4720 | return 0; |
4018 | 4721 | ||
4019 | /* | 4722 | return ufshcd_suspend(hba, UFS_RUNTIME_PM); |
4020 | * The device is idle with no requests in the queue, | ||
4021 | * allow background operations. | ||
4022 | */ | ||
4023 | return ufshcd_enable_auto_bkops(hba); | ||
4024 | } | 4723 | } |
4025 | EXPORT_SYMBOL(ufshcd_runtime_suspend); | 4724 | EXPORT_SYMBOL(ufshcd_runtime_suspend); |
4026 | 4725 | ||
4726 | /** | ||
4727 | * ufshcd_runtime_resume - runtime resume routine | ||
4728 | * @hba: per adapter instance | ||
4729 | * | ||
4730 | * This function basically brings the UFS device, UniPro link and controller | ||
4731 | * to active state. The following operations are done in this function: | ||
4732 | * | ||
4733 | * 1. Turn on all the controller related clocks | ||
4734 | * 2. Bring the UniPro link out of Hibernate state | ||
4735 | * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device | ||
4736 | * to active state. | ||
4737 | * 4. If auto-bkops is enabled on the device, disable it. | ||
4738 | * | ||
4739 | * So the following would be the possible power state after this function | ||
4740 | * returns successfully: | ||
4741 | * S1: UFS device in Active state with VCC rail ON | ||
4742 | * UniPro link in Active state | ||
4743 | * All the UFS/UniPro controller clocks are ON | ||
4744 | * | ||
4745 | * Returns 0 for success and non-zero for failure | ||
4746 | */ | ||
4027 | int ufshcd_runtime_resume(struct ufs_hba *hba) | 4747 | int ufshcd_runtime_resume(struct ufs_hba *hba) |
4028 | { | 4748 | { |
4029 | if (!hba) | 4749 | if (!hba || !hba->is_powered) |
4030 | return 0; | 4750 | return 0; |
4031 | 4751 | else | |
4032 | return ufshcd_disable_auto_bkops(hba); | 4752 | return ufshcd_resume(hba, UFS_RUNTIME_PM); |
4033 | } | 4753 | } |
4034 | EXPORT_SYMBOL(ufshcd_runtime_resume); | 4754 | EXPORT_SYMBOL(ufshcd_runtime_resume); |
4035 | 4755 | ||
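To show where these runtime hooks fire, here is a hedged sketch of a request path holding a runtime PM reference on the host; example_issue_request() is hypothetical, and the autosuspend calls assume autosuspend has been enabled on hba->dev.

#include <linux/pm_runtime.h>

static int example_issue_request(struct ufs_hba *hba)
{
	int err;

	/* Wakes the host via ufshcd_runtime_resume() if it is runtime suspended */
	err = pm_runtime_get_sync(hba->dev);
	if (err < 0) {
		pm_runtime_put_noidle(hba->dev);
		return err;
	}

	/* ... build and issue the SCSI/query command here ... */

	/* Drop the reference; the PM core may later call ufshcd_runtime_suspend() */
	pm_runtime_mark_last_busy(hba->dev);
	pm_runtime_put_autosuspend(hba->dev);

	return 0;
}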
@@ -4040,6 +4760,36 @@ int ufshcd_runtime_idle(struct ufs_hba *hba) | |||
4040 | EXPORT_SYMBOL(ufshcd_runtime_idle); | 4760 | EXPORT_SYMBOL(ufshcd_runtime_idle); |
4041 | 4761 | ||
4042 | /** | 4762 | /** |
4763 | * ufshcd_shutdown - shutdown routine | ||
4764 | * @hba: per adapter instance | ||
4765 | * | ||
4766 | * This function would power off both UFS device and UFS link. | ||
4767 | * | ||
4768 | * Returns 0 always to allow force shutdown even in case of errors. | ||
4769 | */ | ||
4770 | int ufshcd_shutdown(struct ufs_hba *hba) | ||
4771 | { | ||
4772 | int ret = 0; | ||
4773 | |||
4774 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) | ||
4775 | goto out; | ||
4776 | |||
4777 | if (pm_runtime_suspended(hba->dev)) { | ||
4778 | ret = ufshcd_runtime_resume(hba); | ||
4779 | if (ret) | ||
4780 | goto out; | ||
4781 | } | ||
4782 | |||
4783 | ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); | ||
4784 | out: | ||
4785 | if (ret) | ||
4786 | dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); | ||
4787 | /* allow force shutdown even in case of errors */ | ||
4788 | return 0; | ||
4789 | } | ||
4790 | EXPORT_SYMBOL(ufshcd_shutdown); | ||
4791 | |||
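For illustration, a platform-glue .shutdown hook could simply forward to ufshcd_shutdown(); the function below and its use of platform_get_drvdata() are assumptions, not part of this hunk.

#include <linux/platform_device.h>

static void example_ufs_pltfrm_shutdown(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	/* Return value is always 0 by design, so shutdown is never blocked */
	ufshcd_shutdown(hba);
}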
4792 | /** | ||
4043 | * ufshcd_remove - de-allocate SCSI host and host memory space | 4793 | * ufshcd_remove - de-allocate SCSI host and host memory space |
4044 | * data structure memory | 4794 | * data structure memory |
4045 | * @hba - per adapter instance | 4795 | * @hba - per adapter instance |
@@ -4192,6 +4942,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) | |||
4192 | if (err) { | 4942 | if (err) { |
4193 | dev_err(hba->dev, "request irq failed\n"); | 4943 | dev_err(hba->dev, "request irq failed\n"); |
4194 | goto out_disable; | 4944 | goto out_disable; |
4945 | } else { | ||
4946 | hba->is_irq_enabled = true; | ||
4195 | } | 4947 | } |
4196 | 4948 | ||
4197 | /* Enable SCSI tag mapping */ | 4949 | /* Enable SCSI tag mapping */ |
@@ -4217,6 +4969,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) | |||
4217 | /* Hold auto suspend until async scan completes */ | 4969 | /* Hold auto suspend until async scan completes */ |
4218 | pm_runtime_get_sync(dev); | 4970 | pm_runtime_get_sync(dev); |
4219 | 4971 | ||
4972 | /* | ||
4973 | * The device initialization sequence hasn't been invoked yet. | ||
4974 | * Set the device to the power-off state. | ||
4975 | */ | ||
4976 | ufshcd_set_ufs_dev_poweroff(hba); | ||
4977 | |||
4220 | async_schedule(ufshcd_async_scan, hba); | 4978 | async_schedule(ufshcd_async_scan, hba); |
4221 | 4979 | ||
4222 | return 0; | 4980 | return 0; |
@@ -4224,6 +4982,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) | |||
4224 | out_remove_scsi_host: | 4982 | out_remove_scsi_host: |
4225 | scsi_remove_host(hba->host); | 4983 | scsi_remove_host(hba->host); |
4226 | out_disable: | 4984 | out_disable: |
4985 | hba->is_irq_enabled = false; | ||
4227 | scsi_host_put(host); | 4986 | scsi_host_put(host); |
4228 | ufshcd_hba_exit(hba); | 4987 | ufshcd_hba_exit(hba); |
4229 | out_error: | 4988 | out_error: |
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 5c25337bfcc8..e1bde0598d92 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h | |||
@@ -96,6 +96,54 @@ struct uic_command { | |||
96 | struct completion done; | 96 | struct completion done; |
97 | }; | 97 | }; |
98 | 98 | ||
99 | /* Used to differentiate the power management options */ | ||
100 | enum ufs_pm_op { | ||
101 | UFS_RUNTIME_PM, | ||
102 | UFS_SYSTEM_PM, | ||
103 | UFS_SHUTDOWN_PM, | ||
104 | }; | ||
105 | |||
106 | #define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM) | ||
107 | #define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM) | ||
108 | #define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM) | ||
109 | |||
110 | /* Host <-> Device UniPro Link state */ | ||
111 | enum uic_link_state { | ||
112 | UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */ | ||
113 | UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */ | ||
114 | UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */ | ||
115 | }; | ||
116 | |||
117 | #define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE) | ||
118 | #define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \ | ||
119 | UIC_LINK_ACTIVE_STATE) | ||
120 | #define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \ | ||
121 | UIC_LINK_HIBERN8_STATE) | ||
122 | #define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE) | ||
123 | #define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \ | ||
124 | UIC_LINK_ACTIVE_STATE) | ||
125 | #define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \ | ||
126 | UIC_LINK_HIBERN8_STATE) | ||
127 | |||
128 | /* | ||
129 | * UFS Power management levels. | ||
130 | * Each level is in increasing order of power savings. | ||
131 | */ | ||
132 | enum ufs_pm_level { | ||
133 | UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */ | ||
134 | UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */ | ||
135 | UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */ | ||
136 | UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */ | ||
137 | UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */ | ||
138 | UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */ | ||
139 | UFS_PM_LVL_MAX | ||
140 | }; | ||
141 | |||
142 | struct ufs_pm_lvl_states { | ||
143 | enum ufs_dev_pwr_mode dev_state; | ||
144 | enum uic_link_state link_state; | ||
145 | }; | ||
146 | |||
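ufshcd_system_suspend() above consults ufs_get_pm_lvl_to_dev_pwr_mode(); its definition is outside this hunk, so the following is only a plausible sketch of how a struct ufs_pm_lvl_states table indexed by enum ufs_pm_level could back that helper, mirroring the per-level comments above.

static struct ufs_pm_lvl_states ufs_pm_lvl_states_table[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},		/* UFS_PM_LVL_0 */
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},		/* UFS_PM_LVL_1 */
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},		/* UFS_PM_LVL_2 */
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},		/* UFS_PM_LVL_3 */
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},	/* UFS_PM_LVL_4 */
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},		/* UFS_PM_LVL_5 */
};

#define ufs_get_pm_lvl_to_dev_pwr_mode(lvl)	\
	(ufs_pm_lvl_states_table[(lvl)].dev_state)
#define ufs_get_pm_lvl_to_link_pwr_state(lvl)	\
	(ufs_pm_lvl_states_table[(lvl)].link_state)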
99 | /** | 147 | /** |
100 | * struct ufshcd_lrb - local reference block | 148 | * struct ufshcd_lrb - local reference block |
101 | * @utr_descriptor_ptr: UTRD address of the command | 149 | * @utr_descriptor_ptr: UTRD address of the command |
@@ -184,6 +232,8 @@ struct ufs_clk_info { | |||
184 | * variant specific Uni-Pro initialization. | 232 | * variant specific Uni-Pro initialization. |
185 | * @link_startup_notify: called before and after Link startup is carried out | 233 | * @link_startup_notify: called before and after Link startup is carried out |
186 | * to allow variant specific Uni-Pro initialization. | 234 | * to allow variant specific Uni-Pro initialization. |
235 | * @suspend: called during host controller PM callback | ||
236 | * @resume: called during host controller PM callback | ||
187 | */ | 237 | */ |
188 | struct ufs_hba_variant_ops { | 238 | struct ufs_hba_variant_ops { |
189 | const char *name; | 239 | const char *name; |
@@ -193,6 +243,8 @@ struct ufs_hba_variant_ops { | |||
193 | int (*setup_regulators)(struct ufs_hba *, bool); | 243 | int (*setup_regulators)(struct ufs_hba *, bool); |
194 | int (*hce_enable_notify)(struct ufs_hba *, bool); | 244 | int (*hce_enable_notify)(struct ufs_hba *, bool); |
195 | int (*link_startup_notify)(struct ufs_hba *, bool); | 245 | int (*link_startup_notify)(struct ufs_hba *, bool); |
246 | int (*suspend)(struct ufs_hba *, enum ufs_pm_op); | ||
247 | int (*resume)(struct ufs_hba *, enum ufs_pm_op); | ||
196 | }; | 248 | }; |
197 | 249 | ||
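To make the intent of the two new callbacks concrete, a hypothetical variant driver could register them as below; everything named example_* is an assumption and no real vendor hooks are implied.

static int example_hba_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	/* Runtime PM is frequent, so keep it cheap */
	if (ufshcd_is_runtime_pm(pm_op))
		return 0;

	/* System suspend / shutdown: apply deeper vendor-specific savings here */
	return 0;
}

static int example_hba_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	/* Undo whatever example_hba_suspend() did for this pm_op */
	return 0;
}

static struct ufs_hba_variant_ops example_hba_vops = {
	.name		= "example",
	.suspend	= example_hba_suspend,
	.resume		= example_hba_resume,
};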
198 | /** | 250 | /** |
@@ -274,6 +326,14 @@ struct ufs_hba { | |||
274 | struct scsi_device *sdev_rpmb; | 326 | struct scsi_device *sdev_rpmb; |
275 | struct scsi_device *sdev_boot; | 327 | struct scsi_device *sdev_boot; |
276 | 328 | ||
329 | enum ufs_dev_pwr_mode curr_dev_pwr_mode; | ||
330 | enum uic_link_state uic_link_state; | ||
331 | /* Desired UFS power management level during runtime PM */ | ||
332 | enum ufs_pm_level rpm_lvl; | ||
333 | /* Desired UFS power management level during system PM */ | ||
334 | enum ufs_pm_level spm_lvl; | ||
335 | int pm_op_in_progress; | ||
336 | |||
277 | struct ufshcd_lrb *lrb; | 337 | struct ufshcd_lrb *lrb; |
278 | unsigned long lrb_in_use; | 338 | unsigned long lrb_in_use; |
279 | 339 | ||
@@ -287,16 +347,17 @@ struct ufs_hba { | |||
287 | struct ufs_hba_variant_ops *vops; | 347 | struct ufs_hba_variant_ops *vops; |
288 | void *priv; | 348 | void *priv; |
289 | unsigned int irq; | 349 | unsigned int irq; |
350 | bool is_irq_enabled; | ||
290 | 351 | ||
291 | struct uic_command *active_uic_cmd; | ||
292 | struct mutex uic_cmd_mutex; | ||
293 | 352 | ||
294 | wait_queue_head_t tm_wq; | 353 | wait_queue_head_t tm_wq; |
295 | wait_queue_head_t tm_tag_wq; | 354 | wait_queue_head_t tm_tag_wq; |
296 | unsigned long tm_condition; | 355 | unsigned long tm_condition; |
297 | unsigned long tm_slots_in_use; | 356 | unsigned long tm_slots_in_use; |
298 | 357 | ||
299 | struct completion *pwr_done; | 358 | struct uic_command *active_uic_cmd; |
359 | struct mutex uic_cmd_mutex; | ||
360 | struct completion *uic_async_done; | ||
300 | 361 | ||
301 | u32 ufshcd_state; | 362 | u32 ufshcd_state; |
302 | u32 eh_flags; | 363 | u32 eh_flags; |
@@ -319,9 +380,13 @@ struct ufs_hba { | |||
319 | /* Device management request data */ | 380 | /* Device management request data */ |
320 | struct ufs_dev_cmd dev_cmd; | 381 | struct ufs_dev_cmd dev_cmd; |
321 | 382 | ||
383 | /* Keeps information about the UFS device connected to this host */ | ||
384 | struct ufs_dev_info dev_info; | ||
322 | bool auto_bkops_enabled; | 385 | bool auto_bkops_enabled; |
323 | struct ufs_vreg_info vreg_info; | 386 | struct ufs_vreg_info vreg_info; |
324 | struct list_head clk_list_head; | 387 | struct list_head clk_list_head; |
388 | |||
389 | bool wlun_dev_clr_ua; | ||
325 | }; | 390 | }; |
326 | 391 | ||
327 | #define ufshcd_writel(hba, val, reg) \ | 392 | #define ufshcd_writel(hba, val, reg) \ |
@@ -348,11 +413,12 @@ static inline void check_upiu_size(void) | |||
348 | GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); | 413 | GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); |
349 | } | 414 | } |
350 | 415 | ||
351 | extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state); | ||
352 | extern int ufshcd_resume(struct ufs_hba *hba); | ||
353 | extern int ufshcd_runtime_suspend(struct ufs_hba *hba); | 416 | extern int ufshcd_runtime_suspend(struct ufs_hba *hba); |
354 | extern int ufshcd_runtime_resume(struct ufs_hba *hba); | 417 | extern int ufshcd_runtime_resume(struct ufs_hba *hba); |
355 | extern int ufshcd_runtime_idle(struct ufs_hba *hba); | 418 | extern int ufshcd_runtime_idle(struct ufs_hba *hba); |
419 | extern int ufshcd_system_suspend(struct ufs_hba *hba); | ||
420 | extern int ufshcd_system_resume(struct ufs_hba *hba); | ||
421 | extern int ufshcd_shutdown(struct ufs_hba *hba); | ||
356 | extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, | 422 | extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, |
357 | u8 attr_set, u32 mib_val, u8 peer); | 423 | u8 attr_set, u32 mib_val, u8 peer); |
358 | extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | 424 | extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, |
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index e1b844bc9460..d5721199e9cc 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h | |||
@@ -124,8 +124,11 @@ enum { | |||
124 | #define CONTROLLER_FATAL_ERROR UFS_BIT(16) | 124 | #define CONTROLLER_FATAL_ERROR UFS_BIT(16) |
125 | #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) | 125 | #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) |
126 | 126 | ||
127 | #define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\ | 127 | #define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\ |
128 | UIC_POWER_MODE) | 128 | UIC_HIBERNATE_EXIT |\ |
129 | UIC_POWER_MODE) | ||
130 | |||
131 | #define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK) | ||
129 | 132 | ||
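A sketch of how the widened mask could be consumed by the UIC completion path in ufshcd.c (that code is outside this hunk, so the shape below is an assumption): hibernate enter/exit and power-mode completions wake whatever is waiting on hba->uic_async_done, while plain UIC command completion keeps using the per-command completion.

static void example_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd)
		complete(&hba->active_uic_cmd->done);

	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
		complete(hba->uic_async_done);
}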
130 | #define UFSHCD_ERROR_MASK (UIC_ERROR |\ | 133 | #define UFSHCD_ERROR_MASK (UIC_ERROR |\ |
131 | DEVICE_FATAL_ERROR |\ | 134 | DEVICE_FATAL_ERROR |\ |
@@ -210,7 +213,7 @@ enum { | |||
210 | #define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) | 213 | #define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) |
211 | 214 | ||
212 | /* UIC Commands */ | 215 | /* UIC Commands */ |
213 | enum { | 216 | enum uic_cmd_dme { |
214 | UIC_CMD_DME_GET = 0x01, | 217 | UIC_CMD_DME_GET = 0x01, |
215 | UIC_CMD_DME_SET = 0x02, | 218 | UIC_CMD_DME_SET = 0x02, |
216 | UIC_CMD_DME_PEER_GET = 0x03, | 219 | UIC_CMD_DME_PEER_GET = 0x03, |