Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--	drivers/scsi/ufs/ufs.h		 36
-rw-r--r--	drivers/scsi/ufs/ufshcd.c	722
-rw-r--r--	drivers/scsi/ufs/ufshcd.h	 22
-rw-r--r--	drivers/scsi/ufs/ufshci.h	 32
4 files changed, 545 insertions, 267 deletions
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 721050090520..f42d1cee652a 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -196,9 +196,9 @@ enum {
  * @dword_2: UPIU header DW-2
  */
 struct utp_upiu_header {
-	u32 dword_0;
-	u32 dword_1;
-	u32 dword_2;
+	__be32 dword_0;
+	__be32 dword_1;
+	__be32 dword_2;
 };
 
 /**
@@ -207,7 +207,7 @@ struct utp_upiu_header {
  * @cdb: Command Descriptor Block CDB DW-4 to DW-7
  */
 struct utp_upiu_cmd {
-	u32 exp_data_transfer_len;
+	__be32 exp_data_transfer_len;
 	u8 cdb[MAX_CDB_SIZE];
 };
 
@@ -228,10 +228,10 @@ struct utp_upiu_query {
 	u8 idn;
 	u8 index;
 	u8 selector;
-	u16 reserved_osf;
-	u16 length;
-	u32 value;
-	u32 reserved[2];
+	__be16 reserved_osf;
+	__be16 length;
+	__be32 value;
+	__be32 reserved[2];
 };
 
 /**
@@ -256,9 +256,9 @@ struct utp_upiu_req {
  * @sense_data: Sense data field DW-8 to DW-12
  */
 struct utp_cmd_rsp {
-	u32 residual_transfer_count;
-	u32 reserved[4];
-	u16 sense_data_len;
+	__be32 residual_transfer_count;
+	__be32 reserved[4];
+	__be16 sense_data_len;
 	u8 sense_data[18];
 };
 
@@ -286,10 +286,10 @@ struct utp_upiu_rsp {
  */
 struct utp_upiu_task_req {
 	struct utp_upiu_header header;
-	u32 input_param1;
-	u32 input_param2;
-	u32 input_param3;
-	u32 reserved[2];
+	__be32 input_param1;
+	__be32 input_param2;
+	__be32 input_param3;
+	__be32 reserved[2];
 };
 
295/** 295/**
@@ -301,9 +301,9 @@ struct utp_upiu_task_req {
  */
 struct utp_upiu_task_rsp {
 	struct utp_upiu_header header;
-	u32 output_param1;
-	u32 output_param2;
-	u32 reserved[3];
+	__be32 output_param1;
+	__be32 output_param2;
+	__be32 reserved[3];
 };
 
 /**
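
Note on the ufs.h hunks above: the UPIU is a big-endian wire format, so the fields become __be16/__be32 and every access must go through the byte-order helpers, which lets sparse (make C=1) flag any direct load or store. A minimal sketch of the resulting access pattern; the function names here are illustrative, not from the patch:

	/* Sketch only: store/load a __be32 UPIU field with explicit
	 * conversions so sparse can verify endian correctness.
	 */
	static void example_set_value(struct utp_upiu_query *q, u32 cpu_val)
	{
		q->value = cpu_to_be32(cpu_val);	/* CPU order -> wire order */
	}

	static u32 example_get_value(struct utp_upiu_query *q)
	{
		return be32_to_cpu(q->value);		/* wire order -> CPU order */
	}
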
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 04884d663e4e..0c2877251251 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -55,6 +55,9 @@
 /* Query request timeout */
 #define QUERY_REQ_TIMEOUT 30 /* msec */
 
+/* Task management command timeout */
+#define TM_CMD_TIMEOUT	100 /* msecs */
+
 /* Expose the flag value from utp_upiu_query.value */
 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
 
@@ -71,9 +74,22 @@ enum {
 
 /* UFSHCD states */
 enum {
-	UFSHCD_STATE_OPERATIONAL,
 	UFSHCD_STATE_RESET,
 	UFSHCD_STATE_ERROR,
+	UFSHCD_STATE_OPERATIONAL,
+};
+
+/* UFSHCD error handling flags */
+enum {
+	UFSHCD_EH_IN_PROGRESS = (1 << 0),
+};
+
+/* UFSHCD UIC layer error flags */
+enum {
+	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
+	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
+	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
+	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
 };
 
 /* Interrupt configuration options */
@@ -83,6 +99,18 @@ enum {
 	UFSHCD_INT_CLEAR,
 };
 
+#define ufshcd_set_eh_in_progress(h) \
+	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_eh_in_progress(h) \
+	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_clear_eh_in_progress(h) \
+	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+
+static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static void ufshcd_async_scan(void *data, async_cookie_t cookie);
+static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
+
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -163,7 +191,7 @@ static inline int ufshcd_is_device_present(u32 reg_hcs)
  */
 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 {
-	return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
+	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 }
 
 /**
@@ -176,19 +204,41 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 static inline int
 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
 {
-	return task_req_descp->header.dword_2 & MASK_OCS;
+	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
 }
 
 /**
  * ufshcd_get_tm_free_slot - get a free slot for task management request
  * @hba: per adapter instance
+ * @free_slot: pointer to variable with available slot value
  *
- * Returns maximum number of task management request slots in case of
- * task management queue full or returns the free slot number
+ * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
+ * Returns 0 if free slot is not available, else return 1 with tag value
+ * in @free_slot.
  */
-static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
+static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
 {
-	return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
+	int tag;
+	bool ret = false;
+
+	if (!free_slot)
+		goto out;
+
+	do {
+		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
+		if (tag >= hba->nutmrs)
+			goto out;
+	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
+
+	*free_slot = tag;
+	ret = true;
+out:
+	return ret;
+}
+
+static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
+{
+	clear_bit_unlock(slot, &hba->tm_slots_in_use);
 }
 
 /**
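
The new slot allocator above follows the usual lockless bitmap-tag idiom: find a candidate zero bit, claim it with an atomic test-and-set, and retry if another context raced in between. A stand-alone sketch of that idiom, with a hypothetical helper name:

	/* Sketch: claim one slot from a busy-bitmap without a spinlock.
	 * find_first_zero_bit() is only a hint; test_and_set_bit_lock()
	 * is the atomic claim, so losing a race just loops again.
	 */
	static int example_claim_slot(unsigned long *map, int nr_slots)
	{
		int bit;

		do {
			bit = find_first_zero_bit(map, nr_slots);
			if (bit >= nr_slots)
				return -EBUSY;	/* no free slot right now */
		} while (test_and_set_bit_lock(bit, map));

		return bit;	/* paired with clear_bit_unlock() on release */
	}
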
@@ -390,26 +440,6 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
 }
 
 /**
- * ufshcd_query_to_cpu() - formats the buffer to native cpu endian
- * @response: upiu query response to convert
- */
-static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response)
-{
-	response->length = be16_to_cpu(response->length);
-	response->value = be32_to_cpu(response->value);
-}
-
-/**
- * ufshcd_query_to_be() - formats the buffer to big endian
- * @request: upiu query request to convert
- */
-static inline void ufshcd_query_to_be(struct utp_upiu_query *request)
-{
-	request->length = cpu_to_be16(request->length);
-	request->value = cpu_to_be32(request->value);
-}
-
-/**
  * ufshcd_copy_query_response() - Copy the Query Response and the data
  * descriptor
  * @hba: per adapter instance
@@ -425,7 +455,6 @@ void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 			UPIU_RSP_CODE_OFFSET;
 
 	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
-	ufshcd_query_to_cpu(&query_res->upiu_res);
 
 
 	/* Get the descriptor */
@@ -749,7 +778,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
 {
 	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
 	struct ufs_query *query = &hba->dev_cmd.query;
-	u16 len = query->request.upiu_req.length;
+	u16 len = be16_to_cpu(query->request.upiu_req.length);
 	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
 
 	/* Query request header */
@@ -766,7 +795,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
 	/* Copy the Query Request buffer as is */
 	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
 			QUERY_OSF_SIZE);
-	ufshcd_query_to_be(&ucd_req_ptr->qr);
 
 	/* Copy the Descriptor */
 	if ((len > 0) && (query->request.upiu_req.opcode ==
@@ -853,10 +881,25 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	tag = cmd->request->tag;
 
-	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	switch (hba->ufshcd_state) {
+	case UFSHCD_STATE_OPERATIONAL:
+		break;
+	case UFSHCD_STATE_RESET:
 		err = SCSI_MLQUEUE_HOST_BUSY;
-		goto out;
+		goto out_unlock;
+	case UFSHCD_STATE_ERROR:
+		set_host_byte(cmd, DID_ERROR);
+		cmd->scsi_done(cmd);
+		goto out_unlock;
+	default:
+		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+				__func__, hba->ufshcd_state);
+		set_host_byte(cmd, DID_BAD_TARGET);
+		cmd->scsi_done(cmd);
+		goto out_unlock;
 	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* acquire the tag to make sure device cmds don't use it */
 	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
@@ -893,6 +936,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	/* issue command to the controller */
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_send_command(hba, tag);
+out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
 	return err;
@@ -1151,7 +1195,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	}
 
 	if (flag_res)
-		*flag_res = (response->upiu_res.value &
+		*flag_res = (be32_to_cpu(response->upiu_res.value) &
 				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
 
 out_unlock:
@@ -1170,7 +1214,7 @@ out_unlock:
  *
  * Returns 0 for success, non-zero in case of failure
 */
-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
 {
 	struct ufs_query_req *request;
@@ -1195,7 +1239,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 	switch (opcode) {
 	case UPIU_QUERY_OPCODE_WRITE_ATTR:
 		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
-		request->upiu_req.value = *attr_val;
+		request->upiu_req.value = cpu_to_be32(*attr_val);
 		break;
 	case UPIU_QUERY_OPCODE_READ_ATTR:
 		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
@@ -1222,7 +1266,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 		goto out_unlock;
 	}
 
-	*attr_val = response->upiu_res.value;
+	*attr_val = be32_to_cpu(response->upiu_res.value);
 
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
@@ -1481,7 +1525,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
  *
  * Returns 0 on success, non-zero value on failure
  */
-int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
 	struct uic_command uic_cmd = {0};
 	struct completion pwr_done;
@@ -1701,11 +1745,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
 		goto out;
 	}
 
-	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
-		scsi_unblock_requests(hba->host);
-
-	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-
 out:
 	return err;
 }
@@ -1831,66 +1870,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 }
 
 /**
- * ufshcd_do_reset - reset the host controller
- * @hba: per adapter instance
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_do_reset(struct ufs_hba *hba)
-{
-	struct ufshcd_lrb *lrbp;
-	unsigned long flags;
-	int tag;
-
-	/* block commands from midlayer */
-	scsi_block_requests(hba->host);
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->ufshcd_state = UFSHCD_STATE_RESET;
-
-	/* send controller to reset state */
-	ufshcd_hba_stop(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	/* abort outstanding commands */
-	for (tag = 0; tag < hba->nutrs; tag++) {
-		if (test_bit(tag, &hba->outstanding_reqs)) {
-			lrbp = &hba->lrb[tag];
-			if (lrbp->cmd) {
-				scsi_dma_unmap(lrbp->cmd);
-				lrbp->cmd->result = DID_RESET << 16;
-				lrbp->cmd->scsi_done(lrbp->cmd);
-				lrbp->cmd = NULL;
-				clear_bit_unlock(tag, &hba->lrb_in_use);
-			}
-		}
-	}
-
-	/* complete device management command */
-	if (hba->dev_cmd.complete)
-		complete(hba->dev_cmd.complete);
-
-	/* clear outstanding request/task bit maps */
-	hba->outstanding_reqs = 0;
-	hba->outstanding_tasks = 0;
-
-	/* Host controller enable */
-	if (ufshcd_hba_enable(hba)) {
-		dev_err(hba->dev,
-			"Reset: Controller initialization failed\n");
-		return FAILED;
-	}
-
-	if (ufshcd_link_startup(hba)) {
-		dev_err(hba->dev,
-			"Reset: Link start-up failed\n");
-		return FAILED;
-	}
-
-	return SUCCESS;
-}
-
-/**
  * ufshcd_slave_alloc - handle initial SCSI device configurations
  * @sdev: pointer to SCSI device
  *
@@ -1907,6 +1886,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
 	sdev->use_10_for_ms = 1;
 	scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
 
+	/* allow SCSI layer to restart the device in case of errors */
+	sdev->allow_restart = 1;
+
 	/*
 	 * Inform SCSI Midlayer that the LUN queue depth is same as the
 	 * controller queue depth. If a LUN queue depth is less than the
@@ -1934,10 +1916,11 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
  * ufshcd_task_req_compl - handle task management request completion
  * @hba: per adapter instance
  * @index: index of the completed request
+ * @resp: task management service response
  *
- * Returns SUCCESS/FAILED
+ * Returns non-zero value on error, zero on success
  */
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
 {
 	struct utp_task_req_desc *task_req_descp;
 	struct utp_upiu_task_rsp *task_rsp_upiup;
@@ -1958,19 +1941,15 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
 			task_req_descp[index].task_rsp_upiu;
 		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
 		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
-
-		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
-		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
-			task_result = FAILED;
-		else
-			task_result = SUCCESS;
+		if (resp)
+			*resp = (u8)task_result;
 	} else {
-		task_result = FAILED;
-		dev_err(hba->dev,
-			"trc: Invalid ocs = %x\n", ocs_value);
+		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+				__func__, ocs_value);
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	return task_result;
+
+	return ocs_value;
 }
 
 /**
@@ -2105,6 +2084,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	case OCS_ABORTED:
 		result |= DID_ABORT << 16;
 		break;
+	case OCS_INVALID_COMMAND_STATUS:
+		result |= DID_REQUEUE << 16;
+		break;
 	case OCS_INVALID_CMD_TABLE_ATTR:
 	case OCS_INVALID_PRDT_ATTR:
 	case OCS_MISMATCH_DATA_BUF_SIZE:
@@ -2422,41 +2404,145 @@ out:
 }
 
 /**
- * ufshcd_fatal_err_handler - handle fatal errors
- * @hba: per adapter instance
+ * ufshcd_err_handler - handle UFS errors that require s/w attention
+ * @work: pointer to work structure
  */
-static void ufshcd_fatal_err_handler(struct work_struct *work)
+static void ufshcd_err_handler(struct work_struct *work)
 {
 	struct ufs_hba *hba;
-	hba = container_of(work, struct ufs_hba, feh_workq);
+	unsigned long flags;
+	u32 err_xfer = 0;
+	u32 err_tm = 0;
+	int err = 0;
+	int tag;
+
+	hba = container_of(work, struct ufs_hba, eh_work);
 
 	pm_runtime_get_sync(hba->dev);
-	/* check if reset is already in progress */
-	if (hba->ufshcd_state != UFSHCD_STATE_RESET)
-		ufshcd_do_reset(hba);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		goto out;
+	}
+
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+	ufshcd_set_eh_in_progress(hba);
+
+	/* Complete requests that have door-bell cleared by h/w */
+	ufshcd_transfer_req_compl(hba);
+	ufshcd_tmc_handler(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* Clear pending transfer requests */
+	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
+		if (ufshcd_clear_cmd(hba, tag))
+			err_xfer |= 1 << tag;
+
+	/* Clear pending task management requests */
+	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
+		if (ufshcd_clear_tm_cmd(hba, tag))
+			err_tm |= 1 << tag;
+
+	/* Complete the requests that are cleared by s/w */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_transfer_req_compl(hba);
+	ufshcd_tmc_handler(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* Fatal errors need reset */
+	if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
+			((hba->saved_err & UIC_ERROR) &&
+			(hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
+		err = ufshcd_reset_and_restore(hba);
+		if (err) {
+			dev_err(hba->dev, "%s: reset and restore failed\n",
+					__func__);
+			hba->ufshcd_state = UFSHCD_STATE_ERROR;
+		}
+		/*
+		 * Inform scsi mid-layer that we did reset and allow to handle
+		 * Unit Attention properly.
+		 */
+		scsi_report_bus_reset(hba->host, 0);
+		hba->saved_err = 0;
+		hba->saved_uic_err = 0;
+	}
+	ufshcd_clear_eh_in_progress(hba);
+
+out:
+	scsi_unblock_requests(hba->host);
 	pm_runtime_put_sync(hba->dev);
 }
 
 /**
- * ufshcd_err_handler - Check for fatal errors
- * @work: pointer to a work queue structure
+ * ufshcd_update_uic_error - check and set fatal UIC error flags.
+ * @hba: per-adapter instance
  */
-static void ufshcd_err_handler(struct ufs_hba *hba)
+static void ufshcd_update_uic_error(struct ufs_hba *hba)
 {
 	u32 reg;
 
+	/* PA_INIT_ERROR is fatal and needs UIC reset */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+
+	/* UIC NL/TL/DME errors needs software retry */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
+	if (reg)
+		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
+	if (reg)
+		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
+	if (reg)
+		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+
+	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
+			__func__, hba->uic_error);
+}
+
+/**
+ * ufshcd_check_errors - Check for errors that need s/w attention
+ * @hba: per-adapter instance
+ */
+static void ufshcd_check_errors(struct ufs_hba *hba)
+{
+	bool queue_eh_work = false;
+
 	if (hba->errors & INT_FATAL_ERRORS)
-		goto fatal_eh;
+		queue_eh_work = true;
 
 	if (hba->errors & UIC_ERROR) {
-		reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
-		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
-			goto fatal_eh;
+		hba->uic_error = 0;
+		ufshcd_update_uic_error(hba);
+		if (hba->uic_error)
+			queue_eh_work = true;
 	}
-	return;
-fatal_eh:
-	hba->ufshcd_state = UFSHCD_STATE_ERROR;
-	schedule_work(&hba->feh_workq);
+
+	if (queue_eh_work) {
+		/* handle fatal errors only when link is functional */
+		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
+			/* block commands from scsi mid-layer */
+			scsi_block_requests(hba->host);
+
+			/* transfer error masks to sticky bits */
+			hba->saved_err |= hba->errors;
+			hba->saved_uic_err |= hba->uic_error;
+
+			hba->ufshcd_state = UFSHCD_STATE_ERROR;
+			schedule_work(&hba->eh_work);
+		}
+	}
+	/*
+	 * if (!queue_eh_work) -
+	 * Other errors are either non-fatal where host recovers
+	 * itself without s/w intervention or errors that will be
+	 * handled by the SCSI core layer.
+	 */
 }
 
 /**
@@ -2469,7 +2555,7 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba)
 
 	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
-	wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
+	wake_up(&hba->tm_wq);
 }
 
 /**
@@ -2481,7 +2567,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
 	hba->errors = UFSHCD_ERROR_MASK & intr_status;
 	if (hba->errors)
-		ufshcd_err_handler(hba);
+		ufshcd_check_errors(hba);
 
 	if (intr_status & UFSHCD_UIC_MASK)
 		ufshcd_uic_cmd_compl(hba, intr_status);
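
The two hunks above complete the split between interrupt and process context: ufshcd_check_errors() now only latches error state and schedules eh_work, while the sleeping recovery runs later in ufshcd_err_handler(). A reduced sketch of that top-half/bottom-half shape (not the driver's exact code):

	/* Sketch: an IRQ path must not sleep, so it only records state
	 * and defers blocking recovery to a workqueue.
	 */
	static void example_check_errors(struct ufs_hba *hba)
	{
		if (hba->errors & INT_FATAL_ERRORS) {
			scsi_block_requests(hba->host);	/* safe in atomic context */
			hba->saved_err |= hba->errors;	/* sticky until handled */
			schedule_work(&hba->eh_work);	/* handler may sleep */
		}
	}
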
@@ -2519,38 +2605,58 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 	return retval;
 }
 
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+{
+	int err = 0;
+	u32 mask = 1 << tag;
+	unsigned long flags;
+
+	if (!test_bit(tag, &hba->outstanding_tasks))
+		goto out;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* poll for max. 1 sec to clear door bell register by h/w */
+	err = ufshcd_wait_for_register(hba,
+			REG_UTP_TASK_REQ_DOOR_BELL,
+			mask, 0, 1000, 1000);
+out:
+	return err;
+}
+
 /**
  * ufshcd_issue_tm_cmd - issues task management commands to controller
  * @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
  *
- * Returns SUCCESS/FAILED
+ * Returns non-zero value on error, zero on success.
  */
-static int
-ufshcd_issue_tm_cmd(struct ufs_hba *hba,
-		struct ufshcd_lrb *lrbp,
-		u8 tm_function)
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+		u8 tm_function, u8 *tm_response)
 {
 	struct utp_task_req_desc *task_req_descp;
 	struct utp_upiu_task_req *task_req_upiup;
 	struct Scsi_Host *host;
 	unsigned long flags;
-	int free_slot = 0;
+	int free_slot;
 	int err;
+	int task_tag;
 
 	host = hba->host;
 
-	spin_lock_irqsave(host->host_lock, flags);
-
-	/* If task management queue is full */
-	free_slot = ufshcd_get_tm_free_slot(hba);
-	if (free_slot >= hba->nutmrs) {
-		spin_unlock_irqrestore(host->host_lock, flags);
-		dev_err(hba->dev, "Task management queue full\n");
-		err = FAILED;
-		goto out;
-	}
+	/*
+	 * Get free slot, sleep if slots are unavailable.
+	 * Even though we use wait_event() which sleeps indefinitely,
+	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+	 */
+	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 
+	spin_lock_irqsave(host->host_lock, flags);
 	task_req_descp = hba->utmrdl_base_addr;
 	task_req_descp += free_slot;
 
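
ufshcd_clear_tm_cmd() and the wait_event() above are the throttling side of the new TM slot scheme: requesters sleep on tm_tag_wq until a slot frees, so every release path must wake that queue. A sketch of the pairing, with hypothetical wrapper names:

	/* Sketch: blocking acquire/release around the bitmap allocator.
	 * The release side must wake_up() or a sleeper could stall forever.
	 */
	static int example_get_slot(struct ufs_hba *hba)
	{
		int slot;

		/* sleeps until ufshcd_get_tm_free_slot() returns true */
		wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &slot));
		return slot;
	}

	static void example_put_slot(struct ufs_hba *hba, int slot)
	{
		ufshcd_put_tm_slot(hba, slot);
		wake_up(&hba->tm_tag_wq);	/* let one waiter retry */
	}
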
@@ -2562,18 +2668,15 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	/* Configure task request UPIU */
 	task_req_upiup =
 		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+	task_tag = hba->nutrs + free_slot;
 	task_req_upiup->header.dword_0 =
 		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
-				  lrbp->lun, lrbp->task_tag);
+				  lun_id, task_tag);
 	task_req_upiup->header.dword_1 =
 		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
 
-	task_req_upiup->input_param1 = lrbp->lun;
-	task_req_upiup->input_param1 =
-		cpu_to_be32(task_req_upiup->input_param1);
-	task_req_upiup->input_param2 = lrbp->task_tag;
-	task_req_upiup->input_param2 =
-		cpu_to_be32(task_req_upiup->input_param2);
+	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
+	task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
 	/* send command to the controller */
 	__set_bit(free_slot, &hba->outstanding_tasks);
@@ -2582,91 +2685,88 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	spin_unlock_irqrestore(host->host_lock, flags);
 
 	/* wait until the task management command is completed */
-	err =
-	wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
-					 (test_bit(free_slot,
-					 &hba->tm_condition) != 0),
-					 60 * HZ);
+	err = wait_event_timeout(hba->tm_wq,
+			test_bit(free_slot, &hba->tm_condition),
+			msecs_to_jiffies(TM_CMD_TIMEOUT));
 	if (!err) {
-		dev_err(hba->dev,
-			"Task management command timed-out\n");
-		err = FAILED;
-		goto out;
+		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
+				__func__, tm_function);
+		if (ufshcd_clear_tm_cmd(hba, free_slot))
+			dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
+					__func__, free_slot);
+		err = -ETIMEDOUT;
+	} else {
+		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
 	}
+
 	clear_bit(free_slot, &hba->tm_condition);
-	err = ufshcd_task_req_compl(hba, free_slot);
-out:
+	ufshcd_put_tm_slot(hba, free_slot);
+	wake_up(&hba->tm_tag_wq);
+
 	return err;
 }
 
 /**
- * ufshcd_device_reset - reset device and abort all the pending commands
+ * ufshcd_eh_device_reset_handler - device reset handler registered to
+ * scsi layer.
  * @cmd: SCSI command pointer
  *
  * Returns SUCCESS/FAILED
  */
-static int ufshcd_device_reset(struct scsi_cmnd *cmd)
+static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host;
 	struct ufs_hba *hba;
 	unsigned int tag;
 	u32 pos;
 	int err;
+	u8 resp = 0xF;
+	struct ufshcd_lrb *lrbp;
+	unsigned long flags;
 
 	host = cmd->device->host;
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
-	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
-	if (err == FAILED)
+	lrbp = &hba->lrb[tag];
+	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+		if (!err)
+			err = resp;
 		goto out;
+	}
 
-	for (pos = 0; pos < hba->nutrs; pos++) {
-		if (test_bit(pos, &hba->outstanding_reqs) &&
-		    (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
-
-			/* clear the respective UTRLCLR register bit */
-			ufshcd_utrl_clear(hba, pos);
-
-			clear_bit(pos, &hba->outstanding_reqs);
-
-			if (hba->lrb[pos].cmd) {
-				scsi_dma_unmap(hba->lrb[pos].cmd);
-				hba->lrb[pos].cmd->result =
-					DID_ABORT << 16;
-				hba->lrb[pos].cmd->scsi_done(cmd);
-				hba->lrb[pos].cmd = NULL;
-				clear_bit_unlock(pos, &hba->lrb_in_use);
-				wake_up(&hba->dev_cmd.tag_wq);
-			}
+	/* clear the commands that were pending for corresponding LUN */
+	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+		if (hba->lrb[pos].lun == lrbp->lun) {
+			err = ufshcd_clear_cmd(hba, pos);
+			if (err)
+				break;
 		}
-	} /* end of for */
+	}
+	spin_lock_irqsave(host->host_lock, flags);
+	ufshcd_transfer_req_compl(hba);
+	spin_unlock_irqrestore(host->host_lock, flags);
 out:
+	if (!err) {
+		err = SUCCESS;
+	} else {
+		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+		err = FAILED;
+	}
 	return err;
 }
 
 /**
- * ufshcd_host_reset - Main reset function registered with scsi layer
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_host_reset(struct scsi_cmnd *cmd)
-{
-	struct ufs_hba *hba;
-
-	hba = shost_priv(cmd->device->host);
-
-	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
-		return SUCCESS;
-
-	return ufshcd_do_reset(hba);
-}
-
-/**
  * ufshcd_abort - abort a specific command
  * @cmd: SCSI command pointer
  *
+ * Abort the pending command in device by sending UFS_ABORT_TASK task management
+ * command, and in host controller by clearing the door-bell register. There can
+ * be race between controller sending the command to the device while abort is
+ * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
+ * really issued and then try to abort it.
+ *
  * Returns SUCCESS/FAILED
  */
 static int ufshcd_abort(struct scsi_cmnd *cmd)
@@ -2675,33 +2775,68 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	struct ufs_hba *hba;
 	unsigned long flags;
 	unsigned int tag;
-	int err;
+	int err = 0;
+	int poll_cnt;
+	u8 resp = 0xF;
+	struct ufshcd_lrb *lrbp;
 
 	host = cmd->device->host;
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
-	spin_lock_irqsave(host->host_lock, flags);
+	/* If command is already aborted/completed, return SUCCESS */
+	if (!(test_bit(tag, &hba->outstanding_reqs)))
+		goto out;
 
-	/* check if command is still pending */
-	if (!(test_bit(tag, &hba->outstanding_reqs))) {
-		err = FAILED;
-		spin_unlock_irqrestore(host->host_lock, flags);
+	lrbp = &hba->lrb[tag];
+	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+				UFS_QUERY_TASK, &resp);
+		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+			/* cmd pending in the device */
+			break;
+		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+			u32 reg;
+
+			/*
+			 * cmd not pending in the device, check if it is
+			 * in transition.
+			 */
+			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+			if (reg & (1 << tag)) {
+				/* sleep for max. 200us to stabilize */
+				usleep_range(100, 200);
+				continue;
+			}
+			/* command completed already */
+			goto out;
+		} else {
+			if (!err)
+				err = resp; /* service response error */
+			goto out;
+		}
+	}
+
+	if (!poll_cnt) {
+		err = -EBUSY;
 		goto out;
 	}
-	spin_unlock_irqrestore(host->host_lock, flags);
 
-	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
-	if (err == FAILED)
+	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+			UFS_ABORT_TASK, &resp);
+	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+		if (!err)
+			err = resp; /* service response error */
 		goto out;
+	}
+
+	err = ufshcd_clear_cmd(hba, tag);
+	if (err)
+		goto out;
 
 	scsi_dma_unmap(cmd);
 
 	spin_lock_irqsave(host->host_lock, flags);
-
-	/* clear the respective UTRLCLR register bit */
-	ufshcd_utrl_clear(hba, tag);
-
 	__clear_bit(tag, &hba->outstanding_reqs);
 	hba->lrb[tag].cmd = NULL;
 	spin_unlock_irqrestore(host->host_lock, flags);
@@ -2709,6 +2844,129 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	clear_bit_unlock(tag, &hba->lrb_in_use);
 	wake_up(&hba->dev_cmd.tag_wq);
 out:
+	if (!err) {
+		err = SUCCESS;
+	} else {
+		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+		err = FAILED;
+	}
+
+	return err;
+}
+
+/**
+ * ufshcd_host_reset_and_restore - reset and restore host controller
+ * @hba: per-adapter instance
+ *
+ * Note that host controller reset may issue DME_RESET to
+ * local and remote (device) Uni-Pro stack and the attributes
+ * are reset to default state.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+{
+	int err;
+	async_cookie_t cookie;
+	unsigned long flags;
+
+	/* Reset the host controller */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_hba_stop(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	err = ufshcd_hba_enable(hba);
+	if (err)
+		goto out;
+
+	/* Establish the link again and restore the device */
+	cookie = async_schedule(ufshcd_async_scan, hba);
+	/* wait for async scan to be completed */
+	async_synchronize_cookie(++cookie);
+	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+		err = -EIO;
+out:
+	if (err)
+		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
+
+	return err;
+}
+
+/**
+ * ufshcd_reset_and_restore - reset and re-initialize host/device
+ * @hba: per-adapter instance
+ *
+ * Reset and recover device, host and re-establish link. This
+ * is helpful to recover the communication in fatal error conditions.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+{
+	int err = 0;
+	unsigned long flags;
+
+	err = ufshcd_host_reset_and_restore(hba);
+
+	/*
+	 * After reset the door-bell might be cleared, complete
+	 * outstanding requests in s/w here.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_transfer_req_compl(hba);
+	ufshcd_tmc_handler(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return err;
+}
+
+/**
+ * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
+ * @cmd - SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+	int err;
+	unsigned long flags;
+	struct ufs_hba *hba;
+
+	hba = shost_priv(cmd->device->host);
+
+	/*
+	 * Check if there is any race with fatal error handling.
+	 * If so, wait for it to complete. Even though fatal error
+	 * handling does reset and restore in some cases, don't assume
+	 * anything out of it. We are just avoiding race here.
+	 */
+	do {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		if (!(work_pending(&hba->eh_work) ||
+				hba->ufshcd_state == UFSHCD_STATE_RESET))
+			break;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+		flush_work(&hba->eh_work);
+	} while (1);
+
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+	ufshcd_set_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	err = ufshcd_reset_and_restore(hba);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (!err) {
+		err = SUCCESS;
+		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+	} else {
+		err = FAILED;
+		hba->ufshcd_state = UFSHCD_STATE_ERROR;
+	}
+	ufshcd_clear_eh_in_progress(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	return err;
 }
 
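
A convention worth noting in the new handlers above: internal helpers such as ufshcd_issue_tm_cmd(), ufshcd_clear_cmd() and ufshcd_reset_and_restore() return 0 or an error value, and only the scsi_host_template entry points translate that into the SCSI EH verdicts. Schematically, with a hypothetical handler body:

	/* Sketch: keep 0/-errno inside the driver, convert to the SCSI
	 * midlayer's SUCCESS/FAILED only at the eh_*_handler boundary.
	 */
	static int example_eh_handler(struct scsi_cmnd *cmd)
	{
		struct ufs_hba *hba = shost_priv(cmd->device->host);
		int err = example_recover(hba);	/* hypothetical, returns 0/-errno */

		return err ? FAILED : SUCCESS;
	}
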
@@ -2737,8 +2995,13 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 		goto out;
 
 	ufshcd_force_reset_auto_bkops(hba);
-	scsi_scan_host(hba->host);
-	pm_runtime_put_sync(hba->dev);
+	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
+	/* If we are in error handling context no need to scan the host */
+	if (!ufshcd_eh_in_progress(hba)) {
+		scsi_scan_host(hba->host);
+		pm_runtime_put_sync(hba->dev);
+	}
 out:
 	return;
 }
@@ -2751,8 +3014,8 @@ static struct scsi_host_template ufshcd_driver_template = {
 	.slave_alloc		= ufshcd_slave_alloc,
 	.slave_destroy		= ufshcd_slave_destroy,
 	.eh_abort_handler	= ufshcd_abort,
-	.eh_device_reset_handler = ufshcd_device_reset,
-	.eh_host_reset_handler	= ufshcd_host_reset,
+	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
+	.eh_host_reset_handler	= ufshcd_eh_host_reset_handler,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
@@ -2916,10 +3179,11 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
 	host->max_cmd_len = MAX_CDB_SIZE;
 
 	/* Initailize wait queue for task management */
-	init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
+	init_waitqueue_head(&hba->tm_wq);
+	init_waitqueue_head(&hba->tm_tag_wq);
 
 	/* Initialize work queues */
-	INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
 	/* Initialize UIC command mutex */
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 577679a2d189..acf318e338ed 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -174,15 +174,21 @@ struct ufs_dev_cmd {
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
- * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_wq: wait queue for task management
+ * @tm_tag_wq: wait queue for free task management slots
+ * @tm_slots_in_use: bit map of task management request slots in use
  * @pwr_done: completion for power mode change
  * @tm_condition: condition variable for task management
  * @ufshcd_state: UFSHCD states
+ * @eh_flags: Error handling flags
  * @intr_mask: Interrupt Mask Bits
  * @ee_ctrl_mask: Exception event control mask
- * @feh_workq: Work queue for fatal controller error handling
+ * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
+ * @uic_error: UFS interconnect layer error status
+ * @saved_err: sticky error mask
+ * @saved_uic_err: sticky UIC error mask
  * @dev_cmd: ufs device management command information
  * @auto_bkops_enabled: to track whether bkops is enabled in device
  */
@@ -217,21 +223,27 @@ struct ufs_hba {
 	struct uic_command *active_uic_cmd;
 	struct mutex uic_cmd_mutex;
 
-	wait_queue_head_t ufshcd_tm_wait_queue;
+	wait_queue_head_t tm_wq;
+	wait_queue_head_t tm_tag_wq;
 	unsigned long tm_condition;
+	unsigned long tm_slots_in_use;
 
 	struct completion *pwr_done;
 
 	u32 ufshcd_state;
+	u32 eh_flags;
 	u32 intr_mask;
 	u16 ee_ctrl_mask;
 
 	/* Work Queues */
-	struct work_struct feh_workq;
+	struct work_struct eh_work;
 	struct work_struct eeh_work;
 
 	/* HBA Errors */
 	u32 errors;
+	u32 uic_error;
+	u32 saved_err;
+	u32 saved_uic_err;
 
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
@@ -263,6 +275,8 @@ static inline void check_upiu_size(void)
 			GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
 }
 
+extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state);
+extern int ufshcd_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 0475c6619a68..9abc7e32b43d 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -304,10 +304,10 @@ enum {
  * @size: size of physical segment DW-3
  */
 struct ufshcd_sg_entry {
-	u32 base_addr;
-	u32 upper_addr;
-	u32 reserved;
-	u32 size;
+	__le32 base_addr;
+	__le32 upper_addr;
+	__le32 reserved;
+	__le32 size;
 };
 
 /**
@@ -330,10 +330,10 @@ struct utp_transfer_cmd_desc {
  * @dword3: Descriptor Header DW3
  */
 struct request_desc_header {
-	u32 dword_0;
-	u32 dword_1;
-	u32 dword_2;
-	u32 dword_3;
+	__le32 dword_0;
+	__le32 dword_1;
+	__le32 dword_2;
+	__le32 dword_3;
 };
 
 /**
@@ -352,16 +352,16 @@ struct utp_transfer_req_desc {
 	struct request_desc_header header;
 
 	/* DW 4-5*/
-	u32  command_desc_base_addr_lo;
-	u32  command_desc_base_addr_hi;
+	__le32  command_desc_base_addr_lo;
+	__le32  command_desc_base_addr_hi;
 
 	/* DW 6 */
-	u16  response_upiu_length;
-	u16  response_upiu_offset;
+	__le16  response_upiu_length;
+	__le16  response_upiu_offset;
 
 	/* DW 7 */
-	u16  prd_table_length;
-	u16  prd_table_offset;
+	__le16  prd_table_length;
+	__le16  prd_table_offset;
 };
 
 /**
@@ -376,10 +376,10 @@ struct utp_task_req_desc {
 	struct request_desc_header header;
 
 	/* DW 4-11 */
-	u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+	__le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
 
 	/* DW 12-19 */
-	u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+	__le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
 };
 
 #endif /* End of Header */
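
As a footnote to the ufshci.h hunks: unlike the big-endian UPIU payloads in ufs.h, these host-memory descriptors are consumed by the controller in little-endian layout, hence the __le16/__le32 annotations. Filling a scatter-gather entry would look roughly like this sketch; the 0-based size encoding is an assumption carried over from how ufshcd.c programs the PRDT:

	/* Sketch: populate one little-endian PRDT entry for DMA. */
	static void example_fill_prd(struct ufshcd_sg_entry *prd,
				     dma_addr_t addr, u32 len)
	{
		prd->base_addr = cpu_to_le32(lower_32_bits(addr));
		prd->upper_addr = cpu_to_le32(upper_32_bits(addr));
		prd->size = cpu_to_le32(len - 1);	/* assumed 0-based length */
	}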