Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h              4
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       201
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h        21
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c          14
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h         1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c         36
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    122
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c        69
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c         7
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c   21
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       399
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        523
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h         22
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h      2
14 files changed, 649 insertions, 793 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index adb95674823f..3062b39fbdb9 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -267,10 +267,6 @@ struct lpfc_hba {
 	struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
 	uint32_t nport_event_cnt;	/* timestamp for nlplist entry */
 
-#define LPFC_RPI_HASH_SIZE 64
-#define LPFC_RPI_HASH_FUNC(x) ((x) & (0x3f))
-	/* ptr to active D_ID / RPIs */
-	struct lpfc_nodelist *fc_nlplookup[LPFC_RPI_HASH_SIZE];
 	uint32_t wwnn[2];
 	uint32_t RandomData[7];
 
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index acae7c48ef7d..89e8222bc7cc 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -200,19 +200,13 @@ lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
 }
 
 
-static ssize_t
-lpfc_issue_lip (struct class_device *cdev, const char *buf, size_t count)
+static int
+lpfc_issue_lip(struct Scsi_Host *host)
 {
-	struct Scsi_Host *host = class_to_shost(cdev);
 	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
-	int val = 0;
 	LPFC_MBOXQ_t *pmboxq;
 	int mbxstatus = MBXERR_ERROR;
 
-	if ((sscanf(buf, "%d", &val) != 1) ||
-	    (val != 1))
-		return -EINVAL;
-
 	if ((phba->fc_flag & FC_OFFLINE_MODE) ||
 	    (phba->hba_state != LPFC_HBA_READY))
 		return -EPERM;
@@ -229,12 +223,12 @@ lpfc_issue_lip (struct class_device *cdev, const char *buf, size_t count)
 	if (mbxstatus == MBX_TIMEOUT)
 		pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 	else
-		mempool_free( pmboxq, phba->mbox_mem_pool);
+		mempool_free(pmboxq, phba->mbox_mem_pool);
 
 	if (mbxstatus == MBXERR_ERROR)
 		return -EIO;
 
-	return strlen(buf);
+	return 0;
 }
 
 static ssize_t
@@ -251,8 +245,6 @@ lpfc_board_online_show(struct class_device *cdev, char *buf)
 	struct Scsi_Host *host = class_to_shost(cdev);
 	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
 
-	if (!phba) return 0;
-
 	if (phba->fc_flag & FC_OFFLINE_MODE)
 		return snprintf(buf, PAGE_SIZE, "0\n");
 	else
@@ -269,7 +261,7 @@ lpfc_board_online_store(struct class_device *cdev, const char *buf,
 	int val=0, status=0;
 
 	if (sscanf(buf, "%d", &val) != 1)
-		return 0;
+		return -EINVAL;
 
 	init_completion(&online_compl);
 
@@ -283,7 +275,7 @@ lpfc_board_online_store(struct class_device *cdev, const char *buf,
 	if (!status)
 		return strlen(buf);
 	else
-		return 0;
+		return -EIO;
 }
 
 
@@ -294,47 +286,83 @@ lpfc_##attr##_show(struct class_device *cdev, char *buf) \
 	struct Scsi_Host *host = class_to_shost(cdev);\
 	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
 	int val = 0;\
-	if (phba){\
-		val = phba->cfg_##attr;\
-		return snprintf(buf, PAGE_SIZE, "%d\n",\
-				phba->cfg_##attr);\
+	val = phba->cfg_##attr;\
+	return snprintf(buf, PAGE_SIZE, "%d\n",\
+			phba->cfg_##attr);\
+}
+
+#define lpfc_param_hex_show(attr) \
+static ssize_t \
+lpfc_##attr##_show(struct class_device *cdev, char *buf) \
+{ \
+	struct Scsi_Host *host = class_to_shost(cdev);\
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
+	int val = 0;\
+	val = phba->cfg_##attr;\
+	return snprintf(buf, PAGE_SIZE, "%#x\n",\
+			phba->cfg_##attr);\
+}
+
+#define lpfc_param_init(attr, default, minval, maxval) \
+static int \
+lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
+{ \
+	if (val >= minval && val <= maxval) {\
+		phba->cfg_##attr = val;\
+		return 0;\
 	}\
-	return 0;\
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+			"%d:0449 lpfc_"#attr" attribute cannot be set to %d, "\
+			"allowed range is ["#minval", "#maxval"]\n", \
+			phba->brd_no, val); \
+	phba->cfg_##attr = default;\
+	return -EINVAL;\
 }
 
-#define lpfc_param_store(attr, minval, maxval) \
+#define lpfc_param_set(attr, default, minval, maxval) \
+static int \
+lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
+{ \
+	if (val >= minval && val <= maxval) {\
+		phba->cfg_##attr = val;\
+		return 0;\
+	}\
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+			"%d:0450 lpfc_"#attr" attribute cannot be set to %d, "\
+			"allowed range is ["#minval", "#maxval"]\n", \
+			phba->brd_no, val); \
+	return -EINVAL;\
+}
+
+#define lpfc_param_store(attr) \
 static ssize_t \
 lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
 { \
 	struct Scsi_Host *host = class_to_shost(cdev);\
 	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
-	int val = 0;\
+	int val=0;\
 	if (!isdigit(buf[0]))\
 		return -EINVAL;\
-	if (sscanf(buf, "0x%x", &val) != 1)\
-		if (sscanf(buf, "%d", &val) != 1)\
-			return -EINVAL;\
-	if (phba){\
-		if (val >= minval && val <= maxval) {\
-			phba->cfg_##attr = val;\
-			return strlen(buf);\
-		}\
-	}\
-	return 0;\
+	if (sscanf(buf, "%i", &val) != 1)\
+		return -EINVAL;\
+	if (lpfc_##attr##_set(phba, val) == 0) \
+		return strlen(buf);\
+	else \
+		return -EINVAL;\
 }
 
-#define LPFC_ATTR_R_NOINIT(name, desc) \
-extern int lpfc_##name;\
+#define LPFC_ATTR(name, defval, minval, maxval, desc) \
+static int lpfc_##name = defval;\
 module_param(lpfc_##name, int, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_param_show(name)\
-static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+lpfc_param_init(name, defval, minval, maxval)
 
 #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
 static int lpfc_##name = defval;\
 module_param(lpfc_##name, int, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
 static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
 
 #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
@@ -342,7 +370,28 @@ static int lpfc_##name = defval;\
 module_param(lpfc_##name, int, 0);\
 MODULE_PARM_DESC(lpfc_##name, desc);\
 lpfc_param_show(name)\
-lpfc_param_store(name, minval, maxval)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+		 lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
+static int lpfc_##name = defval;\
+module_param(lpfc_##name, int, 0);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
+static int lpfc_##name = defval;\
+module_param(lpfc_##name, int, 0);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
 static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
 		 lpfc_##name##_show, lpfc_##name##_store)
 
@@ -364,7 +413,6 @@ static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
 		NULL);
 static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
 		NULL);
-static CLASS_DEVICE_ATTR(issue_lip, S_IWUSR, NULL, lpfc_issue_lip);
 static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
 		lpfc_board_online_show, lpfc_board_online_store);
 
@@ -388,7 +436,7 @@ static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
 # LOG_LIBDFC 0x2000 LIBDFC events
 # LOG_ALL_MSG 0xffff LOG all messages
 */
-LPFC_ATTR_RW(log_verbose, 0x0, 0x0, 0xffff, "Verbose logging bit-mask");
+LPFC_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, "Verbose logging bit-mask");
 
 /*
 # lun_queue_depth: This parameter is used to limit the number of outstanding
@@ -419,7 +467,7 @@ LPFC_ATTR_R(scan_down, 1, 0, 1,
 
 /*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
-# until the timer expires. Value range is [0,255]. Default value is 20.
+# until the timer expires. Value range is [0,255]. Default value is 30.
 # NOTE: this MUST be less then the SCSI Layer command timeout - 1.
 */
 LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
@@ -475,14 +523,10 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
 # cr_delay is set to 0.
 */
-static int lpfc_cr_delay = 0;
-module_param(lpfc_cr_delay, int , 0);
-MODULE_PARM_DESC(lpfc_cr_delay, "A count of milliseconds after which an "
+LPFC_ATTR(cr_delay, 0, 0, 63, "A count of milliseconds after which an"
 		"interrupt response is generated");
 
-static int lpfc_cr_count = 1;
-module_param(lpfc_cr_count, int, 0);
-MODULE_PARM_DESC(lpfc_cr_count, "A count of I/O completions after which an "
+LPFC_ATTR(cr_count, 1, 1, 255, "A count of I/O completions after which an"
 		"interrupt response is generated");
 
 /*
@@ -498,9 +542,7 @@ LPFC_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
 # Specifies the maximum number of ELS cmds we can have outstanding (for
 # discovery). Value range is [1,64]. Default value = 32.
 */
-static int lpfc_discovery_threads = 32;
-module_param(lpfc_discovery_threads, int, 0);
-MODULE_PARM_DESC(lpfc_discovery_threads, "Maximum number of ELS commands "
+LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands"
 		"during discovery");
 
 /*
@@ -537,7 +579,6 @@ struct class_device_attribute *lpfc_host_attrs[] = {
 	&class_device_attr_lpfc_max_luns,
 	&class_device_attr_nport_evt_cnt,
 	&class_device_attr_management_version,
-	&class_device_attr_issue_lip,
 	&class_device_attr_board_online,
 	NULL,
 };
@@ -992,7 +1033,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	struct fc_host_statistics *hs = &phba->link_stats;
 	LPFC_MBOXQ_t *pmboxq;
 	MAILBOX_t *pmb;
-	int rc=0;
+	int rc = 0;
 
 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmboxq)
@@ -1005,18 +1046,16 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	pmboxq->context1 = NULL;
 
 	if ((phba->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE))){
+	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
-	} else
+	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
 
 	if (rc != MBX_SUCCESS) {
-		if (pmboxq) {
-			if (rc == MBX_TIMEOUT)
-				pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-			else
-				mempool_free( pmboxq, phba->mbox_mem_pool);
-		}
+		if (rc == MBX_TIMEOUT)
+			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		else
+			mempool_free(pmboxq, phba->mbox_mem_pool);
 		return NULL;
 	}
 
@@ -1027,24 +1066,22 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
 	hs->rx_words = (pmb->un.varRdStatus.rcvByteCnt * 256);
 
-	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 	pmb->mbxCommand = MBX_READ_LNK_STAT;
 	pmb->mbxOwner = OWN_HOST;
 	pmboxq->context1 = NULL;
 
 	if ((phba->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) {
+	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
-	} else
+	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
 
 	if (rc != MBX_SUCCESS) {
-		if (pmboxq) {
-			if (rc == MBX_TIMEOUT)
-				pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-			else
-				mempool_free( pmboxq, phba->mbox_mem_pool);
-		}
+		if (rc == MBX_TIMEOUT)
+			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		else
+			mempool_free( pmboxq, phba->mbox_mem_pool);
 		return NULL;
 	}
 
@@ -1234,25 +1271,27 @@ struct fc_function_template lpfc_transport_functions = {
 
 	.get_starget_port_name = lpfc_get_starget_port_name,
 	.show_starget_port_name = 1,
+
+	.issue_fc_host_lip = lpfc_issue_lip,
 };
 
 void
 lpfc_get_cfgparam(struct lpfc_hba *phba)
 {
-	phba->cfg_log_verbose = lpfc_log_verbose;
-	phba->cfg_cr_delay = lpfc_cr_delay;
-	phba->cfg_cr_count = lpfc_cr_count;
-	phba->cfg_lun_queue_depth = lpfc_lun_queue_depth;
-	phba->cfg_fcp_class = lpfc_fcp_class;
-	phba->cfg_use_adisc = lpfc_use_adisc;
-	phba->cfg_ack0 = lpfc_ack0;
-	phba->cfg_topology = lpfc_topology;
-	phba->cfg_scan_down = lpfc_scan_down;
-	phba->cfg_nodev_tmo = lpfc_nodev_tmo;
-	phba->cfg_link_speed = lpfc_link_speed;
-	phba->cfg_fdmi_on = lpfc_fdmi_on;
-	phba->cfg_discovery_threads = lpfc_discovery_threads;
-	phba->cfg_max_luns = lpfc_max_luns;
+	lpfc_log_verbose_init(phba, lpfc_log_verbose);
+	lpfc_cr_delay_init(phba, lpfc_cr_delay);
+	lpfc_cr_count_init(phba, lpfc_cr_count);
+	lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth);
+	lpfc_fcp_class_init(phba, lpfc_fcp_class);
+	lpfc_use_adisc_init(phba, lpfc_use_adisc);
+	lpfc_ack0_init(phba, lpfc_ack0);
+	lpfc_topology_init(phba, lpfc_topology);
+	lpfc_scan_down_init(phba, lpfc_scan_down);
+	lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
+	lpfc_link_speed_init(phba, lpfc_link_speed);
+	lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
+	lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
+	lpfc_max_luns_init(phba, lpfc_max_luns);
 
 	/*
 	 * The total number of segments is the configuration value plus 2
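A side note on the lpfc_param_store() macro earlier in this file: the old code tried sscanf(buf, "0x%x", ...) and then sscanf(buf, "%d", ...); the new code uses the single "%i" conversion, which selects the base from the prefix the way strtol() does with base 0. A tiny userspace illustration (not driver code):

#include <stdio.h>

int main(void)
{
	int a = 0, b = 0, c = 0;

	sscanf("16",   "%i", &a);   /* decimal      -> 16 */
	sscanf("0x10", "%i", &b);   /* hex prefix   -> 16 */
	sscanf("020",  "%i", &c);   /* octal prefix -> 16 */

	printf("%d %d %d\n", a, b, c);   /* prints: 16 16 16 */
	return 0;
}

Note the octal case: with "%i" a leading zero is significant, which the old two-step parse did not honour.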
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bd5135d3eee4..d527d05a607f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -62,10 +62,6 @@ void lpfc_disc_timeout(unsigned long);
 void lpfc_scan_timeout(unsigned long);
 
 struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
-struct lpfc_nodelist *lpfc_findnode_remove_rpi(struct lpfc_hba * phba,
-					       uint16_t rpi);
-void lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-		      uint16_t rpi);
 
 int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
 int lpfc_do_work(void *);
@@ -147,6 +143,9 @@ LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
 int lpfc_mem_alloc(struct lpfc_hba *);
 void lpfc_mem_free(struct lpfc_hba *);
 
+struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
+void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
+uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
 int lpfc_sli_hba_setup(struct lpfc_hba *);
 int lpfc_sli_hba_down(struct lpfc_hba *);
 int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -182,15 +181,11 @@ struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order,
 int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
			     uint32_t timeout);
 
-int lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
			     struct lpfc_sli_ring * pring,
			     struct lpfc_iocbq * piocb,
-			     uint32_t flag,
-			     struct lpfc_iocbq * prspiocbq,
-			     uint32_t timeout);
-void lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
-				      struct lpfc_iocbq * queue1,
-				      struct lpfc_iocbq * queue2);
+			     struct lpfc_iocbq * prspiocbq,
+			     uint32_t timeout);
 void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
			     struct lpfc_iocbq * cmdiocb,
			     struct lpfc_iocbq * rspiocb);
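The prototype block above replaces the high-priority wait variant with a single synchronous lpfc_sli_issue_iocb_wait() and exports accessors for the shared iocb pool. A hedged sketch of the calling pattern, modelled on the CT and SCSI call sites elsewhere in this patch (the ring choice, timeout handling and error mapping are illustrative, not taken from the driver):

/* Sketch only: allocate a response iocb, issue a prepared command
 * synchronously, then return the response iocb to the pool. */
static int example_issue_and_wait(struct lpfc_hba *phba,
				  struct lpfc_iocbq *cmdiocb,
				  uint32_t timeout)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *rspiocb;
	int rc;

	spin_lock_irq(phba->host->host_lock);
	rspiocb = lpfc_sli_get_iocbq(phba);	/* replaces the open-coded
						 * list_remove_head() + memset() */
	spin_unlock_irq(phba->host->host_lock);
	if (!rspiocb)
		return -ENOMEM;

	rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocb, rspiocb, timeout);

	spin_lock_irq(phba->host->host_lock);
	lpfc_sli_release_iocbq(phba, rspiocb);	/* give the iocb back */
	spin_unlock_irq(phba->host->host_lock);

	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}

The locking shown follows the CT and ELS call sites in this patch; other callers invoke the pool accessors with the host lock already held.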
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1280f0e54636..7f427f9c4688 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -224,18 +224,16 @@ lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
224 224
225 struct lpfc_sli *psli = &phba->sli; 225 struct lpfc_sli *psli = &phba->sli;
226 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 226 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
227 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
228 IOCB_t *icmd; 227 IOCB_t *icmd;
229 struct lpfc_iocbq *geniocb = NULL; 228 struct lpfc_iocbq *geniocb;
230 229
231 /* Allocate buffer for command iocb */ 230 /* Allocate buffer for command iocb */
232 spin_lock_irq(phba->host->host_lock); 231 spin_lock_irq(phba->host->host_lock);
233 list_remove_head(lpfc_iocb_list, geniocb, struct lpfc_iocbq, list); 232 geniocb = lpfc_sli_get_iocbq(phba);
234 spin_unlock_irq(phba->host->host_lock); 233 spin_unlock_irq(phba->host->host_lock);
235 234
236 if (geniocb == NULL) 235 if (geniocb == NULL)
237 return 1; 236 return 1;
238 memset(geniocb, 0, sizeof (struct lpfc_iocbq));
239 237
240 icmd = &geniocb->iocb; 238 icmd = &geniocb->iocb;
241 icmd->un.genreq64.bdl.ulpIoTag32 = 0; 239 icmd->un.genreq64.bdl.ulpIoTag32 = 0;
@@ -279,7 +277,7 @@ lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
279 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; 277 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
280 spin_lock_irq(phba->host->host_lock); 278 spin_lock_irq(phba->host->host_lock);
281 if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) { 279 if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
282 list_add_tail(&geniocb->list, lpfc_iocb_list); 280 lpfc_sli_release_iocbq(phba, geniocb);
283 spin_unlock_irq(phba->host->host_lock); 281 spin_unlock_irq(phba->host->host_lock);
284 return 1; 282 return 1;
285 } 283 }
@@ -487,7 +485,7 @@ out:
487 kfree(inp); 485 kfree(inp);
488 kfree(bmp); 486 kfree(bmp);
489 spin_lock_irq(phba->host->host_lock); 487 spin_lock_irq(phba->host->host_lock);
490 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list); 488 lpfc_sli_release_iocbq(phba, cmdiocb);
491 spin_unlock_irq(phba->host->host_lock); 489 spin_unlock_irq(phba->host->host_lock);
492 return; 490 return;
493} 491}
@@ -526,7 +524,7 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
526 kfree(inp); 524 kfree(inp);
527 kfree(bmp); 525 kfree(bmp);
528 spin_lock_irq(phba->host->host_lock); 526 spin_lock_irq(phba->host->host_lock);
529 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list); 527 lpfc_sli_release_iocbq(phba, cmdiocb);
530 spin_unlock_irq(phba->host->host_lock); 528 spin_unlock_irq(phba->host->host_lock);
531 return; 529 return;
532} 530}
@@ -735,7 +733,7 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
735 kfree(inp); 733 kfree(inp);
736 kfree(bmp); 734 kfree(bmp);
737 spin_lock_irq(phba->host->host_lock); 735 spin_lock_irq(phba->host->host_lock);
738 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list); 736 lpfc_sli_release_iocbq(phba, cmdiocb);
739 spin_unlock_irq(phba->host->host_lock); 737 spin_unlock_irq(phba->host->host_lock);
740 return; 738 return;
741} 739}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 098b8b45c7f1..084e7628ce17 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -70,7 +70,6 @@ struct lpfc_nodelist {
 	struct timer_list nlp_tmofunc;	/* Used for nodev tmo */
 	struct fc_rport *rport;		/* Corresponding FC transport
					   port structure */
-	struct lpfc_nodelist *nlp_rpi_hash_next;
 	struct lpfc_hba *nlp_phba;
 	struct lpfc_work_evt nodev_timeout_evt;
 	struct lpfc_work_evt els_retry_evt;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 63caf7fe9725..08a0c00cfc30 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -102,9 +102,8 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba,
102 uint16_t cmdSize, 102 uint16_t cmdSize,
103 uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd) 103 uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd)
104{ 104{
105 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
106 struct lpfc_sli_ring *pring; 105 struct lpfc_sli_ring *pring;
107 struct lpfc_iocbq *elsiocb = NULL; 106 struct lpfc_iocbq *elsiocb;
108 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist; 107 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
109 struct ulp_bde64 *bpl; 108 struct ulp_bde64 *bpl;
110 IOCB_t *icmd; 109 IOCB_t *icmd;
@@ -114,15 +113,13 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba,
114 if (phba->hba_state < LPFC_LINK_UP) 113 if (phba->hba_state < LPFC_LINK_UP)
115 return NULL; 114 return NULL;
116 115
117
118 /* Allocate buffer for command iocb */ 116 /* Allocate buffer for command iocb */
119 spin_lock_irq(phba->host->host_lock); 117 spin_lock_irq(phba->host->host_lock);
120 list_remove_head(lpfc_iocb_list, elsiocb, struct lpfc_iocbq, list); 118 elsiocb = lpfc_sli_get_iocbq(phba);
121 spin_unlock_irq(phba->host->host_lock); 119 spin_unlock_irq(phba->host->host_lock);
122 120
123 if (elsiocb == NULL) 121 if (elsiocb == NULL)
124 return NULL; 122 return NULL;
125 memset(elsiocb, 0, sizeof (struct lpfc_iocbq));
126 icmd = &elsiocb->iocb; 123 icmd = &elsiocb->iocb;
127 124
128 /* fill in BDEs for command */ 125 /* fill in BDEs for command */
@@ -133,7 +130,9 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba,
133 if (pcmd) 130 if (pcmd)
134 kfree(pcmd); 131 kfree(pcmd);
135 132
136 list_add_tail(&elsiocb->list, lpfc_iocb_list); 133 spin_lock_irq(phba->host->host_lock);
134 lpfc_sli_release_iocbq(phba, elsiocb);
135 spin_unlock_irq(phba->host->host_lock);
137 return NULL; 136 return NULL;
138 } 137 }
139 138
@@ -150,7 +149,9 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba,
150 kfree(prsp); 149 kfree(prsp);
151 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 150 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
152 kfree(pcmd); 151 kfree(pcmd);
153 list_add_tail(&elsiocb->list, lpfc_iocb_list); 152 spin_lock_irq(phba->host->host_lock);
153 lpfc_sli_release_iocbq(phba, elsiocb);
154 spin_unlock_irq(phba->host->host_lock);
154 return NULL; 155 return NULL;
155 } 156 }
156 INIT_LIST_HEAD(&prsp->list); 157 INIT_LIST_HEAD(&prsp->list);
@@ -164,7 +165,9 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba,
164 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 165 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
165 &pbuflist->phys); 166 &pbuflist->phys);
166 if (pbuflist == 0 || pbuflist->virt == 0) { 167 if (pbuflist == 0 || pbuflist->virt == 0) {
167 list_add_tail(&elsiocb->list, lpfc_iocb_list); 168 spin_lock_irq(phba->host->host_lock);
169 lpfc_sli_release_iocbq(phba, elsiocb);
170 spin_unlock_irq(phba->host->host_lock);
168 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 171 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
169 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 172 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
170 kfree(pcmd); 173 kfree(pcmd);
@@ -596,10 +599,8 @@ lpfc_els_abort_flogi(struct lpfc_hba * phba)
596 spin_unlock_irq(phba->host->host_lock); 599 spin_unlock_irq(phba->host->host_lock);
597 (iocb->iocb_cmpl) (phba, iocb, iocb); 600 (iocb->iocb_cmpl) (phba, iocb, iocb);
598 spin_lock_irq(phba->host->host_lock); 601 spin_lock_irq(phba->host->host_lock);
599 } else { 602 } else
600 list_add_tail(&iocb->list, 603 lpfc_sli_release_iocbq(phba, iocb);
601 &phba->lpfc_iocb_list);
602 }
603 } 604 }
604 } 605 }
605 } 606 }
@@ -1713,7 +1714,7 @@ lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb)
1713 kfree(buf_ptr); 1714 kfree(buf_ptr);
1714 } 1715 }
1715 spin_lock_irq(phba->host->host_lock); 1716 spin_lock_irq(phba->host->host_lock);
1716 list_add_tail(&elsiocb->list, &phba->lpfc_iocb_list); 1717 lpfc_sli_release_iocbq(phba, elsiocb);
1717 spin_unlock_irq(phba->host->host_lock); 1718 spin_unlock_irq(phba->host->host_lock);
1718 return 0; 1719 return 0;
1719} 1720}
@@ -2929,9 +2930,8 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
2929 spin_unlock_irq(phba->host->host_lock); 2930 spin_unlock_irq(phba->host->host_lock);
2930 (piocb->iocb_cmpl) (phba, piocb, piocb); 2931 (piocb->iocb_cmpl) (phba, piocb, piocb);
2931 spin_lock_irq(phba->host->host_lock); 2932 spin_lock_irq(phba->host->host_lock);
2932 } else { 2933 } else
2933 list_add_tail(&piocb->list, &phba->lpfc_iocb_list); 2934 lpfc_sli_release_iocbq(phba, piocb);
2934 }
2935 } 2935 }
2936 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) { 2936 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) {
2937 phba->els_tmofunc.expires = jiffies + HZ * timeout; 2937 phba->els_tmofunc.expires = jiffies + HZ * timeout;
@@ -2996,7 +2996,7 @@ lpfc_els_flush_cmd(struct lpfc_hba * phba)
2996 spin_lock_irq(phba->host->host_lock); 2996 spin_lock_irq(phba->host->host_lock);
2997 } 2997 }
2998 else 2998 else
2999 list_add_tail(&piocb->list, &phba->lpfc_iocb_list); 2999 lpfc_sli_release_iocbq(phba, piocb);
3000 } 3000 }
3001 3001
3002 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 3002 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
@@ -3033,7 +3033,7 @@ lpfc_els_flush_cmd(struct lpfc_hba * phba)
3033 spin_lock_irq(phba->host->host_lock); 3033 spin_lock_irq(phba->host->host_lock);
3034 } 3034 }
3035 else 3035 else
3036 list_add_tail(&piocb->list, &phba->lpfc_iocb_list); 3036 lpfc_sli_release_iocbq(phba, piocb);
3037 } 3037 }
3038 spin_unlock_irq(phba->host->host_lock); 3038 spin_unlock_irq(phba->host->host_lock);
3039 return; 3039 return;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 56052f4510c3..259eeb161b82 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -890,10 +890,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
890 890
891 pmb->context1 = NULL; 891 pmb->context1 = NULL;
892 892
893 if (ndlp->nlp_rpi != 0)
894 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
895 ndlp->nlp_rpi = mb->un.varWords[0]; 893 ndlp->nlp_rpi = mb->un.varWords[0];
896 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
897 ndlp->nlp_type |= NLP_FABRIC; 894 ndlp->nlp_type |= NLP_FABRIC;
898 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 895 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
899 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST); 896 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
@@ -981,10 +978,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
981 978
982 pmb->context1 = NULL; 979 pmb->context1 = NULL;
983 980
984 if (ndlp->nlp_rpi != 0)
985 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
986 ndlp->nlp_rpi = mb->un.varWords[0]; 981 ndlp->nlp_rpi = mb->un.varWords[0];
987 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
988 ndlp->nlp_type |= NLP_FABRIC; 982 ndlp->nlp_type |= NLP_FABRIC;
989 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 983 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
990 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST); 984 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
@@ -1028,6 +1022,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1028 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 1022 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1029 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 1023 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1030 1024
1025 scsi_block_requests(phba->host);
1031 ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids); 1026 ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
1032 if (!rport) { 1027 if (!rport) {
1033 dev_printk(KERN_WARNING, &phba->pcidev->dev, 1028 dev_printk(KERN_WARNING, &phba->pcidev->dev,
@@ -1044,6 +1039,23 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1044 } 1039 }
1045 rdata = rport->dd_data; 1040 rdata = rport->dd_data;
1046 rdata->pnode = ndlp; 1041 rdata->pnode = ndlp;
1042 scsi_unblock_requests(phba->host);
1043
1044 return;
1045}
1046
1047static void
1048lpfc_unregister_remote_port(struct lpfc_hba * phba,
1049 struct lpfc_nodelist * ndlp)
1050{
1051 struct fc_rport *rport = ndlp->rport;
1052 struct lpfc_rport_data *rdata = rport->dd_data;
1053
1054 ndlp->rport = NULL;
1055 rdata->pnode = NULL;
1056 scsi_block_requests(phba->host);
1057 fc_remote_port_delete(rport);
1058 scsi_unblock_requests(phba->host);
1047 1059
1048 return; 1060 return;
1049} 1061}
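The new lpfc_unregister_remote_port() above, together with the scsi_block_requests()/scsi_unblock_requests() pair added around fc_remote_port_add(), replaces the old fc_remote_port_block()/fc_remote_port_unblock() usage. A condensed sketch of the resulting rport lifecycle (illustrative only; rport_ids setup and error handling are omitted, and the function names are invented):

static void example_attach_rport(struct lpfc_hba *phba,
				 struct lpfc_nodelist *ndlp,
				 struct fc_rport_identifiers *ids)
{
	struct fc_rport *rport;

	scsi_block_requests(phba->host);	/* quiesce the midlayer ... */
	rport = fc_remote_port_add(phba->host, 0, ids);
	if (rport)
		((struct lpfc_rport_data *) rport->dd_data)->pnode = ndlp;
	ndlp->rport = rport;
	scsi_unblock_requests(phba->host);	/* ... and resume */
}

static void example_detach_rport(struct lpfc_hba *phba,
				 struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	ndlp->rport = NULL;			/* break both back-pointers first */
	((struct lpfc_rport_data *) rport->dd_data)->pnode = NULL;
	scsi_block_requests(phba->host);
	fc_remote_port_delete(rport);		/* rport goes away for good */
	scsi_unblock_requests(phba->host);
}

The design point is that requests are blocked only for the short add/delete window rather than for the whole time a port is missing.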
@@ -1260,7 +1272,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1260 * may have removed the remote port. 1272 * may have removed the remote port.
1261 */ 1273 */
1262 if ((rport_del != none) && nlp->rport) 1274 if ((rport_del != none) && nlp->rport)
1263 fc_remote_port_block(nlp->rport); 1275 lpfc_unregister_remote_port(phba, nlp);
1264 1276
1265 if (rport_add != none) { 1277 if (rport_add != none) {
1266 /* 1278 /*
@@ -1270,8 +1282,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1270 */ 1282 */
1271 if (!nlp->rport) 1283 if (!nlp->rport)
1272 lpfc_register_remote_port(phba, nlp); 1284 lpfc_register_remote_port(phba, nlp);
1273 else
1274 fc_remote_port_unblock(nlp->rport);
1275 1285
1276 /* 1286 /*
1277 * if we added to Mapped list, but the remote port 1287 * if we added to Mapped list, but the remote port
@@ -1435,10 +1445,9 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1435 iocb, iocb); 1445 iocb, iocb);
1436 spin_lock_irq(phba->host-> 1446 spin_lock_irq(phba->host->
1437 host_lock); 1447 host_lock);
1438 } else { 1448 } else
1439 list_add_tail(&iocb->list, 1449 lpfc_sli_release_iocbq(phba,
1440 &phba->lpfc_iocb_list); 1450 iocb);
1441 }
1442 } 1451 }
1443 } 1452 }
1444 spin_unlock_irq(phba->host->host_lock); 1453 spin_unlock_irq(phba->host->host_lock);
@@ -1472,7 +1481,6 @@ lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1472 if (rc == MBX_NOT_FINISHED) 1481 if (rc == MBX_NOT_FINISHED)
1473 mempool_free( mbox, phba->mbox_mem_pool); 1482 mempool_free( mbox, phba->mbox_mem_pool);
1474 } 1483 }
1475 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1476 lpfc_no_rpi(phba, ndlp); 1484 lpfc_no_rpi(phba, ndlp);
1477 ndlp->nlp_rpi = 0; 1485 ndlp->nlp_rpi = 0;
1478 return 1; 1486 return 1;
@@ -1490,7 +1498,6 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1490 LPFC_MBOXQ_t *mb; 1498 LPFC_MBOXQ_t *mb;
1491 LPFC_MBOXQ_t *nextmb; 1499 LPFC_MBOXQ_t *nextmb;
1492 struct lpfc_dmabuf *mp; 1500 struct lpfc_dmabuf *mp;
1493 struct fc_rport *rport;
1494 1501
1495 /* Cleanup node for NPort <nlp_DID> */ 1502 /* Cleanup node for NPort <nlp_DID> */
1496 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1503 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
@@ -1507,10 +1514,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1507 * and flush cache's w/o generating flush errors. 1514 * and flush cache's w/o generating flush errors.
1508 */ 1515 */
1509 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) { 1516 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
1510 rport = ndlp->rport; 1517 lpfc_unregister_remote_port(phba, ndlp);
1511 ndlp->rport = NULL;
1512 fc_remote_port_unblock(rport);
1513 fc_remote_port_delete(rport);
1514 ndlp->nlp_sid = NLP_NO_SID; 1518 ndlp->nlp_sid = NLP_NO_SID;
1515 } 1519 }
1516 1520
@@ -2422,10 +2426,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2422 2426
2423 pmb->context1 = NULL; 2427 pmb->context1 = NULL;
2424 2428
2425 if (ndlp->nlp_rpi != 0)
2426 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
2427 ndlp->nlp_rpi = mb->un.varWords[0]; 2429 ndlp->nlp_rpi = mb->un.varWords[0];
2428 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
2429 ndlp->nlp_type |= NLP_FABRIC; 2430 ndlp->nlp_type |= NLP_FABRIC;
2430 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE; 2431 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
2431 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST); 2432 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
@@ -2451,75 +2452,28 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 }
 
 /*
- * This routine looks up the ndlp hash
- * table for the given RPI. If rpi found
+ * This routine looks up the ndlp lists
+ * for the given RPI. If rpi found
  * it return the node list pointer
- * else return 0.
+ * else return NULL.
  */
 struct lpfc_nodelist *
 lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
 {
-	struct lpfc_nodelist *ret;
-
-	ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
-	while ((ret != 0) && (ret->nlp_rpi != rpi)) {
-		ret = ret->nlp_rpi_hash_next;
-	}
-	return ret;
-}
-
-/*
- * This routine looks up the ndlp hash table for the
- * given RPI. If rpi found it return the node list
- * pointer else return 0 after deleting the entry
- * from hash table.
- */
-struct lpfc_nodelist *
-lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
-{
-	struct lpfc_nodelist *ret, *temp;;
-
-	ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
-	if (ret == 0)
-		return NULL;
-
-	if (ret->nlp_rpi == rpi) {
-		phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
-			ret->nlp_rpi_hash_next;
-		ret->nlp_rpi_hash_next = NULL;
-		return ret;
-	}
-
-	while ((ret->nlp_rpi_hash_next != 0) &&
-	       (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
-		ret = ret->nlp_rpi_hash_next;
-	}
-
-	if (ret->nlp_rpi_hash_next != 0) {
-		temp = ret->nlp_rpi_hash_next;
-		ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
-		temp->nlp_rpi_hash_next = NULL;
-		return temp;
-	} else {
-		return NULL;
-	}
-}
-
-/*
- * This routine adds the node list entry to the
- * ndlp hash table.
- */
-void
-lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-		 uint16_t rpi)
-{
+	struct lpfc_nodelist *ndlp;
+	struct list_head * lists[]={&phba->fc_nlpunmap_list,
+				    &phba->fc_nlpmap_list,
+				    &phba->fc_plogi_list,
+				    &phba->fc_adisc_list,
+				    &phba->fc_reglogin_list};
+	int i;
 
-	uint32_t index;
+	for (i = 0; i < ARRAY_SIZE(lists); i++ )
+		list_for_each_entry(ndlp, lists[i], nlp_listp)
+			if (ndlp->nlp_rpi == rpi)
+				return (ndlp);
 
-	index = LPFC_RPI_HASH_FUNC(rpi);
-	ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[index];
-	phba->fc_nlplookup[index] = ndlp;
-	return;
+	return NULL;
 }
 
 void
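The replacement lpfc_findnode_rpi() above trades the per-RPI hash for a linear scan of the five discovery lists. A minimal, self-contained sketch of the same "search several list heads with one loop" idiom, using only the standard kernel list API (the type and function names here are invented for illustration):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */

struct example_node {
	struct list_head link;
	u16 rpi;
};

/* Walk an array of list heads and return the first node matching 'rpi'. */
static struct example_node *
example_find_by_rpi(struct list_head *heads[], int nheads, u16 rpi)
{
	struct example_node *n;
	int i;

	for (i = 0; i < nheads; i++)
		list_for_each_entry(n, heads[i], link)
			if (n->rpi == rpi)
				return n;
	return NULL;
}

The trade-off is a lookup cost proportional to the number of known nodes instead of O(1), presumably acceptable because RPI lookups happen on discovery and error paths rather than in the I/O fast path; in exchange the per-node hash linkage and the three hash-maintenance routines go away.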
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0856ff7d3b33..4e04470321a2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -537,12 +537,6 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
537 537
538 lpfc_offline(phba); 538 lpfc_offline(phba);
539 539
540 /*
541 * Restart all traffic to this host. Since the fc_transport
542 * block functions (future) were not called in lpfc_offline,
543 * don't call them here.
544 */
545 scsi_unblock_requests(phba->host);
546 } 540 }
547} 541}
548 542
@@ -772,10 +766,12 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
 {
 	lpfc_vpd_t *vp;
 	uint32_t id;
+	uint8_t hdrtype;
 	char str[16];
 
 	vp = &phba->vpd;
 	pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);
+	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
 
 	switch ((id >> 16) & 0xffff) {
 	case PCI_DEVICE_ID_FIREFLY:
@@ -803,7 +799,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
 		strcpy(str, "LP9802 2");
 		break;
 	case PCI_DEVICE_ID_THOR:
-		strcpy(str, "LP10000 2");
+		if (hdrtype == 0x80)
+			strcpy(str, "LP10000DC 2");
+		else
+			strcpy(str, "LP10000 2");
 		break;
 	case PCI_DEVICE_ID_VIPER:
 		strcpy(str, "LPX1000 10");
@@ -812,10 +811,16 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
 		strcpy(str, "LP982 2");
 		break;
 	case PCI_DEVICE_ID_TFLY:
-		strcpy(str, "LP1050 2");
+		if (hdrtype == 0x80)
+			strcpy(str, "LP1050DC 2");
+		else
+			strcpy(str, "LP1050 2");
 		break;
 	case PCI_DEVICE_ID_HELIOS:
-		strcpy(str, "LP11000 4");
+		if (hdrtype == 0x80)
+			strcpy(str, "LP11002 4");
+		else
+			strcpy(str, "LP11000 4");
 		break;
 	case PCI_DEVICE_ID_BMID:
 		strcpy(str, "LP1150 4");
@@ -824,13 +829,16 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
 		strcpy(str, "LP111 4");
 		break;
 	case PCI_DEVICE_ID_ZEPHYR:
-		strcpy(str, "LP11000e 4");
+		if (hdrtype == 0x80)
+			strcpy(str, "LPe11002 4");
+		else
+			strcpy(str, "LPe11000 4");
 		break;
 	case PCI_DEVICE_ID_ZMID:
-		strcpy(str, "LP1150e 4");
+		strcpy(str, "LPe1150 4");
 		break;
 	case PCI_DEVICE_ID_ZSMB:
-		strcpy(str, "LP111e 4");
+		strcpy(str, "LPe111 4");
 		break;
 	case PCI_DEVICE_ID_LP101:
 		strcpy(str, "LP101 2");
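The hdrtype checks above key off the PCI header-type register to tell single-channel boards from their dual-channel variants: 0x80 is the multi-function bit of that register. A small sketch of the same probe-time test (function name invented for illustration):

#include <linux/pci.h>

/* Report whether a PCI function belongs to a multi-function (dual-channel) board. */
static int example_is_multifunction(struct pci_dev *pdev)
{
	u8 hdrtype;

	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdrtype);
	/* Bit 7 marks a multi-function device; the low bits encode the
	 * header layout itself (type 0 for ordinary endpoints). */
	return (hdrtype & 0x80) != 0;
}

The patch compares the whole byte against 0x80 rather than masking the bit, which works here because these adapters use header type 0; masking is the more general form.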
@@ -862,8 +870,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
862 int type) 870 int type)
863{ 871{
864 IOCB_t *icmd; 872 IOCB_t *icmd;
865 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 873 struct lpfc_iocbq *iocb;
866 struct lpfc_iocbq *iocb = NULL;
867 struct lpfc_dmabuf *mp1, *mp2; 874 struct lpfc_dmabuf *mp1, *mp2;
868 875
869 cnt += pring->missbufcnt; 876 cnt += pring->missbufcnt;
@@ -872,13 +879,12 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
872 while (cnt > 0) { 879 while (cnt > 0) {
873 /* Allocate buffer for command iocb */ 880 /* Allocate buffer for command iocb */
874 spin_lock_irq(phba->host->host_lock); 881 spin_lock_irq(phba->host->host_lock);
875 list_remove_head(lpfc_iocb_list, iocb, struct lpfc_iocbq, list); 882 iocb = lpfc_sli_get_iocbq(phba);
876 spin_unlock_irq(phba->host->host_lock); 883 spin_unlock_irq(phba->host->host_lock);
877 if (iocb == NULL) { 884 if (iocb == NULL) {
878 pring->missbufcnt = cnt; 885 pring->missbufcnt = cnt;
879 return cnt; 886 return cnt;
880 } 887 }
881 memset(iocb, 0, sizeof (struct lpfc_iocbq));
882 icmd = &iocb->iocb; 888 icmd = &iocb->iocb;
883 889
884 /* 2 buffers can be posted per command */ 890 /* 2 buffers can be posted per command */
@@ -891,7 +897,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
891 if (mp1) 897 if (mp1)
892 kfree(mp1); 898 kfree(mp1);
893 spin_lock_irq(phba->host->host_lock); 899 spin_lock_irq(phba->host->host_lock);
894 list_add_tail(&iocb->list, lpfc_iocb_list); 900 lpfc_sli_release_iocbq(phba, iocb);
895 spin_unlock_irq(phba->host->host_lock); 901 spin_unlock_irq(phba->host->host_lock);
896 pring->missbufcnt = cnt; 902 pring->missbufcnt = cnt;
897 return cnt; 903 return cnt;
@@ -910,7 +916,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
910 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 916 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
911 kfree(mp1); 917 kfree(mp1);
912 spin_lock_irq(phba->host->host_lock); 918 spin_lock_irq(phba->host->host_lock);
913 list_add_tail(&iocb->list, lpfc_iocb_list); 919 lpfc_sli_release_iocbq(phba, iocb);
914 spin_unlock_irq(phba->host->host_lock); 920 spin_unlock_irq(phba->host->host_lock);
915 pring->missbufcnt = cnt; 921 pring->missbufcnt = cnt;
916 return cnt; 922 return cnt;
@@ -947,7 +953,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
947 kfree(mp2); 953 kfree(mp2);
948 cnt++; 954 cnt++;
949 } 955 }
950 list_add_tail(&iocb->list, lpfc_iocb_list); 956 lpfc_sli_release_iocbq(phba, iocb);
951 pring->missbufcnt = cnt; 957 pring->missbufcnt = cnt;
952 spin_unlock_irq(phba->host->host_lock); 958 spin_unlock_irq(phba->host->host_lock);
953 return cnt; 959 return cnt;
@@ -1226,12 +1232,6 @@ lpfc_online(struct lpfc_hba * phba)
1226 phba->fc_flag &= ~FC_OFFLINE_MODE; 1232 phba->fc_flag &= ~FC_OFFLINE_MODE;
1227 spin_unlock_irq(phba->host->host_lock); 1233 spin_unlock_irq(phba->host->host_lock);
1228 1234
1229 /*
1230 * Restart all traffic to this host. Since the fc_transport block
1231 * functions (future) were not called in lpfc_offline, don't call them
1232 * here.
1233 */
1234 scsi_unblock_requests(phba->host);
1235 return 0; 1235 return 0;
1236} 1236}
1237 1237
@@ -1249,13 +1249,6 @@ lpfc_offline(struct lpfc_hba * phba)
1249 if (phba->fc_flag & FC_OFFLINE_MODE) 1249 if (phba->fc_flag & FC_OFFLINE_MODE)
1250 return 0; 1250 return 0;
1251 1251
1252 /*
1253 * Don't call the fc_transport block api (future). The device is
1254 * going offline and causing a timer to fire in the midlayer is
1255 * unproductive. Just block all new requests until the driver
1256 * comes back online.
1257 */
1258 scsi_block_requests(phba->host);
1259 psli = &phba->sli; 1252 psli = &phba->sli;
1260 pring = &psli->ring[psli->fcp_ring]; 1253 pring = &psli->ring[psli->fcp_ring];
1261 1254
@@ -1333,6 +1326,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1333 unsigned long bar0map_len, bar2map_len; 1326 unsigned long bar0map_len, bar2map_len;
1334 int error = -ENODEV, retval; 1327 int error = -ENODEV, retval;
1335 int i; 1328 int i;
1329 uint16_t iotag;
1336 1330
1337 if (pci_enable_device(pdev)) 1331 if (pci_enable_device(pdev))
1338 goto out; 1332 goto out;
@@ -1434,6 +1428,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1434 if (!phba->slim2p) 1428 if (!phba->slim2p)
1435 goto out_iounmap; 1429 goto out_iounmap;
1436 1430
1431 memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
1437 1432
1438 /* Initialize the SLI Layer to run with lpfc HBAs. */ 1433 /* Initialize the SLI Layer to run with lpfc HBAs. */
1439 lpfc_sli_setup(phba); 1434 lpfc_sli_setup(phba);
@@ -1456,6 +1451,15 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1456 } 1451 }
1457 1452
1458 memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq)); 1453 memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
1454 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
1455 if (iotag == 0) {
1456 kfree (iocbq_entry);
1457 printk(KERN_ERR "%s: failed to allocate IOTAG. "
1458 "Unloading driver.\n",
1459 __FUNCTION__);
1460 error = -ENOMEM;
1461 goto out_free_iocbq;
1462 }
1459 spin_lock_irq(phba->host->host_lock); 1463 spin_lock_irq(phba->host->host_lock);
1460 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 1464 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
1461 phba->total_iocbq_bufs++; 1465 phba->total_iocbq_bufs++;
@@ -1702,6 +1706,7 @@ MODULE_DEVICE_TABLE(pci, lpfc_id_table);
1702 1706
1703static struct pci_driver lpfc_driver = { 1707static struct pci_driver lpfc_driver = {
1704 .name = LPFC_DRIVER_NAME, 1708 .name = LPFC_DRIVER_NAME,
1709 .owner = THIS_MODULE,
1705 .id_table = lpfc_id_table, 1710 .id_table = lpfc_id_table,
1706 .probe = lpfc_pci_probe_one, 1711 .probe = lpfc_pci_probe_one,
1707 .remove = __devexit_p(lpfc_pci_remove_one), 1712 .remove = __devexit_p(lpfc_pci_remove_one),
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 73eb89f91593..31c20cc00609 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -531,6 +531,7 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
531 size_t offset; 531 size_t offset;
532 struct lpfc_hgp hgp; 532 struct lpfc_hgp hgp;
533 void __iomem *to_slim; 533 void __iomem *to_slim;
534 int i;
534 535
535 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 536 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
536 mb->mbxCommand = MBX_CONFIG_PORT; 537 mb->mbxCommand = MBX_CONFIG_PORT;
@@ -587,7 +588,11 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
587 /* write HGP data to SLIM at the required longword offset */ 588 /* write HGP data to SLIM at the required longword offset */
588 memset(&hgp, 0, sizeof(struct lpfc_hgp)); 589 memset(&hgp, 0, sizeof(struct lpfc_hgp));
589 to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t)); 590 to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
590 lpfc_memcpy_to_slim(to_slim, &hgp, sizeof(struct lpfc_hgp)); 591
592 for (i=0; i < phba->sli.num_rings; i++) {
593 lpfc_memcpy_to_slim(to_slim, &hgp, sizeof(struct lpfc_hgp));
594 to_slim += sizeof (struct lpfc_hgp);
595 }
591 596
592 /* Setup Port Group ring pointer */ 597 /* Setup Port Group ring pointer */
593 offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port - 598 offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 9b35eaac781d..507a6af56f42 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -187,10 +187,8 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
187 spin_unlock_irq(phba->host->host_lock); 187 spin_unlock_irq(phba->host->host_lock);
188 (iocb->iocb_cmpl) (phba, iocb, iocb); 188 (iocb->iocb_cmpl) (phba, iocb, iocb);
189 spin_lock_irq(phba->host->host_lock); 189 spin_lock_irq(phba->host->host_lock);
190 } else { 190 } else
191 list_add_tail(&iocb->list, 191 lpfc_sli_release_iocbq(phba, iocb);
192 &phba->lpfc_iocb_list);
193 }
194 break; 192 break;
195 } 193 }
196 } 194 }
@@ -232,10 +230,8 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
232 spin_unlock_irq(phba->host->host_lock); 230 spin_unlock_irq(phba->host->host_lock);
233 (iocb->iocb_cmpl) (phba, iocb, iocb); 231 (iocb->iocb_cmpl) (phba, iocb, iocb);
234 spin_lock_irq(phba->host->host_lock); 232 spin_lock_irq(phba->host->host_lock);
235 } else { 233 } else
236 list_add_tail(&iocb->list, 234 lpfc_sli_release_iocbq(phba, iocb);
237 &phba->lpfc_iocb_list);
238 }
239 break; 235 break;
240 } 236 }
241 } 237 }
@@ -1086,11 +1082,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1086 return (ndlp->nlp_state); 1082 return (ndlp->nlp_state);
1087 } 1083 }
1088 1084
1089 if (ndlp->nlp_rpi != 0)
1090 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1091
1092 ndlp->nlp_rpi = mb->un.varWords[0]; 1085 ndlp->nlp_rpi = mb->un.varWords[0];
1093 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1094 1086
1095 /* Only if we are not a fabric nport do we issue PRLI */ 1087 /* Only if we are not a fabric nport do we issue PRLI */
1096 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1088 if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1593,12 +1585,7 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1593 pmb = (LPFC_MBOXQ_t *) arg; 1585 pmb = (LPFC_MBOXQ_t *) arg;
1594 mb = &pmb->mb; 1586 mb = &pmb->mb;
1595 1587
1596 /* save rpi */
1597 if (ndlp->nlp_rpi != 0)
1598 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1599
1600 ndlp->nlp_rpi = mb->un.varWords[0]; 1588 ndlp->nlp_rpi = mb->un.varWords[0];
1601 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1602 1589
1603 return (ndlp->nlp_state); 1590 return (ndlp->nlp_state);
1604} 1591}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b5ad1871d34b..c34d3cf4f19c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -50,12 +50,13 @@
50 * and the BPL BDE is setup in the IOCB. 50 * and the BPL BDE is setup in the IOCB.
51 */ 51 */
52static struct lpfc_scsi_buf * 52static struct lpfc_scsi_buf *
53lpfc_get_scsi_buf(struct lpfc_hba * phba) 53lpfc_new_scsi_buf(struct lpfc_hba * phba)
54{ 54{
55 struct lpfc_scsi_buf *psb; 55 struct lpfc_scsi_buf *psb;
56 struct ulp_bde64 *bpl; 56 struct ulp_bde64 *bpl;
57 IOCB_t *iocb; 57 IOCB_t *iocb;
58 dma_addr_t pdma_phys; 58 dma_addr_t pdma_phys;
59 uint16_t iotag;
59 60
60 psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 61 psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
61 if (!psb) 62 if (!psb)
@@ -79,6 +80,16 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
79 /* Initialize virtual ptrs to dma_buf region. */ 80 /* Initialize virtual ptrs to dma_buf region. */
80 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 81 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
81 82
83 /* Allocate iotag for psb->cur_iocbq. */
84 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
85 if (iotag == 0) {
86 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
87 psb->data, psb->dma_handle);
88 kfree (psb);
89 return NULL;
90 }
91 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
92
82 psb->fcp_cmnd = psb->data; 93 psb->fcp_cmnd = psb->data;
83 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 94 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
84 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 95 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
@@ -125,11 +136,19 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
125 return psb; 136 return psb;
126} 137}
127 138
128static void 139struct lpfc_scsi_buf*
129lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb) 140lpfc_sli_get_scsi_buf(struct lpfc_hba * phba)
130{ 141{
131 struct lpfc_hba *phba = psb->scsi_hba; 142 struct lpfc_scsi_buf * lpfc_cmd = NULL;
143 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
144
145 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
146 return lpfc_cmd;
147}
132 148
149static void
150lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
151{
133 /* 152 /*
134 * There are only two special cases to consider. (1) the scsi command 153 * There are only two special cases to consider. (1) the scsi command
135 * requested scatter-gather usage or (2) the scsi command allocated 154 * requested scatter-gather usage or (2) the scsi command allocated
@@ -147,6 +166,7 @@ lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
147 } 166 }
148 } 167 }
149 168
169 psb->pCmd = NULL;
150 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 170 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
151} 171}
152 172
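The lpfc_sli_get_scsi_buf()/lpfc_release_scsi_buf() pair above turns the pre-allocated scsi_buf list into a plain get/put pool: list_remove_head() pops the first entry or leaves the pointer NULL when the list is empty, and release clears per-command state before returning the buffer. A generic sketch of that pattern using only the standard list API, with invented names (the caller is assumed to hold whatever lock protects the pool):

#include <linux/list.h>

struct example_buf {
	struct list_head list;
	void *payload;
};

/* Pop one buffer from the free list, or return NULL if it is empty. */
static struct example_buf *example_get_buf(struct list_head *pool)
{
	struct example_buf *buf;

	if (list_empty(pool))
		return NULL;
	buf = list_entry(pool->next, struct example_buf, list);
	list_del(&buf->list);
	return buf;
}

/* Return a buffer to the tail of the free list after clearing per-use state. */
static void example_put_buf(struct list_head *pool, struct example_buf *buf)
{
	buf->payload = NULL;		/* mirrors psb->pCmd = NULL above */
	list_add_tail(&buf->list, pool);
}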
@@ -403,14 +423,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
403 break; 423 break;
404 } 424 }
405 425
406 if (pnode) { 426 if ((pnode == NULL )
407 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) 427 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
408 cmd->result = ScsiResult(DID_BUS_BUSY, 428 cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
409 SAM_STAT_BUSY);
410 }
411 else {
412 cmd->result = ScsiResult(DID_NO_CONNECT, 0);
413 }
414 } else { 429 } else {
415 cmd->result = ScsiResult(DID_OK, 0); 430 cmd->result = ScsiResult(DID_OK, 0);
416 } 431 }
@@ -426,12 +441,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
426 *lp, *(lp + 3), cmd->retries, cmd->resid); 441 *lp, *(lp + 3), cmd->retries, cmd->resid);
427 } 442 }
428 443
444 cmd->scsi_done(cmd);
445
429 spin_lock_irqsave(phba->host->host_lock, iflag); 446 spin_lock_irqsave(phba->host->host_lock, iflag);
430 lpfc_free_scsi_buf(lpfc_cmd); 447 lpfc_release_scsi_buf(phba, lpfc_cmd);
431 cmd->host_scribble = NULL;
432 spin_unlock_irqrestore(phba->host->host_lock, iflag); 448 spin_unlock_irqrestore(phba->host->host_lock, iflag);
433
434 cmd->scsi_done(cmd);
435} 449}
436 450
437static void 451static void
@@ -539,7 +553,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
539 struct lpfc_rport_data *rdata = scsi_dev->hostdata; 553 struct lpfc_rport_data *rdata = scsi_dev->hostdata;
540 struct lpfc_nodelist *ndlp = rdata->pnode; 554 struct lpfc_nodelist *ndlp = rdata->pnode;
541 555
542 if ((ndlp == 0) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 556 if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
543 return 0; 557 return 0;
544 } 558 }
545 559
@@ -618,8 +632,7 @@ static int
618lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba) 632lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
619{ 633{
620 struct lpfc_iocbq *iocbq; 634 struct lpfc_iocbq *iocbq;
621 struct lpfc_iocbq *iocbqrsp = NULL; 635 struct lpfc_iocbq *iocbqrsp;
622 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
623 int ret; 636 int ret;
624 637
625 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); 638 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
@@ -628,17 +641,14 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
628 641
629 lpfc_cmd->scsi_hba = phba; 642 lpfc_cmd->scsi_hba = phba;
630 iocbq = &lpfc_cmd->cur_iocbq; 643 iocbq = &lpfc_cmd->cur_iocbq;
631 list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list); 644 iocbqrsp = lpfc_sli_get_iocbq(phba);
645
632 if (!iocbqrsp) 646 if (!iocbqrsp)
633 return FAILED; 647 return FAILED;
634 memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq)); 648
635 649 ret = lpfc_sli_issue_iocb_wait(phba,
636 iocbq->iocb_flag |= LPFC_IO_POLL; 650 &phba->sli.ring[phba->sli.fcp_ring],
637 ret = lpfc_sli_issue_iocb_wait_high_priority(phba, 651 iocbq, iocbqrsp, lpfc_cmd->timeout);
638 &phba->sli.ring[phba->sli.fcp_ring],
639 iocbq, SLI_IOCB_HIGH_PRIORITY,
640 iocbqrsp,
641 lpfc_cmd->timeout);
642 if (ret != IOCB_SUCCESS) { 652 if (ret != IOCB_SUCCESS) {
643 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 653 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
644 ret = FAILED; 654 ret = FAILED;
@@ -651,45 +661,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
651 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 661 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
652 } 662 }
653 663
654 /* 664 lpfc_sli_release_iocbq(phba, iocbqrsp);
655 * All outstanding txcmplq I/Os should have been aborted by the target.
656 * Unfortunately, some targets do not abide by this forcing the driver
657 * to double check.
658 */
659 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
660 lpfc_cmd->pCmd->device->id,
661 lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);
662
663 /* Return response IOCB to free list. */
664 list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
665 return ret; 665 return ret;
666} 666}
667 667
668static void
669lpfc_scsi_cmd_iocb_cleanup (struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
670 struct lpfc_iocbq *pIocbOut)
671{
672 unsigned long iflag;
673 struct lpfc_scsi_buf *lpfc_cmd =
674 (struct lpfc_scsi_buf *) pIocbIn->context1;
675
676 spin_lock_irqsave(phba->host->host_lock, iflag);
677 lpfc_free_scsi_buf(lpfc_cmd);
678 spin_unlock_irqrestore(phba->host->host_lock, iflag);
679}
680
681static void
682lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
683 struct lpfc_iocbq *pIocbIn,
684 struct lpfc_iocbq *pIocbOut)
685{
686 struct scsi_cmnd *ml_cmd =
687 ((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;
688
689 lpfc_scsi_cmd_iocb_cleanup (phba, pIocbIn, pIocbOut);
690 ml_cmd->host_scribble = NULL;
691}
692
693const char * 668const char *
694lpfc_info(struct Scsi_Host *host) 669lpfc_info(struct Scsi_Host *host)
695{ 670{
@@ -726,43 +701,25 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
726 struct lpfc_sli *psli = &phba->sli; 701 struct lpfc_sli *psli = &phba->sli;
727 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 702 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
728 struct lpfc_nodelist *ndlp = rdata->pnode; 703 struct lpfc_nodelist *ndlp = rdata->pnode;
729 struct lpfc_scsi_buf *lpfc_cmd = NULL; 704 struct lpfc_scsi_buf *lpfc_cmd;
730 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; 705 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
731 int err = 0; 706 int err;
732 707
733 /* 708 err = fc_remote_port_chkready(rport);
734 * The target pointer is guaranteed not to be NULL because the driver 709 if (err) {
735 * only clears the device->hostdata field in lpfc_slave_destroy. This 710 cmnd->result = err;
736 * approach guarantees no further IO calls on this target.
737 */
738 if (!ndlp) {
739 cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
740 goto out_fail_command; 711 goto out_fail_command;
741 } 712 }
742 713
743 /* 714 /*
744 * A Fibre Channel target is present and functioning only when the node 715 * Catch race where our node has transitioned, but the
745 * state is MAPPED. Any other state is a failure. 716 * transport is still transitioning.
746 */ 717 */
747 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 718 if (!ndlp) {
748 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 719 cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
749 (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) { 720 goto out_fail_command;
750 cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
751 goto out_fail_command;
752 }
753 else if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
754 cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
755 goto out_fail_command;
756 }
757 /*
758 * The device is most likely recovered and the driver
759 * needs a bit more time to finish. Ask the midlayer
760 * to retry.
761 */
762 goto out_host_busy;
763 } 721 }
764 722 lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
765 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
766 if (lpfc_cmd == NULL) { 723 if (lpfc_cmd == NULL) {
767 printk(KERN_WARNING "%s: No buffer available - list empty, " 724 printk(KERN_WARNING "%s: No buffer available - list empty, "
768 "total count %d\n", __FUNCTION__, phba->total_scsi_bufs); 725 "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
@@ -792,7 +749,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
792 return 0; 749 return 0;
793 750
794 out_host_busy_free_buf: 751 out_host_busy_free_buf:
795 lpfc_free_scsi_buf(lpfc_cmd); 752 lpfc_release_scsi_buf(phba, lpfc_cmd);
796 cmnd->host_scribble = NULL; 753 cmnd->host_scribble = NULL;
797 out_host_busy: 754 out_host_busy:
798 return SCSI_MLQUEUE_HOST_BUSY; 755 return SCSI_MLQUEUE_HOST_BUSY;
@@ -808,119 +765,92 @@ __lpfc_abort_handler(struct scsi_cmnd *cmnd)
808 struct lpfc_hba *phba = 765 struct lpfc_hba *phba =
809 (struct lpfc_hba *)cmnd->device->host->hostdata[0]; 766 (struct lpfc_hba *)cmnd->device->host->hostdata[0];
810 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; 767 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
811 struct lpfc_iocbq *iocb, *next_iocb; 768 struct lpfc_iocbq *iocb;
812 struct lpfc_iocbq *abtsiocb = NULL; 769 struct lpfc_iocbq *abtsiocb;
813 struct lpfc_scsi_buf *lpfc_cmd; 770 struct lpfc_scsi_buf *lpfc_cmd;
814 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
815 IOCB_t *cmd, *icmd; 771 IOCB_t *cmd, *icmd;
816 unsigned long snum;
817 unsigned int id, lun;
818 unsigned int loop_count = 0; 772 unsigned int loop_count = 0;
819 int ret = IOCB_SUCCESS; 773 int ret = SUCCESS;
820 774
821 /*
822 * If the host_scribble data area is NULL, then the driver has already
823 * completed this command, but the midlayer did not see the completion
824 * before the eh fired. Just return SUCCESS.
825 */
826 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
827 if (!lpfc_cmd)
828 return SUCCESS;
829 775
830 /* save these now since lpfc_cmd can be freed */ 776 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
831 id = lpfc_cmd->pCmd->device->id; 777 BUG_ON(!lpfc_cmd);
832 lun = lpfc_cmd->pCmd->device->lun;
833 snum = lpfc_cmd->pCmd->serial_number;
834 778
835 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 779 /*
836 cmd = &iocb->iocb; 780 * If pCmd field of the corresponding lpfc_scsi_buf structure
837 if (iocb->context1 != lpfc_cmd) 781 * points to a different SCSI command, then the driver has
838 continue; 782 * already completed this command, but the midlayer did not
783 * see the completion before the eh fired. Just return
784 * SUCCESS.
785 */
786 iocb = &lpfc_cmd->cur_iocbq;
787 if (lpfc_cmd->pCmd != cmnd)
788 goto out;
839 789
840 list_del_init(&iocb->list); 790 BUG_ON(iocb->context1 != lpfc_cmd);
841 pring->txq_cnt--;
842 if (!iocb->iocb_cmpl) {
843 list_add_tail(&iocb->list, lpfc_iocb_list);
844 }
845 else {
846 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
847 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
848 lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
849 }
850 791
792 abtsiocb = lpfc_sli_get_iocbq(phba);
793 if (abtsiocb == NULL) {
794 ret = FAILED;
851 goto out; 795 goto out;
852 } 796 }
853 797
854 list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
855 if (abtsiocb == NULL)
856 return FAILED;
857
858 memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
859
860 /* 798 /*
 861      * The scsi command was not in the txq. Check the txcmplq and if it is 799      * The scsi command cannot be in the txq and it is in flight because the
 862      * found, send an abort to the FW.                                     800      * pCmd is still pointing at the SCSI command we have to abort. There
801 * is no need to search the txcmplq. Just send an abort to the FW.
863 */ 802 */
864 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
865 if (iocb->context1 != lpfc_cmd)
866 continue;
867 803
868 iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted; 804 cmd = &iocb->iocb;
869 cmd = &iocb->iocb; 805 icmd = &abtsiocb->iocb;
870 icmd = &abtsiocb->iocb; 806 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
871 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 807 icmd->un.acxri.abortContextTag = cmd->ulpContext;
872 icmd->un.acxri.abortContextTag = cmd->ulpContext; 808 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
873 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
874
875 icmd->ulpLe = 1;
876 icmd->ulpClass = cmd->ulpClass;
877 if (phba->hba_state >= LPFC_LINK_UP)
878 icmd->ulpCommand = CMD_ABORT_XRI_CN;
879 else
880 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
881 809
882 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 810 icmd->ulpLe = 1;
883 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == 811 icmd->ulpClass = cmd->ulpClass;
884 IOCB_ERROR) { 812 if (phba->hba_state >= LPFC_LINK_UP)
885 list_add_tail(&abtsiocb->list, lpfc_iocb_list); 813 icmd->ulpCommand = CMD_ABORT_XRI_CN;
886 ret = IOCB_ERROR; 814 else
887 break; 815 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
888 }
889 816
890 /* Wait for abort to complete */ 817 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
891 while (cmnd->host_scribble) 818 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
892 { 819 lpfc_sli_release_iocbq(phba, abtsiocb);
893 spin_unlock_irq(phba->host->host_lock); 820 ret = FAILED;
894 set_current_state(TASK_UNINTERRUPTIBLE); 821 goto out;
895 schedule_timeout(LPFC_ABORT_WAIT*HZ); 822 }
896 spin_lock_irq(phba->host->host_lock);
897 if (++loop_count
898 > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
899 break;
900 }
901 823
902 if(cmnd->host_scribble) { 824 /* Wait for abort to complete */
903 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 825 while (lpfc_cmd->pCmd == cmnd)
904 "%d:0748 abort handler timed " 826 {
905 "out waiting for abort to " 827 spin_unlock_irq(phba->host->host_lock);
906 "complete. Data: " 828 set_current_state(TASK_UNINTERRUPTIBLE);
907 "x%x x%x x%x x%lx\n", 829 schedule_timeout(LPFC_ABORT_WAIT*HZ);
908 phba->brd_no, ret, id, lun, snum); 830 spin_lock_irq(phba->host->host_lock);
909 cmnd->host_scribble = NULL; 831 if (++loop_count
910 iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup; 832 > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
911 ret = IOCB_ERROR; 833 break;
912 } 834 }
913 835
914 break; 836 if (lpfc_cmd->pCmd == cmnd) {
837 ret = FAILED;
838 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
839 "%d:0748 abort handler timed out waiting for "
840 "abort to complete: ret %#x, ID %d, LUN %d, "
841 "snum %#lx\n",
842 phba->brd_no, ret, cmnd->device->id,
843 cmnd->device->lun, cmnd->serial_number);
915 } 844 }
916 845
917 out: 846 out:
918 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 847 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
919 "%d:0749 SCSI layer issued abort device " 848 "%d:0749 SCSI layer issued abort device: ret %#x, "
920 "Data: x%x x%x x%x x%lx\n", 849 "ID %d, LUN %d, snum %#lx\n",
921 phba->brd_no, ret, id, lun, snum); 850 phba->brd_no, ret, cmnd->device->id,
851 cmnd->device->lun, cmnd->serial_number);
922 852
923 return ret == IOCB_SUCCESS ? SUCCESS : FAILED; 853 return ret;
924} 854}
925 855
926static int 856static int
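
The rewritten abort handler above keys everything off lpfc_cmd->pCmd: if it no longer points at the command being aborted, the I/O already completed; otherwise an ABTS is issued and the handler polls pCmd with the host lock dropped, bounded by roughly 2 * cfg_nodev_tmo. A minimal userspace sketch of that bounded-poll idea, with hypothetical names and a plain flag standing in for the pCmd check (not driver code):

#include <stdio.h>
#include <unistd.h>

#define ABORT_WAIT_SECS 1      /* models LPFC_ABORT_WAIT */
#define NODEV_TMO_SECS  3      /* models phba->cfg_nodev_tmo */

/* Poll "*inflight" until the completion path clears it, or give up after
 * roughly 2 * NODEV_TMO_SECS.  Returns 0 on success, -1 on timeout. */
static int wait_for_abort(volatile int *inflight)
{
        int loop_count = 0;

        while (*inflight) {
                sleep(ABORT_WAIT_SECS);         /* host lock would be dropped here */
                if (++loop_count > (2 * NODEV_TMO_SECS) / ABORT_WAIT_SECS)
                        return -1;              /* "abort handler timed out" */
        }
        return 0;
}

int main(void)
{
        volatile int inflight = 0;              /* command already completed */

        printf("abort wait: %s\n",
               wait_for_abort(&inflight) ? "FAILED" : "SUCCESS");
        return 0;
}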
@@ -938,11 +868,8 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
938{ 868{
939 struct Scsi_Host *shost = cmnd->device->host; 869 struct Scsi_Host *shost = cmnd->device->host;
940 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0]; 870 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
941 struct lpfc_sli *psli = &phba->sli; 871 struct lpfc_scsi_buf *lpfc_cmd;
942 struct lpfc_scsi_buf *lpfc_cmd = NULL; 872 struct lpfc_iocbq *iocbq, *iocbqrsp;
943 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
944 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
945 struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
946 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 873 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
947 struct lpfc_nodelist *pnode = rdata->pnode; 874 struct lpfc_nodelist *pnode = rdata->pnode;
948 int ret = FAILED; 875 int ret = FAILED;
@@ -966,7 +893,7 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
966 break; 893 break;
967 } 894 }
968 895
969 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); 896 lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
970 if (lpfc_cmd == NULL) 897 if (lpfc_cmd == NULL)
971 goto out; 898 goto out;
972 899
@@ -981,18 +908,13 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
981 iocbq = &lpfc_cmd->cur_iocbq; 908 iocbq = &lpfc_cmd->cur_iocbq;
982 909
983 /* get a buffer for this IOCB command response */ 910 /* get a buffer for this IOCB command response */
984 list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list); 911 iocbqrsp = lpfc_sli_get_iocbq(phba);
985 if (iocbqrsp == NULL) 912 if (iocbqrsp == NULL)
986 goto out_free_scsi_buf; 913 goto out_free_scsi_buf;
987 914
988 memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq)); 915 ret = lpfc_sli_issue_iocb_wait(phba,
989 916 &phba->sli.ring[phba->sli.fcp_ring],
990 iocbq->iocb_flag |= LPFC_IO_POLL; 917 iocbq, iocbqrsp, lpfc_cmd->timeout);
991 iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
992
993 ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
994 &phba->sli.ring[psli->fcp_ring],
995 iocbq, 0, iocbqrsp, 60);
996 if (ret == IOCB_SUCCESS) 918 if (ret == IOCB_SUCCESS)
997 ret = SUCCESS; 919 ret = SUCCESS;
998 920
@@ -1027,12 +949,13 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1027 } 949 }
1028 950
1029 if (cnt) { 951 if (cnt) {
1030 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 952 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1031 "%d:0719 LUN Reset I/O flush failure: cnt x%x\n", 953 "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
1032 phba->brd_no, cnt); 954 phba->brd_no, cnt);
955 ret = FAILED;
1033 } 956 }
1034 957
1035 list_add_tail(&iocbqrsp->list, lpfc_iocb_list); 958 lpfc_sli_release_iocbq(phba, iocbqrsp);
1036 959
1037out_free_scsi_buf: 960out_free_scsi_buf:
1038 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 961 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
@@ -1041,7 +964,7 @@ out_free_scsi_buf:
1041 phba->brd_no, lpfc_cmd->pCmd->device->id, 964 phba->brd_no, lpfc_cmd->pCmd->device->id,
1042 lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status, 965 lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
1043 lpfc_cmd->result); 966 lpfc_cmd->result);
1044 lpfc_free_scsi_buf(lpfc_cmd); 967 lpfc_release_scsi_buf(phba, lpfc_cmd);
1045out: 968out:
1046 return ret; 969 return ret;
1047} 970}
@@ -1069,10 +992,9 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1069 int ret = FAILED, i, err_count = 0; 992 int ret = FAILED, i, err_count = 0;
1070 int cnt, loopcnt; 993 int cnt, loopcnt;
1071 unsigned int midlayer_id = 0; 994 unsigned int midlayer_id = 0;
1072 struct lpfc_scsi_buf * lpfc_cmd = NULL; 995 struct lpfc_scsi_buf * lpfc_cmd;
1073 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
1074 996
1075 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); 997 lpfc_cmd = lpfc_sli_get_scsi_buf (phba);
1076 if (lpfc_cmd == NULL) 998 if (lpfc_cmd == NULL)
1077 goto out; 999 goto out;
1078 1000
@@ -1136,10 +1058,12 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1136 phba->brd_no, cnt, i); 1058 phba->brd_no, cnt, i);
1137 } 1059 }
1138 1060
1139 if (!err_count) 1061 if (cnt == 0)
1140 ret = SUCCESS; 1062 ret = SUCCESS;
1063 else
1064 ret = FAILED;
1141 1065
1142 lpfc_free_scsi_buf(lpfc_cmd); 1066 lpfc_release_scsi_buf(phba, lpfc_cmd);
1143 lpfc_printf_log(phba, 1067 lpfc_printf_log(phba,
1144 KERN_ERR, 1068 KERN_ERR,
1145 LOG_FCP, 1069 LOG_FCP,
@@ -1163,66 +1087,47 @@ static int
1163lpfc_slave_alloc(struct scsi_device *sdev) 1087lpfc_slave_alloc(struct scsi_device *sdev)
1164{ 1088{
1165 struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0]; 1089 struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
1166 struct lpfc_nodelist *ndlp = NULL;
1167 int match = 0;
1168 struct lpfc_scsi_buf *scsi_buf = NULL; 1090 struct lpfc_scsi_buf *scsi_buf = NULL;
1091 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1169 uint32_t total = 0, i; 1092 uint32_t total = 0, i;
1170 uint32_t num_to_alloc = 0; 1093 uint32_t num_to_alloc = 0;
1171 unsigned long flags; 1094 unsigned long flags;
1172 struct list_head *listp;
1173 struct list_head *node_list[6];
1174
1175 /*
1176 * Store the target pointer in the scsi_device hostdata pointer provided
1177 * the driver has already discovered the target id.
1178 */
1179
1180 /* Search the nlp lists other than unmap_list for this target ID */
1181 node_list[0] = &phba->fc_npr_list;
1182 node_list[1] = &phba->fc_nlpmap_list;
1183 node_list[2] = &phba->fc_prli_list;
1184 node_list[3] = &phba->fc_reglogin_list;
1185 node_list[4] = &phba->fc_adisc_list;
1186 node_list[5] = &phba->fc_plogi_list;
1187
1188 for (i = 0; i < 6 && !match; i++) {
1189 listp = node_list[i];
1190 if (list_empty(listp))
1191 continue;
1192 list_for_each_entry(ndlp, listp, nlp_listp) {
1193 if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
1194 match = 1;
1195 break;
1196 }
1197 }
1198 }
1199 1095
1200 if (!match) 1096 if (!rport || fc_remote_port_chkready(rport))
1201 return -ENXIO; 1097 return -ENXIO;
1202 1098
1203 sdev->hostdata = ndlp->rport->dd_data; 1099 sdev->hostdata = rport->dd_data;
1204 1100
1205 /* 1101 /*
1206 * Populate the cmds_per_lun count scsi_bufs into this host's globally 1102 * Populate the cmds_per_lun count scsi_bufs into this host's globally
1207 * available list of scsi buffers. Don't allocate more than the 1103 * available list of scsi buffers. Don't allocate more than the
1208 * HBA limit conveyed to the midlayer via the host structure. Note 1104 * HBA limit conveyed to the midlayer via the host structure. The
1209 * that this list of scsi bufs exists for the lifetime of the driver. 1105 * formula accounts for the lun_queue_depth + error handlers + 1
1106 * extra. This list of scsi bufs exists for the lifetime of the driver.
1210 */ 1107 */
1211 total = phba->total_scsi_bufs; 1108 total = phba->total_scsi_bufs;
1212 num_to_alloc = LPFC_CMD_PER_LUN; 1109 num_to_alloc = phba->cfg_lun_queue_depth + 2;
1213 if (total >= phba->cfg_hba_queue_depth) { 1110 if (total >= phba->cfg_hba_queue_depth) {
1214 printk(KERN_WARNING "%s, At config limitation of " 1111 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1215 "%d allocated scsi_bufs\n", __FUNCTION__, total); 1112 "%d:0704 At limitation of %d preallocated "
1113 "command buffers\n", phba->brd_no, total);
1216 return 0; 1114 return 0;
1217 } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) { 1115 } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
1116 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1117 "%d:0705 Allocation request of %d command "
1118 "buffers will exceed max of %d. Reducing "
1119 "allocation request to %d.\n", phba->brd_no,
1120 num_to_alloc, phba->cfg_hba_queue_depth,
1121 (phba->cfg_hba_queue_depth - total));
1218 num_to_alloc = phba->cfg_hba_queue_depth - total; 1122 num_to_alloc = phba->cfg_hba_queue_depth - total;
1219 } 1123 }
1220 1124
1221 for (i = 0; i < num_to_alloc; i++) { 1125 for (i = 0; i < num_to_alloc; i++) {
1222 scsi_buf = lpfc_get_scsi_buf(phba); 1126 scsi_buf = lpfc_new_scsi_buf(phba);
1223 if (!scsi_buf) { 1127 if (!scsi_buf) {
1224 printk(KERN_ERR "%s, failed to allocate " 1128 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1225 "scsi_buf\n", __FUNCTION__); 1129 "%d:0706 Failed to allocate command "
1130 "buffer\n", phba->brd_no);
1226 break; 1131 break;
1227 } 1132 }
1228 1133
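
The slave_alloc change above replaces the fixed per-LUN pre-allocation with cfg_lun_queue_depth + 2 buffers, clamped so the running total never exceeds cfg_hba_queue_depth. A small self-contained sketch of that budget arithmetic, with hypothetical names and example values:

#include <stdio.h>

/* Compute how many command buffers to pre-allocate for a new LUN without
 * exceeding the HBA-wide budget.  Mirrors the clamp in the hunk above. */
static unsigned int buffers_to_alloc(unsigned int total,
                                     unsigned int lun_queue_depth,
                                     unsigned int hba_queue_depth)
{
        unsigned int want = lun_queue_depth + 2; /* queue depth + EH + 1 extra */

        if (total >= hba_queue_depth)
                return 0;                        /* already at the limit */
        if (total + want > hba_queue_depth)
                want = hba_queue_depth - total;  /* trim to what is left */
        return want;
}

int main(void)
{
        /* e.g. 250 already allocated, depth 30 per LUN, HBA limit 256 */
        printf("allocate %u buffers\n", buffers_to_alloc(250, 30, 256));
        return 0;
}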
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e74e224fd77c..508710001ed6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,28 @@ typedef enum _lpfc_iocb_type {
65 LPFC_ABORT_IOCB 65 LPFC_ABORT_IOCB
66} lpfc_iocb_type; 66} lpfc_iocb_type;
67 67
68struct lpfc_iocbq *
69lpfc_sli_get_iocbq(struct lpfc_hba * phba)
70{
71 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
72 struct lpfc_iocbq * iocbq = NULL;
73
74 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
75 return iocbq;
76}
77
78void
79lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
80{
81 size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb);
82
83 /*
84 * Clean all volatile data fields, preserve iotag and node struct.
85 */
86 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
87 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
88}
89
68/* 90/*
69 * Translate the iocb command to an iocb command type used to decide the final 91 * Translate the iocb command to an iocb command type used to decide the final
70 * disposition of each completed IOCB. 92 * disposition of each completed IOCB.
@@ -265,41 +287,69 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
265 return iocb; 287 return iocb;
266} 288}
267 289
268static uint32_t 290uint16_t
269lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 291lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
270{ 292{
271 uint32_t search_start; 293 struct lpfc_iocbq ** new_arr;
294 struct lpfc_iocbq ** old_arr;
295 size_t new_len;
296 struct lpfc_sli *psli = &phba->sli;
297 uint16_t iotag;
272 298
273 if (pring->fast_lookup == NULL) { 299 spin_lock_irq(phba->host->host_lock);
274 pring->iotag_ctr++; 300 iotag = psli->last_iotag;
275 if (pring->iotag_ctr >= pring->iotag_max) 301 if(++iotag < psli->iocbq_lookup_len) {
276 pring->iotag_ctr = 1; 302 psli->last_iotag = iotag;
277 return pring->iotag_ctr; 303 psli->iocbq_lookup[iotag] = iocbq;
304 spin_unlock_irq(phba->host->host_lock);
305 iocbq->iotag = iotag;
306 return iotag;
307 }
308 else if (psli->iocbq_lookup_len < (0xffff
309 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
310 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
311 spin_unlock_irq(phba->host->host_lock);
312 new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *),
313 GFP_KERNEL);
314 if (new_arr) {
315 memset((char *)new_arr, 0,
316 new_len * sizeof (struct lpfc_iocbq *));
317 spin_lock_irq(phba->host->host_lock);
318 old_arr = psli->iocbq_lookup;
319 if (new_len <= psli->iocbq_lookup_len) {
 320                            /* highly improbable case */
321 kfree(new_arr);
322 iotag = psli->last_iotag;
323 if(++iotag < psli->iocbq_lookup_len) {
324 psli->last_iotag = iotag;
325 psli->iocbq_lookup[iotag] = iocbq;
326 spin_unlock_irq(phba->host->host_lock);
327 iocbq->iotag = iotag;
328 return iotag;
329 }
330 spin_unlock_irq(phba->host->host_lock);
331 return 0;
332 }
333 if (psli->iocbq_lookup)
334 memcpy(new_arr, old_arr,
335 ((psli->last_iotag + 1) *
336 sizeof (struct lpfc_iocbq *)));
337 psli->iocbq_lookup = new_arr;
338 psli->iocbq_lookup_len = new_len;
339 psli->last_iotag = iotag;
340 psli->iocbq_lookup[iotag] = iocbq;
341 spin_unlock_irq(phba->host->host_lock);
342 iocbq->iotag = iotag;
343 kfree(old_arr);
344 return iotag;
345 }
278 } 346 }
279 347
280 search_start = pring->iotag_ctr; 348 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
281 349 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
282 do { 350 phba->brd_no, psli->last_iotag);
283 pring->iotag_ctr++;
284 if (pring->iotag_ctr >= pring->fast_iotag)
285 pring->iotag_ctr = 1;
286
287 if (*(pring->fast_lookup + pring->iotag_ctr) == NULL)
288 return pring->iotag_ctr;
289
290 } while (pring->iotag_ctr != search_start);
291 351
292 /* 352 return 0;
293 * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
294 */
295 lpfc_printf_log(phba,
296 KERN_ERR,
297 LOG_SLI,
298 "%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
299 phba->brd_no,
300 pring->ringno,
301 pring->fast_iotag);
302 return (0);
303} 353}
304 354
305static void 355static void
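
The new lpfc_sli_next_iotag above assigns each command a unique 16-bit iotag and records its iocbq in psli->iocbq_lookup, growing the table in LPFC_IOCBQ_LOOKUP_INCREMENT steps and dropping the host lock around the allocation. The single-threaded userspace sketch below models only the grow-and-assign logic with hypothetical names; the real routine additionally re-checks the table length after reacquiring the lock because another CPU may have grown it in the meantime.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define LOOKUP_INCREMENT 4              /* tiny stand-in for 1024 */

struct tag_table {
        void   **lookup;                /* index: iotag -> object */
        size_t   len;
        uint16_t last_tag;
};

/* Assign the next iotag to "obj", growing the lookup table when needed.
 * Returns the tag, or 0 if no tag could be assigned (tag 0 is reserved). */
static uint16_t next_iotag(struct tag_table *t, void *obj)
{
        uint16_t tag = t->last_tag + 1;

        if (tag >= t->len) {
                size_t new_len = t->len + LOOKUP_INCREMENT;
                void **new_arr;

                if (new_len > 0xffff)
                        return 0;       /* 16-bit tag space exhausted */
                new_arr = calloc(new_len, sizeof(*new_arr));
                if (!new_arr)
                        return 0;
                if (t->lookup)
                        memcpy(new_arr, t->lookup, t->len * sizeof(*new_arr));
                free(t->lookup);
                t->lookup = new_arr;
                t->len = new_len;
        }
        t->last_tag = tag;
        t->lookup[tag] = obj;
        return tag;
}

int main(void)
{
        struct tag_table t = { 0 };
        int dummy[10];
        int i;

        for (i = 0; i < 10; i++)
                printf("iotag %u -> %p\n",
                       (unsigned)next_iotag(&t, &dummy[i]), (void *)&dummy[i]);
        free(t.lookup);
        return 0;
}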
@@ -307,10 +357,9 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
307 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 357 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
308{ 358{
309 /* 359 /*
310 * Allocate and set up an iotag 360 * Set up an iotag
311 */ 361 */
312 nextiocb->iocb.ulpIoTag = 362 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
313 lpfc_sli_next_iotag(phba, &phba->sli.ring[phba->sli.fcp_ring]);
314 363
315 /* 364 /*
316 * Issue iocb command to adapter 365 * Issue iocb command to adapter
@@ -326,16 +375,15 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
326 */ 375 */
327 if (nextiocb->iocb_cmpl) 376 if (nextiocb->iocb_cmpl)
328 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 377 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
329 else { 378 else
330 list_add_tail(&nextiocb->list, &phba->lpfc_iocb_list); 379 lpfc_sli_release_iocbq(phba, nextiocb);
331 }
332 380
333 /* 381 /*
334 * Let the HBA know what IOCB slot will be the next one the 382 * Let the HBA know what IOCB slot will be the next one the
335 * driver will put a command into. 383 * driver will put a command into.
336 */ 384 */
337 pring->cmdidx = pring->next_cmdidx; 385 pring->cmdidx = pring->next_cmdidx;
338 writeb(pring->cmdidx, phba->MBslimaddr 386 writel(pring->cmdidx, phba->MBslimaddr
339 + (SLIMOFF + (pring->ringno * 2)) * 4); 387 + (SLIMOFF + (pring->ringno * 2)) * 4);
340} 388}
341 389
@@ -752,80 +800,28 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
752} 800}
753 801
754static struct lpfc_iocbq * 802static struct lpfc_iocbq *
755lpfc_sli_txcmpl_ring_search_slow(struct lpfc_sli_ring * pring, 803lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
756 struct lpfc_iocbq * prspiocb) 804 struct lpfc_sli_ring * pring,
757{ 805 struct lpfc_iocbq * prspiocb)
758 IOCB_t *icmd = NULL;
759 IOCB_t *irsp = NULL;
760 struct lpfc_iocbq *cmd_iocb;
761 struct lpfc_iocbq *iocb, *next_iocb;
762 uint16_t iotag;
763
764 irsp = &prspiocb->iocb;
765 iotag = irsp->ulpIoTag;
766 cmd_iocb = NULL;
767
768 /* Search through txcmpl from the begining */
769 list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
770 icmd = &iocb->iocb;
771 if (iotag == icmd->ulpIoTag) {
772 /* Found a match. */
773 cmd_iocb = iocb;
774 list_del(&iocb->list);
775 pring->txcmplq_cnt--;
776 break;
777 }
778 }
779
780 return (cmd_iocb);
781}
782
783static struct lpfc_iocbq *
784lpfc_sli_txcmpl_ring_iotag_lookup(struct lpfc_hba * phba,
785 struct lpfc_sli_ring * pring,
786 struct lpfc_iocbq * prspiocb)
787{ 806{
788 IOCB_t *irsp = NULL;
789 struct lpfc_iocbq *cmd_iocb = NULL; 807 struct lpfc_iocbq *cmd_iocb = NULL;
790 uint16_t iotag; 808 uint16_t iotag;
791 809
792 if (unlikely(pring->fast_lookup == NULL)) 810 iotag = prspiocb->iocb.ulpIoTag;
793 return NULL; 811
794 812 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
795 /* Use fast lookup based on iotag for completion */ 813 cmd_iocb = phba->sli.iocbq_lookup[iotag];
796 irsp = &prspiocb->iocb; 814 list_del(&cmd_iocb->list);
797 iotag = irsp->ulpIoTag; 815 pring->txcmplq_cnt--;
798 if (iotag < pring->fast_iotag) { 816 return cmd_iocb;
799 cmd_iocb = *(pring->fast_lookup + iotag);
800 *(pring->fast_lookup + iotag) = NULL;
801 if (cmd_iocb) {
802 list_del(&cmd_iocb->list);
803 pring->txcmplq_cnt--;
804 return cmd_iocb;
805 } else {
806 /*
807 * This is clearly an error. A ring that uses iotags
808 * should never have a interrupt for a completion that
809 * is not on the ring. Return NULL and log a error.
810 */
811 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
812 "%d:0327 Rsp ring %d error - command "
813 "completion for iotag x%x not found\n",
814 phba->brd_no, pring->ringno, iotag);
815 return NULL;
816 }
817 } 817 }
818 818
819 /*
820 * Rsp ring <ringno> get: iotag <iotag> greater then
821 * configured max <fast_iotag> wd0 <irsp>. This is an
822 * error. Just return NULL.
823 */
824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 819 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 825                    "%d:0317 Rsp ring %d get: iotag x%x greater then "      820                    "%d:0317 iotag x%x is out of "
826 "configured max x%x wd0 x%x\n", 821 "range: max iotag x%x wd0 x%x\n",
827 phba->brd_no, pring->ringno, iotag, pring->fast_iotag, 822 phba->brd_no, iotag,
828 *(((uint32_t *) irsp) + 7)); 823 phba->sli.last_iotag,
824 *(((uint32_t *) &prspiocb->iocb) + 7));
829 return NULL; 825 return NULL;
830} 826}
831 827
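
With every in-flight command registered by iotag, lpfc_sli_iocbq_lookup above resolves a response to its originating command with one bounds-checked array index instead of walking the txcmplq. A tiny self-contained sketch of that lookup rule (hypothetical names, plain pointers in place of iocbqs):

#include <stdio.h>
#include <stdint.h>

/* Resolve a completion tag to its command object.  NULL means the tag is
 * zero or beyond the last tag ever issued; both are the error cases the
 * driver logs as an out-of-range iotag. */
static void *lookup_by_iotag(void **lookup, uint16_t last_tag, uint16_t tag)
{
        if (tag == 0 || tag > last_tag)
                return NULL;
        return lookup[tag];
}

int main(void)
{
        int cmd_a, cmd_b;
        void *table[3] = { NULL, &cmd_a, &cmd_b };   /* slot 0 unused */

        printf("tag 2 -> %p (expect %p)\n",
               lookup_by_iotag(table, 2, 2), (void *)&cmd_b);
        printf("tag 9 -> %p (expect NULL)\n", lookup_by_iotag(table, 2, 9));
        return 0;
}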
@@ -839,7 +835,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
839 835
840 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 836 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
841 spin_lock_irqsave(phba->host->host_lock, iflag); 837 spin_lock_irqsave(phba->host->host_lock, iflag);
842 cmdiocbp = lpfc_sli_txcmpl_ring_search_slow(pring, saveq); 838 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
843 if (cmdiocbp) { 839 if (cmdiocbp) {
844 if (cmdiocbp->iocb_cmpl) { 840 if (cmdiocbp->iocb_cmpl) {
845 /* 841 /*
@@ -853,17 +849,13 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
853 spin_lock_irqsave(phba->host->host_lock, iflag); 849 spin_lock_irqsave(phba->host->host_lock, iflag);
854 } 850 }
855 else { 851 else {
856 if (cmdiocbp->iocb_flag & LPFC_IO_POLL)
857 rc = 0;
858
859 spin_unlock_irqrestore(phba->host->host_lock, 852 spin_unlock_irqrestore(phba->host->host_lock,
860 iflag); 853 iflag);
861 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 854 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
862 spin_lock_irqsave(phba->host->host_lock, iflag); 855 spin_lock_irqsave(phba->host->host_lock, iflag);
863 } 856 }
864 } else { 857 } else
865 list_add_tail(&cmdiocbp->list, &phba->lpfc_iocb_list); 858 lpfc_sli_release_iocbq(phba, cmdiocbp);
866 }
867 } else { 859 } else {
868 /* 860 /*
869 * Unknown initiating command based on the response iotag. 861 * Unknown initiating command based on the response iotag.
@@ -889,6 +881,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
889 saveq->iocb.ulpContext); 881 saveq->iocb.ulpContext);
890 } 882 }
891 } 883 }
884
892 spin_unlock_irqrestore(phba->host->host_lock, iflag); 885 spin_unlock_irqrestore(phba->host->host_lock, iflag);
893 return rc; 886 return rc;
894} 887}
@@ -953,7 +946,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
953 * structure. The copy involves a byte-swap since the 946 * structure. The copy involves a byte-swap since the
954 * network byte order and pci byte orders are different. 947 * network byte order and pci byte orders are different.
955 */ 948 */
956 entry = (IOCB_t *) IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 949 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
957 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 950 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
958 (uint32_t *) &rspiocbq.iocb, 951 (uint32_t *) &rspiocbq.iocb,
959 sizeof (IOCB_t)); 952 sizeof (IOCB_t));
@@ -990,9 +983,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
990 break; 983 break;
991 } 984 }
992 985
993 cmdiocbq = lpfc_sli_txcmpl_ring_iotag_lookup(phba, 986 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
994 pring, 987 &rspiocbq);
995 &rspiocbq);
996 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 988 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
997 spin_unlock_irqrestore( 989 spin_unlock_irqrestore(
998 phba->host->host_lock, iflag); 990 phba->host->host_lock, iflag);
@@ -1033,7 +1025,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1033 1025
1034 to_slim = phba->MBslimaddr + 1026 to_slim = phba->MBslimaddr +
1035 (SLIMOFF + (pring->ringno * 2) + 1) * 4; 1027 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1036 writeb(pring->rspidx, to_slim); 1028 writel(pring->rspidx, to_slim);
1037 1029
1038 if (pring->rspidx == portRspPut) 1030 if (pring->rspidx == portRspPut)
1039 portRspPut = le32_to_cpu(pgp->rspPutInx); 1031 portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1073,7 +1065,6 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1073 struct lpfc_iocbq *next_iocb; 1065 struct lpfc_iocbq *next_iocb;
1074 struct lpfc_iocbq *cmdiocbp; 1066 struct lpfc_iocbq *cmdiocbp;
1075 struct lpfc_iocbq *saveq; 1067 struct lpfc_iocbq *saveq;
1076 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
1077 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1068 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
1078 uint8_t iocb_cmd_type; 1069 uint8_t iocb_cmd_type;
1079 lpfc_iocb_type type; 1070 lpfc_iocb_type type;
@@ -1115,7 +1106,6 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1115 } 1106 }
1116 1107
1117 rmb(); 1108 rmb();
1118 lpfc_iocb_list = &phba->lpfc_iocb_list;
1119 while (pring->rspidx != portRspPut) { 1109 while (pring->rspidx != portRspPut) {
1120 /* 1110 /*
1121 * Build a completion list and call the appropriate handler. 1111 * Build a completion list and call the appropriate handler.
@@ -1131,8 +1121,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1131 * received. 1121 * received.
1132 */ 1122 */
1133 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1123 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1134 list_remove_head(lpfc_iocb_list, rspiocbp, struct lpfc_iocbq, 1124 rspiocbp = lpfc_sli_get_iocbq(phba);
1135 list);
1136 if (rspiocbp == NULL) { 1125 if (rspiocbp == NULL) {
1137 printk(KERN_ERR "%s: out of buffers! Failing " 1126 printk(KERN_ERR "%s: out of buffers! Failing "
1138 "completion.\n", __FUNCTION__); 1127 "completion.\n", __FUNCTION__);
@@ -1147,7 +1136,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1147 1136
1148 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2) 1137 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
1149 + 1) * 4; 1138 + 1) * 4;
1150 writeb(pring->rspidx, to_slim); 1139 writel(pring->rspidx, to_slim);
1151 1140
1152 if (list_empty(&(pring->iocb_continueq))) { 1141 if (list_empty(&(pring->iocb_continueq))) {
1153 list_add(&rspiocbp->list, &(pring->iocb_continueq)); 1142 list_add(&rspiocbp->list, &(pring->iocb_continueq));
@@ -1213,8 +1202,8 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1213 } else if (type == LPFC_ABORT_IOCB) { 1202 } else if (type == LPFC_ABORT_IOCB) {
1214 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && 1203 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1215 ((cmdiocbp = 1204 ((cmdiocbp =
1216 lpfc_sli_txcmpl_ring_search_slow(pring, 1205 lpfc_sli_iocbq_lookup(phba, pring,
1217 saveq)))) { 1206 saveq)))) {
1218 /* Call the specified completion 1207 /* Call the specified completion
1219 routine */ 1208 routine */
1220 if (cmdiocbp->iocb_cmpl) { 1209 if (cmdiocbp->iocb_cmpl) {
@@ -1226,10 +1215,9 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1226 spin_lock_irqsave( 1215 spin_lock_irqsave(
1227 phba->host->host_lock, 1216 phba->host->host_lock,
1228 iflag); 1217 iflag);
1229 } else { 1218 } else
1230 list_add_tail(&cmdiocbp->list, 1219 lpfc_sli_release_iocbq(phba,
1231 lpfc_iocb_list); 1220 cmdiocbp);
1232 }
1233 } 1221 }
1234 } else if (type == LPFC_UNKNOWN_IOCB) { 1222 } else if (type == LPFC_UNKNOWN_IOCB) {
1235 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1223 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
@@ -1264,12 +1252,12 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1264 next_iocb, 1252 next_iocb,
1265 &saveq->list, 1253 &saveq->list,
1266 list) { 1254 list) {
1267 list_add_tail(&rspiocbp->list, 1255 lpfc_sli_release_iocbq(phba,
1268 lpfc_iocb_list); 1256 rspiocbp);
1269 } 1257 }
1270 } 1258 }
1271 1259
1272 list_add_tail(&saveq->list, lpfc_iocb_list); 1260 lpfc_sli_release_iocbq(phba, saveq);
1273 } 1261 }
1274 } 1262 }
1275 1263
@@ -1314,7 +1302,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1314 struct lpfc_iocbq *iocb, *next_iocb; 1302 struct lpfc_iocbq *iocb, *next_iocb;
1315 IOCB_t *icmd = NULL, *cmd = NULL; 1303 IOCB_t *icmd = NULL, *cmd = NULL;
1316 int errcnt; 1304 int errcnt;
1317 uint16_t iotag;
1318 1305
1319 errcnt = 0; 1306 errcnt = 0;
1320 1307
@@ -1331,9 +1318,8 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1331 spin_unlock_irq(phba->host->host_lock); 1318 spin_unlock_irq(phba->host->host_lock);
1332 (iocb->iocb_cmpl) (phba, iocb, iocb); 1319 (iocb->iocb_cmpl) (phba, iocb, iocb);
1333 spin_lock_irq(phba->host->host_lock); 1320 spin_lock_irq(phba->host->host_lock);
1334 } else { 1321 } else
1335 list_add_tail(&iocb->list, &phba->lpfc_iocb_list); 1322 lpfc_sli_release_iocbq(phba, iocb);
1336 }
1337 } 1323 }
1338 pring->txq_cnt = 0; 1324 pring->txq_cnt = 0;
1339 INIT_LIST_HEAD(&(pring->txq)); 1325 INIT_LIST_HEAD(&(pring->txq));
@@ -1343,13 +1329,8 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1343 cmd = &iocb->iocb; 1329 cmd = &iocb->iocb;
1344 1330
1345 /* 1331 /*
 1346     * Imediate abort of IOCB, clear fast_lookup entry,                    1332     * Immediate abort of IOCB, dequeue and call compl
1347 * if any, deque and call compl
1348 */ 1333 */
1349 iotag = cmd->ulpIoTag;
1350 if (iotag && pring->fast_lookup &&
1351 (iotag < pring->fast_iotag))
1352 pring->fast_lookup[iotag] = NULL;
1353 1334
1354 list_del_init(&iocb->list); 1335 list_del_init(&iocb->list);
1355 pring->txcmplq_cnt--; 1336 pring->txcmplq_cnt--;
@@ -1360,9 +1341,8 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1360 spin_unlock_irq(phba->host->host_lock); 1341 spin_unlock_irq(phba->host->host_lock);
1361 (iocb->iocb_cmpl) (phba, iocb, iocb); 1342 (iocb->iocb_cmpl) (phba, iocb, iocb);
1362 spin_lock_irq(phba->host->host_lock); 1343 spin_lock_irq(phba->host->host_lock);
1363 } else { 1344 } else
1364 list_add_tail(&iocb->list, &phba->lpfc_iocb_list); 1345 lpfc_sli_release_iocbq(phba, iocb);
1365 }
1366 } 1346 }
1367 1347
1368 INIT_LIST_HEAD(&pring->txcmplq); 1348 INIT_LIST_HEAD(&pring->txcmplq);
@@ -2147,6 +2127,10 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2147 psli->next_ring = LPFC_FCP_NEXT_RING; 2127 psli->next_ring = LPFC_FCP_NEXT_RING;
2148 psli->ip_ring = LPFC_IP_RING; 2128 psli->ip_ring = LPFC_IP_RING;
2149 2129
2130 psli->iocbq_lookup = NULL;
2131 psli->iocbq_lookup_len = 0;
2132 psli->last_iotag = 0;
2133
2150 for (i = 0; i < psli->num_rings; i++) { 2134 for (i = 0; i < psli->num_rings; i++) {
2151 pring = &psli->ring[i]; 2135 pring = &psli->ring[i];
2152 switch (i) { 2136 switch (i) {
@@ -2222,7 +2206,7 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
2222{ 2206{
2223 struct lpfc_sli *psli; 2207 struct lpfc_sli *psli;
2224 struct lpfc_sli_ring *pring; 2208 struct lpfc_sli_ring *pring;
2225 int i, cnt; 2209 int i;
2226 2210
2227 psli = &phba->sli; 2211 psli = &phba->sli;
2228 spin_lock_irq(phba->host->host_lock); 2212 spin_lock_irq(phba->host->host_lock);
@@ -2238,19 +2222,6 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
2238 INIT_LIST_HEAD(&pring->txcmplq); 2222 INIT_LIST_HEAD(&pring->txcmplq);
2239 INIT_LIST_HEAD(&pring->iocb_continueq); 2223 INIT_LIST_HEAD(&pring->iocb_continueq);
2240 INIT_LIST_HEAD(&pring->postbufq); 2224 INIT_LIST_HEAD(&pring->postbufq);
2241 cnt = pring->fast_iotag;
2242 spin_unlock_irq(phba->host->host_lock);
2243 if (cnt) {
2244 pring->fast_lookup =
2245 kmalloc(cnt * sizeof (struct lpfc_iocbq *),
2246 GFP_KERNEL);
2247 if (pring->fast_lookup == 0) {
2248 return (0);
2249 }
2250 memset((char *)pring->fast_lookup, 0,
2251 cnt * sizeof (struct lpfc_iocbq *));
2252 }
2253 spin_lock_irq(phba->host->host_lock);
2254 } 2225 }
2255 spin_unlock_irq(phba->host->host_lock); 2226 spin_unlock_irq(phba->host->host_lock);
2256 return (1); 2227 return (1);
@@ -2292,10 +2263,8 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2292 flags); 2263 flags);
2293 (iocb->iocb_cmpl) (phba, iocb, iocb); 2264 (iocb->iocb_cmpl) (phba, iocb, iocb);
2294 spin_lock_irqsave(phba->host->host_lock, flags); 2265 spin_lock_irqsave(phba->host->host_lock, flags);
2295 } else { 2266 } else
2296 list_add_tail(&iocb->list, 2267 lpfc_sli_release_iocbq(phba, iocb);
2297 &phba->lpfc_iocb_list);
2298 }
2299 } 2268 }
2300 2269
2301 INIT_LIST_HEAD(&(pring->txq)); 2270 INIT_LIST_HEAD(&(pring->txq));
@@ -2436,7 +2405,7 @@ lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2436 kfree(buf_ptr); 2405 kfree(buf_ptr);
2437 } 2406 }
2438 2407
2439 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list); 2408 lpfc_sli_release_iocbq(phba, cmdiocb);
2440 return; 2409 return;
2441} 2410}
2442 2411
@@ -2445,16 +2414,14 @@ lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2445 struct lpfc_sli_ring * pring, 2414 struct lpfc_sli_ring * pring,
2446 struct lpfc_iocbq * cmdiocb) 2415 struct lpfc_iocbq * cmdiocb)
2447{ 2416{
2448 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 2417 struct lpfc_iocbq *abtsiocbp;
2449 struct lpfc_iocbq *abtsiocbp = NULL;
2450 IOCB_t *icmd = NULL; 2418 IOCB_t *icmd = NULL;
2451 IOCB_t *iabt = NULL; 2419 IOCB_t *iabt = NULL;
2452 2420
2453 /* issue ABTS for this IOCB based on iotag */ 2421 /* issue ABTS for this IOCB based on iotag */
2454 list_remove_head(lpfc_iocb_list, abtsiocbp, struct lpfc_iocbq, list); 2422 abtsiocbp = lpfc_sli_get_iocbq(phba);
2455 if (abtsiocbp == NULL) 2423 if (abtsiocbp == NULL)
2456 return 0; 2424 return 0;
2457 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
2458 2425
2459 iabt = &abtsiocbp->iocb; 2426 iabt = &abtsiocbp->iocb;
2460 icmd = &cmdiocb->iocb; 2427 icmd = &cmdiocb->iocb;
@@ -2473,7 +2440,7 @@ lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2473 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl; 2440 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
2474 break; 2441 break;
2475 default: 2442 default:
2476 list_add_tail(&abtsiocbp->list, lpfc_iocb_list); 2443 lpfc_sli_release_iocbq(phba, abtsiocbp);
2477 return 0; 2444 return 0;
2478 } 2445 }
2479 2446
@@ -2485,7 +2452,7 @@ lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2485 iabt->ulpCommand = CMD_ABORT_MXRI64_CN; 2452 iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
2486 2453
2487 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) { 2454 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
2488 list_add_tail(&abtsiocbp->list, lpfc_iocb_list); 2455 lpfc_sli_release_iocbq(phba, abtsiocbp);
2489 return 0; 2456 return 0;
2490 } 2457 }
2491 2458
@@ -2493,28 +2460,37 @@ lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2493} 2460}
2494 2461
2495static int 2462static int
2496lpfc_sli_validate_iocb_cmd(struct lpfc_scsi_buf *lpfc_cmd, uint16_t tgt_id, 2463lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
2497 uint64_t lun_id, struct lpfc_iocbq *iocb, 2464 uint64_t lun_id, uint32_t ctx,
2498 uint32_t ctx, lpfc_ctx_cmd ctx_cmd) 2465 lpfc_ctx_cmd ctx_cmd)
2499{ 2466{
2467 struct lpfc_scsi_buf *lpfc_cmd;
2468 struct scsi_cmnd *cmnd;
2500 int rc = 1; 2469 int rc = 1;
2501 2470
2502 if (lpfc_cmd == NULL) 2471 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
2472 return rc;
2473
2474 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
2475 cmnd = lpfc_cmd->pCmd;
2476
2477 if (cmnd == NULL)
2503 return rc; 2478 return rc;
2504 2479
2505 switch (ctx_cmd) { 2480 switch (ctx_cmd) {
2506 case LPFC_CTX_LUN: 2481 case LPFC_CTX_LUN:
2507 if ((lpfc_cmd->pCmd->device->id == tgt_id) && 2482 if ((cmnd->device->id == tgt_id) &&
2508 (lpfc_cmd->pCmd->device->lun == lun_id)) 2483 (cmnd->device->lun == lun_id))
2509 rc = 0; 2484 rc = 0;
2510 break; 2485 break;
2511 case LPFC_CTX_TGT: 2486 case LPFC_CTX_TGT:
2512 if (lpfc_cmd->pCmd->device->id == tgt_id) 2487 if (cmnd->device->id == tgt_id)
2513 rc = 0; 2488 rc = 0;
2514 break; 2489 break;
2515 case LPFC_CTX_CTX: 2490 case LPFC_CTX_CTX:
2516 if (iocb->iocb.ulpContext == ctx) 2491 if (iocbq->iocb.ulpContext == ctx)
2517 rc = 0; 2492 rc = 0;
2493 break;
2518 case LPFC_CTX_HOST: 2494 case LPFC_CTX_HOST:
2519 rc = 0; 2495 rc = 0;
2520 break; 2496 break;
@@ -2531,30 +2507,17 @@ int
2531lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2507lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2532 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) 2508 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
2533{ 2509{
2534 struct lpfc_iocbq *iocb, *next_iocb; 2510 struct lpfc_iocbq *iocbq;
2535 IOCB_t *cmd = NULL; 2511 int sum, i;
2536 struct lpfc_scsi_buf *lpfc_cmd;
2537 int sum = 0, ret_val = 0;
2538 2512
2539 /* Next check the txcmplq */ 2513 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
2540 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 2514 iocbq = phba->sli.iocbq_lookup[i];
2541 cmd = &iocb->iocb;
2542
2543 /* Must be a FCP command */
2544 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2545 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2546 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2547 continue;
2548 }
2549 2515
2550 /* context1 MUST be a struct lpfc_scsi_buf */ 2516 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2551 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1); 2517 0, ctx_cmd) == 0)
2552 ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id, 2518 sum++;
2553 NULL, 0, ctx_cmd);
2554 if (ret_val != 0)
2555 continue;
2556 sum++;
2557 } 2519 }
2520
2558 return sum; 2521 return sum;
2559} 2522}
2560 2523
@@ -2563,7 +2526,7 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2563 struct lpfc_iocbq * rspiocb) 2526 struct lpfc_iocbq * rspiocb)
2564{ 2527{
2565 spin_lock_irq(phba->host->host_lock); 2528 spin_lock_irq(phba->host->host_lock);
2566 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list); 2529 lpfc_sli_release_iocbq(phba, cmdiocb);
2567 spin_unlock_irq(phba->host->host_lock); 2530 spin_unlock_irq(phba->host->host_lock);
2568 return; 2531 return;
2569} 2532}
@@ -2573,39 +2536,27 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2573 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx, 2536 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
2574 lpfc_ctx_cmd abort_cmd) 2537 lpfc_ctx_cmd abort_cmd)
2575{ 2538{
2576 struct lpfc_iocbq *iocb, *next_iocb; 2539 struct lpfc_iocbq *iocbq;
2577 struct lpfc_iocbq *abtsiocb = NULL; 2540 struct lpfc_iocbq *abtsiocb;
2578 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
2579 IOCB_t *cmd = NULL; 2541 IOCB_t *cmd = NULL;
2580 struct lpfc_scsi_buf *lpfc_cmd;
2581 int errcnt = 0, ret_val = 0; 2542 int errcnt = 0, ret_val = 0;
2543 int i;
2582 2544
2583 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 2545 for (i = 1; i <= phba->sli.last_iotag; i++) {
2584 cmd = &iocb->iocb; 2546 iocbq = phba->sli.iocbq_lookup[i];
2585
2586 /* Must be a FCP command */
2587 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2588 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2589 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2590 continue;
2591 }
2592 2547
2593 /* context1 MUST be a struct lpfc_scsi_buf */ 2548 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2594 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1); 2549 0, abort_cmd) != 0)
2595 ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
2596 iocb, ctx, abort_cmd);
2597 if (ret_val != 0)
2598 continue; 2550 continue;
2599 2551
2600 /* issue ABTS for this IOCB based on iotag */ 2552 /* issue ABTS for this IOCB based on iotag */
2601 list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, 2553 abtsiocb = lpfc_sli_get_iocbq(phba);
2602 list);
2603 if (abtsiocb == NULL) { 2554 if (abtsiocb == NULL) {
2604 errcnt++; 2555 errcnt++;
2605 continue; 2556 continue;
2606 } 2557 }
2607 memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
2608 2558
2559 cmd = &iocbq->iocb;
2609 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 2560 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
2610 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 2561 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
2611 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 2562 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
@@ -2621,7 +2572,7 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2621 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 2572 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2622 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 2573 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
2623 if (ret_val == IOCB_ERROR) { 2574 if (ret_val == IOCB_ERROR) {
2624 list_add_tail(&abtsiocb->list, lpfc_iocb_list); 2575 lpfc_sli_release_iocbq(phba, abtsiocb);
2625 errcnt++; 2576 errcnt++;
2626 continue; 2577 continue;
2627 } 2578 }
@@ -2630,83 +2581,99 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2630 return errcnt; 2581 return errcnt;
2631} 2582}
2632 2583
2633void 2584static void
2634lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba, 2585lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
2635 struct lpfc_iocbq * queue1, 2586 struct lpfc_iocbq *cmdiocbq,
2636 struct lpfc_iocbq * queue2) 2587 struct lpfc_iocbq *rspiocbq)
2637{ 2588{
2638 if (queue1->context2 && queue2) 2589 wait_queue_head_t *pdone_q;
2639 memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq)); 2590 unsigned long iflags;
2591
2592 spin_lock_irqsave(phba->host->host_lock, iflags);
2593 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
2594 if (cmdiocbq->context2 && rspiocbq)
2595 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
2596 &rspiocbq->iocb, sizeof(IOCB_t));
2640 2597
2641 /* The waiter is looking for LPFC_IO_HIPRI bit to be set 2598 pdone_q = cmdiocbq->context_un.wait_queue;
2642 as a signal to wake up */ 2599 spin_unlock_irqrestore(phba->host->host_lock, iflags);
2643 queue1->iocb_flag |= LPFC_IO_HIPRI; 2600 if (pdone_q)
2601 wake_up(pdone_q);
2644 return; 2602 return;
2645} 2603}
2646 2604
2605/*
2606 * Issue the caller's iocb and wait for its completion, but no longer than the
 2607 * caller's timeout. Note that iocb_flag is cleared before the
 2608 * lpfc_sli_issue_iocb call since the wake routine sets a unique value and by
2609 * definition this is a wait function.
2610 */
2647int 2611int
2648lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba, 2612lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
2649 struct lpfc_sli_ring * pring, 2613 struct lpfc_sli_ring * pring,
2650 struct lpfc_iocbq * piocb, 2614 struct lpfc_iocbq * piocb,
2651 uint32_t flag, 2615 struct lpfc_iocbq * prspiocbq,
2652 struct lpfc_iocbq * prspiocbq, 2616 uint32_t timeout)
2653 uint32_t timeout)
2654{ 2617{
2655 int j, delay_time, retval = IOCB_ERROR; 2618 DECLARE_WAIT_QUEUE_HEAD(done_q);
2656 2619 long timeleft, timeout_req = 0;
2657 /* The caller must left context1 empty. */ 2620 int retval = IOCB_SUCCESS;
2658 if (piocb->context_un.hipri_wait_queue != 0) {
2659 return IOCB_ERROR;
2660 }
2661 2621
2662 /* 2622 /*
2663 * If the caller has provided a response iocbq buffer, context2 must 2623 * If the caller has provided a response iocbq buffer, then context2
 2664     * be NULL or its an error.                                           2624      * must be NULL, or it is an error.
2665 */ 2625 */
2666 if (prspiocbq && piocb->context2) { 2626 if (prspiocbq) {
2667 return IOCB_ERROR; 2627 if (piocb->context2)
2628 return IOCB_ERROR;
2629 piocb->context2 = prspiocbq;
2668 } 2630 }
2669 2631
2670 piocb->context2 = prspiocbq; 2632 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
2633 piocb->context_un.wait_queue = &done_q;
2634 piocb->iocb_flag &= ~LPFC_IO_WAKE;
2671 2635
2672 /* Setup callback routine and issue the command. */ 2636 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
2673 piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority; 2637 if (retval == IOCB_SUCCESS) {
2674 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 2638 timeout_req = timeout * HZ;
2675 flag | SLI_IOCB_HIGH_PRIORITY); 2639 spin_unlock_irq(phba->host->host_lock);
2676 if (retval != IOCB_SUCCESS) { 2640 timeleft = wait_event_timeout(done_q,
2677 piocb->context2 = NULL; 2641 piocb->iocb_flag & LPFC_IO_WAKE,
2678 return IOCB_ERROR; 2642 timeout_req);
2679 } 2643 spin_lock_irq(phba->host->host_lock);
2680
2681 /*
2682 * This high-priority iocb was sent out-of-band. Poll for its
2683 * completion rather than wait for a signal. Note that the host_lock
2684 * is held by the midlayer and must be released here to allow the
2685 * interrupt handlers to complete the IO and signal this routine via
2686 * the iocb_flag.
2687 * Also, the delay_time is computed to be one second longer than
2688 * the scsi command timeout to give the FW time to abort on
2689 * timeout rather than the driver just giving up. Typically,
2690 * the midlayer does not specify a time for this command so the
2691 * driver is free to enforce its own timeout.
2692 */
2693 2644
2694 delay_time = ((timeout + 1) * 1000) >> 6; 2645 if (timeleft == 0) {
2695 retval = IOCB_ERROR; 2646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2696 spin_unlock_irq(phba->host->host_lock); 2647 "%d:0329 IOCB wait timeout error - no "
2697 for (j = 0; j < 64; j++) { 2648 "wake response Data x%x\n",
2698 msleep(delay_time); 2649 phba->brd_no, timeout);
2699 if (piocb->iocb_flag & LPFC_IO_HIPRI) { 2650 retval = IOCB_TIMEDOUT;
2700 piocb->iocb_flag &= ~LPFC_IO_HIPRI; 2651 } else if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
2701 retval = IOCB_SUCCESS; 2652 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2702 break; 2653 "%d:0330 IOCB wake NOT set, "
2654 "Data x%x x%lx\n", phba->brd_no,
2655 timeout, (timeleft / jiffies));
2656 retval = IOCB_TIMEDOUT;
2657 } else {
2658 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2659 "%d:0331 IOCB wake signaled\n",
2660 phba->brd_no);
2703 } 2661 }
2662 } else {
2663 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2664 "%d:0332 IOCB wait issue failed, Data x%x\n",
2665 phba->brd_no, retval);
2666 retval = IOCB_ERROR;
2704 } 2667 }
2705 2668
2706 spin_lock_irq(phba->host->host_lock); 2669 if (prspiocbq)
2707 piocb->context2 = NULL; 2670 piocb->context2 = NULL;
2671
2672 piocb->context_un.wait_queue = NULL;
2673 piocb->iocb_cmpl = NULL;
2708 return retval; 2674 return retval;
2709} 2675}
2676
2710int 2677int
2711lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 2678lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
2712 uint32_t timeout) 2679 uint32_t timeout)
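
lpfc_sli_issue_iocb_wait above replaces the polled high-priority path with a sleeping handshake: the issuer clears LPFC_IO_WAKE, parks on a wait queue via wait_event_timeout, and lpfc_sli_wake_iocb_wait sets the flag and wakes it from the completion path. The pthread sketch below (build with gcc -pthread) models that handshake in userspace with hypothetical names and a condition variable in place of the kernel wait queue; it illustrates the pattern only and is not the driver code.

#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done_q = PTHREAD_COND_INITIALIZER;
static int io_wake;                     /* models the LPFC_IO_WAKE flag */

/* Completion side: runs when the "hardware" finishes the request. */
static void *completion_handler(void *arg)
{
        (void)arg;
        sleep(1);                       /* pretend the command took a while */
        pthread_mutex_lock(&lock);
        io_wake = 1;                    /* cmdiocbq->iocb_flag |= LPFC_IO_WAKE */
        pthread_cond_signal(&done_q);   /* wake_up(pdone_q) */
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Issue side: wait for io_wake or time out, like wait_event_timeout(). */
static int issue_and_wait(unsigned int timeout_secs)
{
        struct timespec deadline;
        pthread_t completer;
        int rc = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_secs;

        pthread_create(&completer, NULL, completion_handler, NULL);

        pthread_mutex_lock(&lock);
        while (!io_wake && rc == 0)
                rc = pthread_cond_timedwait(&done_q, &lock, &deadline);
        pthread_mutex_unlock(&lock);

        pthread_join(completer, NULL);
        return io_wake ? 0 : -1;        /* -1 models IOCB_TIMEDOUT */
}

int main(void)
{
        printf("wait result: %s\n",
               issue_and_wait(3) ? "TIMEDOUT" : "SUCCESS");
        return 0;
}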
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6c74f3c85ff7..b7a9f970f565 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -33,13 +33,15 @@ typedef enum _lpfc_ctx_cmd {
33struct lpfc_iocbq { 33struct lpfc_iocbq {
34 /* lpfc_iocbqs are used in double linked lists */ 34 /* lpfc_iocbqs are used in double linked lists */
35 struct list_head list; 35 struct list_head list;
36 uint16_t iotag; /* pre-assigned IO tag */
37 uint16_t rsvd1;
38
36 IOCB_t iocb; /* IOCB cmd */ 39 IOCB_t iocb; /* IOCB cmd */
37 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 40 uint8_t retry; /* retry counter for IOCB cmd - if needed */
38 uint8_t iocb_flag; 41 uint8_t iocb_flag;
39#define LPFC_IO_POLL 1 /* Polling mode iocb */ 42#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
40#define LPFC_IO_LIBDFC 2 /* libdfc iocb */ 43#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
41#define LPFC_IO_WAIT 4 44#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
42#define LPFC_IO_HIPRI 8 /* High Priority Queue signal flag */
43 45
44 uint8_t abort_count; 46 uint8_t abort_count;
45 uint8_t rsvd2; 47 uint8_t rsvd2;
@@ -48,8 +50,7 @@ struct lpfc_iocbq {
48 void *context2; /* caller context information */ 50 void *context2; /* caller context information */
49 void *context3; /* caller context information */ 51 void *context3; /* caller context information */
50 union { 52 union {
51 wait_queue_head_t *hipri_wait_queue; /* High Priority Queue wait 53 wait_queue_head_t *wait_queue;
52 queue */
53 struct lpfc_iocbq *rsp_iocb; 54 struct lpfc_iocbq *rsp_iocb;
54 struct lpfcMboxq *mbox; 55 struct lpfcMboxq *mbox;
55 } context_un; 56 } context_un;
@@ -125,10 +126,10 @@ struct lpfc_sli_ring {
125 126
126 uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */ 127 uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
127 uint32_t next_cmdidx; /* next_cmd index */ 128 uint32_t next_cmdidx; /* next_cmd index */
129 uint32_t rspidx; /* current index in response ring */
130 uint32_t cmdidx; /* current index in command ring */
128 uint8_t rsvd; 131 uint8_t rsvd;
129 uint8_t ringno; /* ring number */ 132 uint8_t ringno; /* ring number */
130 uint8_t rspidx; /* current index in response ring */
131 uint8_t cmdidx; /* current index in command ring */
132 uint16_t numCiocb; /* number of command iocb's per ring */ 133 uint16_t numCiocb; /* number of command iocb's per ring */
133 uint16_t numRiocb; /* number of rsp iocb's per ring */ 134 uint16_t numRiocb; /* number of rsp iocb's per ring */
134 135
@@ -200,6 +201,11 @@ struct lpfc_sli {
200 cmd */ 201 cmd */
201 202
202 uint32_t *MBhostaddr; /* virtual address for mbox cmds */ 203 uint32_t *MBhostaddr; /* virtual address for mbox cmds */
204
205#define LPFC_IOCBQ_LOOKUP_INCREMENT 1024
206 struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
 207    size_t iocbq_lookup_len; /* current length of the array */
208 uint16_t last_iotag; /* last allocated IOTAG */
203}; 209};
204 210
205/* Given a pointer to the start of the ring, and the slot number of 211/* Given a pointer to the start of the ring, and the slot number of
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 7e6747b06f90..4f0466fbd5f2 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.0.30" 21#define LPFC_DRIVER_VERSION "8.1.0"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24