author    James Smart <James.Smart@Emulex.Com>                      2009-04-06 18:48:10 -0400
committer James Bottomley <James.Bottomley@HansenPartnership.com>   2009-04-27 10:42:47 -0400
commit    a257bf905efd22fd2c055580b0ab2e8e7ed1b6a1 (patch)
tree      e0f2ef6b9627d86456c92d56fa2c088c6d6544bf /drivers/scsi/lpfc
parent    3621a710a7dbb2d22a8e95d94bcf0c2d13ef57fc (diff)
[SCSI] lpfc 8.3.1: misc fixes/changes
8.3.1 Fixes/Changes:
- Fix incorrect byte-swapping on word 4 of IOCB (data length) which caused LUNs to not be discovered on big-endian (e.g. PPC)
- Remove a bad cast of MBslimaddr which loses the __iomem (sparse)
- Make lpfc_debugfs_mask_disc_trc static (sparse)
- Correct misspelled word BlockGuard in lpfc_logmsg.h comment
- Replaced repeated code segment for canceling IOCBs from a list with a function call, lpfc_sli_cancel_iocbs()
- Increased HBQ buffers to support 40KB SSC sequences
- Added sysfs interface to update speed and topology parameters without a link bounce
- Fixed bug with sysfs fc_host WWNs not being updated after changing the WWNs
- Check if the active mailbox is NULL at the beginning of the mailbox timeout handler - fixes a panic in the mailbox timeout handler while running an IO stress test
- Fixed system panic in lpfc_pci_remove_one() due to ndlp indirect reference to phba through vport
- Removed de-reference of scsi device after the call to scsi_done() to fix a panic in the scsi completion path while accessing the scsi device after scsi_done is called
- Fixed "Nodelist not empty" message when unloading the driver after a target reboot test
- Added LP2105 HBA model description
- Added code to print all 16 words of unrecognized ASYNC events
- Fixed memory leak in vport create + delete loop
- Added support for handling dual error bit from HBA
- Fixed a driver NULL pointer dereference in lpfc_sli_process_sol_iocb
- Fixed a discovery bug with FC switch reboot in lpfc_setup_disc_node
- Take NULL terminator into account when calculating available buffer space

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
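For illustration only (not part of the patch), a minimal user-space sketch of how the new "nolip " prefix accepted by the lpfc_topology/lpfc_link_speed store handlers might be exercised. The sysfs path, in particular the "host0" instance, is an assumption for this sketch.

    /* Hypothetical example: set the topology to 2 without bouncing the link
     * by using the new "nolip " prefix. Path and host number are assumptions;
     * adjust for the actual adapter instance.
     */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *attr = "/sys/class/scsi_host/host0/lpfc_topology";
        FILE *f = fopen(attr, "w");

        if (!f) {
            perror("fopen");
            return EXIT_FAILURE;
        }

        /* The "nolip " prefix makes the store handler skip lpfc_issue_lip() */
        if (fprintf(f, "nolip 2") < 0)
            perror("fprintf");

        if (fclose(f) != 0)   /* sysfs reports the write status on close/flush */
            perror("fclose");

        return 0;
    }

Writing a bare value (e.g. "2") keeps the previous behaviour and issues a LIP after the update.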
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h           |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      |  67
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h      |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c   |   5
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h      |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       |  75
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   |  80
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      | 104
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h    |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c |  18
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      |  47
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       | 221
12 files changed, 378 insertions(+), 246 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index dcba267db711..1105f9a111ba 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -443,6 +443,7 @@ struct lpfc_hba {
443 uint32_t hba_flag; /* hba generic flags */ 443 uint32_t hba_flag; /* hba generic flags */
444#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 444#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
445 445
446#define DEFER_ERATT 0x4 /* Deferred error attention in progress */
446 struct lpfc_dmabuf slim2p; 447 struct lpfc_dmabuf slim2p;
447 448
448 MAILBOX_t *mbox; 449 MAILBOX_t *mbox;
@@ -723,4 +724,3 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
723 724
724 return; 725 return;
725} 726}
726
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 69296cd6e9d7..c14f0cbdb125 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2216,18 +2216,41 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
2216 * non-zero return value from lpfc_issue_lip() 2216 * non-zero return value from lpfc_issue_lip()
2217 * -EINVAL val out of range 2217 * -EINVAL val out of range
2218 **/ 2218 **/
2219static int 2219static ssize_t
2220lpfc_topology_set(struct lpfc_hba *phba, int val) 2220lpfc_topology_store(struct device *dev, struct device_attribute *attr,
2221 const char *buf, size_t count)
2221{ 2222{
2223 struct Scsi_Host *shost = class_to_shost(dev);
2224 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2225 struct lpfc_hba *phba = vport->phba;
2226 int val = 0;
2227 int nolip = 0;
2228 const char *val_buf = buf;
2222 int err; 2229 int err;
2223 uint32_t prev_val; 2230 uint32_t prev_val;
2231
2232 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
2233 nolip = 1;
2234 val_buf = &buf[strlen("nolip ")];
2235 }
2236
2237 if (!isdigit(val_buf[0]))
2238 return -EINVAL;
2239 if (sscanf(val_buf, "%i", &val) != 1)
2240 return -EINVAL;
2241
2224 if (val >= 0 && val <= 6) { 2242 if (val >= 0 && val <= 6) {
2225 prev_val = phba->cfg_topology; 2243 prev_val = phba->cfg_topology;
2226 phba->cfg_topology = val; 2244 phba->cfg_topology = val;
2245 if (nolip)
2246 return strlen(buf);
2247
2227 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 2248 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
2228 if (err) 2249 if (err) {
2229 phba->cfg_topology = prev_val; 2250 phba->cfg_topology = prev_val;
2230 return err; 2251 return -EINVAL;
2252 } else
2253 return strlen(buf);
2231 } 2254 }
2232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2233 "%d:0467 lpfc_topology attribute cannot be set to %d, " 2256 "%d:0467 lpfc_topology attribute cannot be set to %d, "
@@ -2240,7 +2263,6 @@ module_param(lpfc_topology, int, 0);
2240MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology"); 2263MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
2241lpfc_param_show(topology) 2264lpfc_param_show(topology)
2242lpfc_param_init(topology, 0, 0, 6) 2265lpfc_param_init(topology, 0, 0, 6)
2243lpfc_param_store(topology)
2244static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, 2266static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
2245 lpfc_topology_show, lpfc_topology_store); 2267 lpfc_topology_show, lpfc_topology_store);
2246 2268
@@ -2281,7 +2303,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2281 unsigned long base, step, bucket_type; 2303 unsigned long base, step, bucket_type;
2282 2304
2283 if (!strncmp(buf, "setbucket", strlen("setbucket"))) { 2305 if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
2284 if (strlen(buf) > LPFC_MAX_DATA_CTRL_LEN) 2306 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
2285 return -EINVAL; 2307 return -EINVAL;
2286 2308
2287 strcpy(bucket_data, buf); 2309 strcpy(bucket_data, buf);
@@ -2598,12 +2620,29 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = {
2598 * non-zero return value from lpfc_issue_lip() 2620 * non-zero return value from lpfc_issue_lip()
2599 * -EINVAL val out of range 2621 * -EINVAL val out of range
2600 **/ 2622 **/
2601static int 2623static ssize_t
2602lpfc_link_speed_set(struct lpfc_hba *phba, int val) 2624lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
2625 const char *buf, size_t count)
2603{ 2626{
2627 struct Scsi_Host *shost = class_to_shost(dev);
2628 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2629 struct lpfc_hba *phba = vport->phba;
2630 int val = 0;
2631 int nolip = 0;
2632 const char *val_buf = buf;
2604 int err; 2633 int err;
2605 uint32_t prev_val; 2634 uint32_t prev_val;
2606 2635
2636 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
2637 nolip = 1;
2638 val_buf = &buf[strlen("nolip ")];
2639 }
2640
2641 if (!isdigit(val_buf[0]))
2642 return -EINVAL;
2643 if (sscanf(val_buf, "%i", &val) != 1)
2644 return -EINVAL;
2645
2607 if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || 2646 if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
2608 ((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || 2647 ((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
2609 ((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || 2648 ((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
@@ -2611,14 +2650,19 @@ lpfc_link_speed_set(struct lpfc_hba *phba, int val)
2611 ((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb))) 2650 ((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)))
2612 return -EINVAL; 2651 return -EINVAL;
2613 2652
2614 if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED) 2653 if ((val >= 0 && val <= 8)
2615 && (LPFC_LINK_SPEED_BITMAP & (1 << val))) { 2654 && (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
2616 prev_val = phba->cfg_link_speed; 2655 prev_val = phba->cfg_link_speed;
2617 phba->cfg_link_speed = val; 2656 phba->cfg_link_speed = val;
2657 if (nolip)
2658 return strlen(buf);
2659
2618 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 2660 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
2619 if (err) 2661 if (err) {
2620 phba->cfg_link_speed = prev_val; 2662 phba->cfg_link_speed = prev_val;
2621 return err; 2663 return -EINVAL;
2664 } else
2665 return strlen(buf);
2622 } 2666 }
2623 2667
2624 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2665,7 +2709,6 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
2665 return -EINVAL; 2709 return -EINVAL;
2666} 2710}
2667 2711
2668lpfc_param_store(link_speed)
2669static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, 2712static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
2670 lpfc_link_speed_show, lpfc_link_speed_store); 2713 lpfc_link_speed_show, lpfc_link_speed_store);
2671 2714
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 07f4976319a5..f88ce3f26190 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -184,6 +184,8 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *);
184struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); 184struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
185void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); 185void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
186uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); 186uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
187void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
188 uint32_t);
187 189
188void lpfc_reset_barrier(struct lpfc_hba * phba); 190void lpfc_reset_barrier(struct lpfc_hba * phba);
189int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 191int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index abb870e595b3..52be5644e07a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -95,7 +95,7 @@ module_param(lpfc_debugfs_max_slow_ring_trc, int, 0);
95MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, 95MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
96 "Set debugfs slow ring trace depth"); 96 "Set debugfs slow ring trace depth");
97 97
98int lpfc_debugfs_mask_disc_trc; 98static int lpfc_debugfs_mask_disc_trc;
99module_param(lpfc_debugfs_mask_disc_trc, int, 0); 99module_param(lpfc_debugfs_mask_disc_trc, int, 0);
100MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, 100MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
101 "Set debugfs discovery trace mask"); 101 "Set debugfs discovery trace mask");
@@ -399,8 +399,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
399 399
400 len += snprintf(buf+len, size-len, "HBA SLIM\n"); 400 len += snprintf(buf+len, size-len, "HBA SLIM\n");
401 lpfc_memcpy_from_slim(buffer, 401 lpfc_memcpy_from_slim(buffer,
402 ((uint8_t *)phba->MBslimaddr) + lpfc_debugfs_last_hba_slim_off, 402 phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
403 1024);
404 403
405 ptr = (uint32_t *)&buffer[0]; 404 ptr = (uint32_t *)&buffer[0];
406 off = lpfc_debugfs_last_hba_slim_off; 405 off = lpfc_debugfs_last_hba_slim_off;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f29e548a90d1..ffd108972072 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -99,6 +99,7 @@ struct lpfc_nodelist {
99#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */ 99#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
100 100
101 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ 101 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
102 struct lpfc_hba *phba;
102 struct fc_rport *rport; /* Corresponding FC transport 103 struct fc_rport *rport; /* Corresponding FC transport
103 port structure */ 104 port structure */
104 struct lpfc_vport *vport; 105 struct lpfc_vport *vport;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d32c98bcf0c5..b8b34cf5c3d2 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5058,19 +5058,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
5058 } 5058 }
5059 spin_unlock_irq(&phba->hbalock); 5059 spin_unlock_irq(&phba->hbalock);
5060 5060
5061 while (!list_empty(&completions)) { 5061 /* Cancel all the IOCBs from the completions list */
5062 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 5062 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5063 cmd = &piocb->iocb; 5063 IOERR_SLI_ABORTED);
5064 list_del_init(&piocb->list);
5065
5066 if (!piocb->iocb_cmpl)
5067 lpfc_sli_release_iocbq(phba, piocb);
5068 else {
5069 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5070 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5071 (piocb->iocb_cmpl) (phba, piocb, piocb);
5072 }
5073 }
5074 5064
5075 return; 5065 return;
5076} 5066}
@@ -5121,18 +5111,11 @@ lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
5121 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 5111 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5122 } 5112 }
5123 spin_unlock_irq(&phba->hbalock); 5113 spin_unlock_irq(&phba->hbalock);
5124 while (!list_empty(&completions)) { 5114
5125 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 5115 /* Cancel all the IOCBs from the completions list */
5126 cmd = &piocb->iocb; 5116 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5127 list_del_init(&piocb->list); 5117 IOERR_SLI_ABORTED);
5128 if (!piocb->iocb_cmpl) 5118
5129 lpfc_sli_release_iocbq(phba, piocb);
5130 else {
5131 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
5132 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
5133 (piocb->iocb_cmpl) (phba, piocb, piocb);
5134 }
5135 }
5136 return; 5119 return;
5137} 5120}
5138 5121
@@ -6468,7 +6451,6 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
6468 LIST_HEAD(completions); 6451 LIST_HEAD(completions);
6469 struct lpfc_hba *phba = vport->phba; 6452 struct lpfc_hba *phba = vport->phba;
6470 struct lpfc_iocbq *tmp_iocb, *piocb; 6453 struct lpfc_iocbq *tmp_iocb, *piocb;
6471 IOCB_t *cmd;
6472 6454
6473 spin_lock_irq(&phba->hbalock); 6455 spin_lock_irq(&phba->hbalock);
6474 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 6456 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
@@ -6481,15 +6463,9 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
6481 } 6463 }
6482 spin_unlock_irq(&phba->hbalock); 6464 spin_unlock_irq(&phba->hbalock);
6483 6465
6484 while (!list_empty(&completions)) { 6466 /* Cancel all the IOCBs from the completions list */
6485 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 6467 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6486 list_del_init(&piocb->list); 6468 IOERR_SLI_ABORTED);
6487
6488 cmd = &piocb->iocb;
6489 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
6490 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
6491 (piocb->iocb_cmpl) (phba, piocb, piocb);
6492 }
6493} 6469}
6494 6470
6495/** 6471/**
@@ -6506,10 +6482,9 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
6506void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 6482void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
6507{ 6483{
6508 LIST_HEAD(completions); 6484 LIST_HEAD(completions);
6509 struct lpfc_hba *phba = ndlp->vport->phba; 6485 struct lpfc_hba *phba = ndlp->phba;
6510 struct lpfc_iocbq *tmp_iocb, *piocb; 6486 struct lpfc_iocbq *tmp_iocb, *piocb;
6511 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 6487 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6512 IOCB_t *cmd;
6513 6488
6514 spin_lock_irq(&phba->hbalock); 6489 spin_lock_irq(&phba->hbalock);
6515 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 6490 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
@@ -6521,15 +6496,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
6521 } 6496 }
6522 spin_unlock_irq(&phba->hbalock); 6497 spin_unlock_irq(&phba->hbalock);
6523 6498
6524 while (!list_empty(&completions)) { 6499 /* Cancel all the IOCBs from the completions list */
6525 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 6500 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6526 list_del_init(&piocb->list); 6501 IOERR_SLI_ABORTED);
6527
6528 cmd = &piocb->iocb;
6529 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
6530 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
6531 (piocb->iocb_cmpl) (phba, piocb, piocb);
6532 }
6533} 6502}
6534 6503
6535/** 6504/**
@@ -6546,20 +6515,12 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
6546void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 6515void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6547{ 6516{
6548 LIST_HEAD(completions); 6517 LIST_HEAD(completions);
6549 struct lpfc_iocbq *piocb;
6550 IOCB_t *cmd;
6551 6518
6552 spin_lock_irq(&phba->hbalock); 6519 spin_lock_irq(&phba->hbalock);
6553 list_splice_init(&phba->fabric_iocb_list, &completions); 6520 list_splice_init(&phba->fabric_iocb_list, &completions);
6554 spin_unlock_irq(&phba->hbalock); 6521 spin_unlock_irq(&phba->hbalock);
6555 6522
6556 while (!list_empty(&completions)) { 6523 /* Cancel all the IOCBs from the completions list */
6557 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 6524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6558 list_del_init(&piocb->list); 6525 IOERR_SLI_ABORTED);
6559
6560 cmd = &piocb->iocb;
6561 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
6562 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
6563 (piocb->iocb_cmpl) (phba, piocb, piocb);
6564 }
6565} 6526}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9373a9e7485a..e764ce0bf704 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -78,7 +78,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
78 return; 78 return;
79 } 79 }
80 80
81 phba = ndlp->vport->phba; 81 phba = ndlp->phba;
82 82
83 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, 83 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
84 "rport terminate: sid:x%x did:x%x flg:x%x", 84 "rport terminate: sid:x%x did:x%x flg:x%x",
@@ -1862,9 +1862,14 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1862 * @vport: Pointer to Virtual Port object. 1862 * @vport: Pointer to Virtual Port object.
1863 * @ndlp: Pointer to FC node object. 1863 * @ndlp: Pointer to FC node object.
1864 * @did: FC_ID of the node. 1864 * @did: FC_ID of the node.
1865 * This function is always called when node object need to 1865 *
1866 * be initialized. It initializes all the fields of the node 1866 * This function is always called when node object need to be initialized.
1867 * object. 1867 * It initializes all the fields of the node object. Although the reference
1868 * to phba from @ndlp can be obtained indirectly through its reference to
1869 * @vport, a direct reference to phba is taken here by @ndlp. This is
1870 * because the life-span of @ndlp may extend beyond the existence of @vport, as
1871 * the final release of ndlp is determined by its reference count. And, the
1872 * operation on @ndlp needs the reference to phba.
1868 **/ 1873 **/
1869static inline void 1874static inline void
1870lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1875lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
@@ -1877,6 +1882,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1877 ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 1882 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
1878 ndlp->nlp_DID = did; 1883 ndlp->nlp_DID = did;
1879 ndlp->vport = vport; 1884 ndlp->vport = vport;
1885 ndlp->phba = vport->phba;
1880 ndlp->nlp_sid = NLP_NO_SID; 1886 ndlp->nlp_sid = NLP_NO_SID;
1881 kref_init(&ndlp->kref); 1887 kref_init(&ndlp->kref);
1882 NLP_INT_NODE_ACT(ndlp); 1888 NLP_INT_NODE_ACT(ndlp);
@@ -2086,7 +2092,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2086 struct lpfc_sli *psli; 2092 struct lpfc_sli *psli;
2087 struct lpfc_sli_ring *pring; 2093 struct lpfc_sli_ring *pring;
2088 struct lpfc_iocbq *iocb, *next_iocb; 2094 struct lpfc_iocbq *iocb, *next_iocb;
2089 IOCB_t *icmd;
2090 uint32_t rpi, i; 2095 uint32_t rpi, i;
2091 2096
2092 lpfc_fabric_abort_nport(ndlp); 2097 lpfc_fabric_abort_nport(ndlp);
@@ -2122,19 +2127,9 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2122 } 2127 }
2123 } 2128 }
2124 2129
2125 while (!list_empty(&completions)) { 2130 /* Cancel all the IOCBs from the completions list */
2126 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 2131 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
2127 list_del_init(&iocb->list); 2132 IOERR_SLI_ABORTED);
2128
2129 if (!iocb->iocb_cmpl)
2130 lpfc_sli_release_iocbq(phba, iocb);
2131 else {
2132 icmd = &iocb->iocb;
2133 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2134 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2135 (iocb->iocb_cmpl)(phba, iocb, iocb);
2136 }
2137 }
2138 2133
2139 return 0; 2134 return 0;
2140} 2135}
@@ -2186,9 +2181,13 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
2186 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2181 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2187 mbox->context1 = NULL; 2182 mbox->context1 = NULL;
2188 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2183 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2189 if (rc == MBX_NOT_FINISHED) { 2184 if (rc != MBX_TIMEOUT)
2190 mempool_free(mbox, phba->mbox_mem_pool); 2185 mempool_free(mbox, phba->mbox_mem_pool);
2191 } 2186
2187 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
2188 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
2189 "1836 Could not issue "
2190 "unreg_login(all_rpis) status %d\n", rc);
2192 } 2191 }
2193} 2192}
2194 2193
@@ -2206,12 +2205,14 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
2206 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2205 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2207 mbox->context1 = NULL; 2206 mbox->context1 = NULL;
2208 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2207 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2209 if (rc == MBX_NOT_FINISHED) { 2208 if (rc != MBX_TIMEOUT)
2209 mempool_free(mbox, phba->mbox_mem_pool);
2210
2211 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
2210 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 2212 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
2211 "1815 Could not issue " 2213 "1815 Could not issue "
2212 "unreg_did (default rpis)\n"); 2214 "unreg_did (default rpis) status %d\n",
2213 mempool_free(mbox, phba->mbox_mem_pool); 2215 rc);
2214 }
2215 } 2216 }
2216} 2217}
2217 2218
@@ -2470,14 +2471,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2470 if (ndlp->nlp_flag & NLP_RCV_PLOGI) 2471 if (ndlp->nlp_flag & NLP_RCV_PLOGI)
2471 return NULL; 2472 return NULL;
2472 2473
2473 spin_lock_irq(shost->host_lock);
2474 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2475 spin_unlock_irq(shost->host_lock);
2476
2477 /* Since this node is marked for discovery, 2474 /* Since this node is marked for discovery,
2478 * delay timeout is not needed. 2475 * delay timeout is not needed.
2479 */ 2476 */
2480 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2477 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2478 spin_lock_irq(shost->host_lock);
2479 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2480 spin_unlock_irq(shost->host_lock);
2481 } else 2481 } else
2482 ndlp = NULL; 2482 ndlp = NULL;
2483 } else { 2483 } else {
@@ -2740,19 +2740,9 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2740 } 2740 }
2741 spin_unlock_irq(&phba->hbalock); 2741 spin_unlock_irq(&phba->hbalock);
2742 2742
2743 while (!list_empty(&completions)) { 2743 /* Cancel all the IOCBs from the completions list */
2744 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 2744 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
2745 list_del_init(&iocb->list); 2745 IOERR_SLI_ABORTED);
2746
2747 if (!iocb->iocb_cmpl)
2748 lpfc_sli_release_iocbq(phba, iocb);
2749 else {
2750 icmd = &iocb->iocb;
2751 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2752 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2753 (iocb->iocb_cmpl) (phba, iocb, iocb);
2754 }
2755 }
2756} 2746}
2757 2747
2758static void 2748static void
@@ -3173,7 +3163,7 @@ lpfc_nlp_release(struct kref *kref)
3173 lpfc_nlp_remove(ndlp->vport, ndlp); 3163 lpfc_nlp_remove(ndlp->vport, ndlp);
3174 3164
3175 /* clear the ndlp active flag for all release cases */ 3165 /* clear the ndlp active flag for all release cases */
3176 phba = ndlp->vport->phba; 3166 phba = ndlp->phba;
3177 spin_lock_irqsave(&phba->ndlp_lock, flags); 3167 spin_lock_irqsave(&phba->ndlp_lock, flags);
3178 NLP_CLR_NODE_ACT(ndlp); 3168 NLP_CLR_NODE_ACT(ndlp);
3179 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3169 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
@@ -3181,7 +3171,7 @@ lpfc_nlp_release(struct kref *kref)
3181 /* free ndlp memory for final ndlp release */ 3171 /* free ndlp memory for final ndlp release */
3182 if (NLP_CHK_FREE_REQ(ndlp)) { 3172 if (NLP_CHK_FREE_REQ(ndlp)) {
3183 kfree(ndlp->lat_data); 3173 kfree(ndlp->lat_data);
3184 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); 3174 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
3185 } 3175 }
3186} 3176}
3187 3177
@@ -3204,7 +3194,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
3204 * ndlp reference count that is in the process of being 3194 * ndlp reference count that is in the process of being
3205 * released. 3195 * released.
3206 */ 3196 */
3207 phba = ndlp->vport->phba; 3197 phba = ndlp->phba;
3208 spin_lock_irqsave(&phba->ndlp_lock, flags); 3198 spin_lock_irqsave(&phba->ndlp_lock, flags);
3209 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { 3199 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
3210 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3200 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
@@ -3240,7 +3230,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
3240 "node put: did:x%x flg:x%x refcnt:x%x", 3230 "node put: did:x%x flg:x%x refcnt:x%x",
3241 ndlp->nlp_DID, ndlp->nlp_flag, 3231 ndlp->nlp_DID, ndlp->nlp_flag,
3242 atomic_read(&ndlp->kref.refcount)); 3232 atomic_read(&ndlp->kref.refcount));
3243 phba = ndlp->vport->phba; 3233 phba = ndlp->phba;
3244 spin_lock_irqsave(&phba->ndlp_lock, flags); 3234 spin_lock_irqsave(&phba->ndlp_lock, flags);
3245 /* Check the ndlp memory free acknowledge flag to avoid the 3235 /* Check the ndlp memory free acknowledge flag to avoid the
3246 * possible race condition that kref_put got invoked again 3236 * possible race condition that kref_put got invoked again
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e3645e8f3487..86d1bdcbf2d8 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -302,6 +302,7 @@ int
302lpfc_config_port_post(struct lpfc_hba *phba) 302lpfc_config_port_post(struct lpfc_hba *phba)
303{ 303{
304 struct lpfc_vport *vport = phba->pport; 304 struct lpfc_vport *vport = phba->pport;
305 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
305 LPFC_MBOXQ_t *pmb; 306 LPFC_MBOXQ_t *pmb;
306 MAILBOX_t *mb; 307 MAILBOX_t *mb;
307 struct lpfc_dmabuf *mp; 308 struct lpfc_dmabuf *mp;
@@ -359,6 +360,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
359 sizeof (struct lpfc_name)); 360 sizeof (struct lpfc_name));
360 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 361 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
361 sizeof (struct lpfc_name)); 362 sizeof (struct lpfc_name));
363
364 /* Update the fc_host data structures with new wwn. */
365 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
366 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
367
362 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 368 /* If no serial number in VPD data, use low 6 bytes of WWNN */
363 /* This should be consolidated into parse_vpd ? - mr */ 369 /* This should be consolidated into parse_vpd ? - mr */
364 if (phba->SerialNumber[0] == 0) { 370 if (phba->SerialNumber[0] == 0) {
@@ -598,8 +604,6 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
598 struct lpfc_sli *psli = &phba->sli; 604 struct lpfc_sli *psli = &phba->sli;
599 struct lpfc_sli_ring *pring; 605 struct lpfc_sli_ring *pring;
600 struct lpfc_dmabuf *mp, *next_mp; 606 struct lpfc_dmabuf *mp, *next_mp;
601 struct lpfc_iocbq *iocb;
602 IOCB_t *cmd = NULL;
603 LIST_HEAD(completions); 607 LIST_HEAD(completions);
604 int i; 608 int i;
605 609
@@ -627,20 +631,9 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
627 pring->txcmplq_cnt = 0; 631 pring->txcmplq_cnt = 0;
628 spin_unlock_irq(&phba->hbalock); 632 spin_unlock_irq(&phba->hbalock);
629 633
630 while (!list_empty(&completions)) { 634 /* Cancel all the IOCBs from the completions list */
631 iocb = list_get_first(&completions, struct lpfc_iocbq, 635 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
632 list); 636 IOERR_SLI_ABORTED);
633 cmd = &iocb->iocb;
634 list_del_init(&iocb->list);
635
636 if (!iocb->iocb_cmpl)
637 lpfc_sli_release_iocbq(phba, iocb);
638 else {
639 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
640 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
641 (iocb->iocb_cmpl) (phba, iocb, iocb);
642 }
643 }
644 637
645 lpfc_sli_abort_iocb_ring(phba, pring); 638 lpfc_sli_abort_iocb_ring(phba, pring);
646 spin_lock_irq(&phba->hbalock); 639 spin_lock_irq(&phba->hbalock);
@@ -856,6 +849,72 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
856} 849}
857 850
858/** 851/**
852 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
853 * @phba: pointer to lpfc hba data structure.
854 *
855 * This routine is invoked to handle the deferred HBA hardware error
856 * conditions. This type of error is indicated by HBA by setting ER1
857 * and another ER bit in the host status register. The driver will
858 * wait until the ER1 bit clears before handling the error condition.
859 **/
860static void
861lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
862{
863 uint32_t old_host_status = phba->work_hs;
864 struct lpfc_sli_ring *pring;
865 struct lpfc_sli *psli = &phba->sli;
866
867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
868 "0479 Deferred Adapter Hardware Error "
869 "Data: x%x x%x x%x\n",
870 phba->work_hs,
871 phba->work_status[0], phba->work_status[1]);
872
873 spin_lock_irq(&phba->hbalock);
874 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
875 spin_unlock_irq(&phba->hbalock);
876
877
878 /*
879 * Firmware stops when it triggred erratt. That could cause the I/Os
880 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
881 * SCSI layer retry it after re-establishing link.
882 */
883 pring = &psli->ring[psli->fcp_ring];
884 lpfc_sli_abort_iocb_ring(phba, pring);
885
886 /*
887 * There was a firmware error. Take the hba offline and then
888 * attempt to restart it.
889 */
890 lpfc_offline_prep(phba);
891 lpfc_offline(phba);
892
893 /* Wait for the ER1 bit to clear.*/
894 while (phba->work_hs & HS_FFER1) {
895 msleep(100);
896 phba->work_hs = readl(phba->HSregaddr);
897 /* If driver is unloading let the worker thread continue */
898 if (phba->pport->load_flag & FC_UNLOADING) {
899 phba->work_hs = 0;
900 break;
901 }
902 }
903
904 /*
905 * This is to protect against a race condition in which the
906 * first write to the host attention register clears the
907 * host status register.
908 */
909 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
910 phba->work_hs = old_host_status & ~HS_FFER1;
911
912 phba->hba_flag &= ~DEFER_ERATT;
913 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
914 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
915}
916
917/**
859 * lpfc_handle_eratt - The HBA hardware error handler 918 * lpfc_handle_eratt - The HBA hardware error handler
860 * @phba: pointer to lpfc hba data structure. 919 * @phba: pointer to lpfc hba data structure.
861 * 920 *
@@ -894,6 +953,9 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
894 (char *) &board_event, 953 (char *) &board_event,
895 LPFC_NL_VENDOR_ID); 954 LPFC_NL_VENDOR_ID);
896 955
956 if (phba->hba_flag & DEFER_ERATT)
957 lpfc_handle_deferred_eratt(phba);
958
897 if (phba->work_hs & HS_FFER6) { 959 if (phba->work_hs & HS_FFER6) {
898 /* Re-establishing Link */ 960 /* Re-establishing Link */
899 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 961 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1321,7 +1383,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1321 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1383 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1322 break; 1384 break;
1323 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1385 case PCI_DEVICE_ID_ZEPHYR_DCSP:
1324 m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"}; 1386 m = (typeof(m)){"LP2105", max_speed, "PCIe"};
1387 GE = 1;
1325 break; 1388 break;
1326 case PCI_DEVICE_ID_ZMID: 1389 case PCI_DEVICE_ID_ZMID:
1327 m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; 1390 m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
@@ -3032,8 +3095,6 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3032 3095
3033 lpfc_free_sysfs_attr(vport); 3096 lpfc_free_sysfs_attr(vport);
3034 3097
3035 kthread_stop(phba->worker_thread);
3036
3037 /* Release all the vports against this physical port */ 3098 /* Release all the vports against this physical port */
3038 vports = lpfc_create_vport_work_array(phba); 3099 vports = lpfc_create_vport_work_array(phba);
3039 if (vports != NULL) 3100 if (vports != NULL)
@@ -3051,7 +3112,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3051 * clears the rings, discards all mailbox commands, and resets 3112 * clears the rings, discards all mailbox commands, and resets
3052 * the HBA. 3113 * the HBA.
3053 */ 3114 */
3115
3116 /* HBA interrupt will be disabled after this call */
3054 lpfc_sli_hba_down(phba); 3117 lpfc_sli_hba_down(phba);
3118 /* Stop kthread signal shall trigger work_done one more time */
3119 kthread_stop(phba->worker_thread);
3120 /* Final cleanup of txcmplq and reset the HBA */
3055 lpfc_sli_brdrestart(phba); 3121 lpfc_sli_brdrestart(phba);
3056 3122
3057 lpfc_stop_phba_timers(phba); 3123 lpfc_stop_phba_timers(phba);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index a85b7c196bbc..1aa85709b012 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -27,7 +27,7 @@
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x40 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x80 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x100 /* Temperature sensor events */
30#define LOG_BG 0x200 /* BlockBuard events */ 30#define LOG_BG 0x200 /* BlockGuard events */
31#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x400 /* Miscellaneous events */
32#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x800 /* SLI events */
33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 8f548adae9cc..08cdc77af41c 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -192,7 +192,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
192 struct lpfc_sli *psli = &phba->sli; 192 struct lpfc_sli *psli = &phba->sli;
193 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 193 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
194 struct lpfc_iocbq *iocb, *next_iocb; 194 struct lpfc_iocbq *iocb, *next_iocb;
195 IOCB_t *cmd;
196 195
197 /* Abort outstanding I/O on NPort <nlp_DID> */ 196 /* Abort outstanding I/O on NPort <nlp_DID> */
198 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, 197 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
@@ -223,19 +222,10 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
223 } 222 }
224 spin_unlock_irq(&phba->hbalock); 223 spin_unlock_irq(&phba->hbalock);
225 224
226 while (!list_empty(&completions)) { 225 /* Cancel all the IOCBs from the completions list */
227 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 226 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
228 cmd = &iocb->iocb; 227 IOERR_SLI_ABORTED);
229 list_del_init(&iocb->list); 228
230
231 if (!iocb->iocb_cmpl)
232 lpfc_sli_release_iocbq(phba, iocb);
233 else {
234 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
235 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
236 (iocb->iocb_cmpl) (phba, iocb, iocb);
237 }
238 }
239 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); 229 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
240 return 0; 230 return 0;
241} 231}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 6eedb23980d6..167b66dd34c7 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -272,14 +272,14 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
272 **/ 272 **/
273static inline void 273static inline void
274lpfc_rampup_queue_depth(struct lpfc_vport *vport, 274lpfc_rampup_queue_depth(struct lpfc_vport *vport,
275 struct scsi_device *sdev) 275 uint32_t queue_depth)
276{ 276{
277 unsigned long flags; 277 unsigned long flags;
278 struct lpfc_hba *phba = vport->phba; 278 struct lpfc_hba *phba = vport->phba;
279 uint32_t evt_posted; 279 uint32_t evt_posted;
280 atomic_inc(&phba->num_cmd_success); 280 atomic_inc(&phba->num_cmd_success);
281 281
282 if (vport->cfg_lun_queue_depth <= sdev->queue_depth) 282 if (vport->cfg_lun_queue_depth <= queue_depth)
283 return; 283 return;
284 spin_lock_irqsave(&phba->hbalock, flags); 284 spin_lock_irqsave(&phba->hbalock, flags);
285 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) || 285 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
@@ -737,7 +737,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
737 * Due to difference in data length between DIF/non-DIF paths, 737 * Due to difference in data length between DIF/non-DIF paths,
738 * we need to set word 4 of IOCB here 738 * we need to set word 4 of IOCB here
739 */ 739 */
740 iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd)); 740 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
741 return 0; 741 return 0;
742} 742}
743 743
@@ -1693,10 +1693,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1693 struct lpfc_nodelist *pnode = rdata->pnode; 1693 struct lpfc_nodelist *pnode = rdata->pnode;
1694 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 1694 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1695 int result; 1695 int result;
1696 struct scsi_device *sdev, *tmp_sdev; 1696 struct scsi_device *tmp_sdev;
1697 int depth = 0; 1697 int depth = 0;
1698 unsigned long flags; 1698 unsigned long flags;
1699 struct lpfc_fast_path_event *fast_path_evt; 1699 struct lpfc_fast_path_event *fast_path_evt;
1700 struct Scsi_Host *shost = cmd->device->host;
1701 uint32_t queue_depth, scsi_id;
1700 1702
1701 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 1703 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
1702 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 1704 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
@@ -1807,11 +1809,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1807 1809
1808 lpfc_update_stats(phba, lpfc_cmd); 1810 lpfc_update_stats(phba, lpfc_cmd);
1809 result = cmd->result; 1811 result = cmd->result;
1810 sdev = cmd->device;
1811 if (vport->cfg_max_scsicmpl_time && 1812 if (vport->cfg_max_scsicmpl_time &&
1812 time_after(jiffies, lpfc_cmd->start_time + 1813 time_after(jiffies, lpfc_cmd->start_time +
1813 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 1814 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
1814 spin_lock_irqsave(sdev->host->host_lock, flags); 1815 spin_lock_irqsave(shost->host_lock, flags);
1815 if (pnode && NLP_CHK_NODE_ACT(pnode)) { 1816 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1816 if (pnode->cmd_qdepth > 1817 if (pnode->cmd_qdepth >
1817 atomic_read(&pnode->cmd_pending) && 1818 atomic_read(&pnode->cmd_pending) &&
@@ -1824,22 +1825,26 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1824 1825
1825 pnode->last_change_time = jiffies; 1826 pnode->last_change_time = jiffies;
1826 } 1827 }
1827 spin_unlock_irqrestore(sdev->host->host_lock, flags); 1828 spin_unlock_irqrestore(shost->host_lock, flags);
1828 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { 1829 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1829 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && 1830 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
1830 time_after(jiffies, pnode->last_change_time + 1831 time_after(jiffies, pnode->last_change_time +
1831 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { 1832 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
1832 spin_lock_irqsave(sdev->host->host_lock, flags); 1833 spin_lock_irqsave(shost->host_lock, flags);
1833 pnode->cmd_qdepth += pnode->cmd_qdepth * 1834 pnode->cmd_qdepth += pnode->cmd_qdepth *
1834 LPFC_TGTQ_RAMPUP_PCENT / 100; 1835 LPFC_TGTQ_RAMPUP_PCENT / 100;
1835 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) 1836 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
1836 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 1837 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
1837 pnode->last_change_time = jiffies; 1838 pnode->last_change_time = jiffies;
1838 spin_unlock_irqrestore(sdev->host->host_lock, flags); 1839 spin_unlock_irqrestore(shost->host_lock, flags);
1839 } 1840 }
1840 } 1841 }
1841 1842
1842 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 1843 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1844
1845 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
1846 queue_depth = cmd->device->queue_depth;
1847 scsi_id = cmd->device->id;
1843 cmd->scsi_done(cmd); 1848 cmd->scsi_done(cmd);
1844 1849
1845 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 1850 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -1847,28 +1852,28 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1847 * If there is a thread waiting for command completion 1852 * If there is a thread waiting for command completion
1848 * wake up the thread. 1853 * wake up the thread.
1849 */ 1854 */
1850 spin_lock_irqsave(sdev->host->host_lock, flags); 1855 spin_lock_irqsave(shost->host_lock, flags);
1851 lpfc_cmd->pCmd = NULL; 1856 lpfc_cmd->pCmd = NULL;
1852 if (lpfc_cmd->waitq) 1857 if (lpfc_cmd->waitq)
1853 wake_up(lpfc_cmd->waitq); 1858 wake_up(lpfc_cmd->waitq);
1854 spin_unlock_irqrestore(sdev->host->host_lock, flags); 1859 spin_unlock_irqrestore(shost->host_lock, flags);
1855 lpfc_release_scsi_buf(phba, lpfc_cmd); 1860 lpfc_release_scsi_buf(phba, lpfc_cmd);
1856 return; 1861 return;
1857 } 1862 }
1858 1863
1859 1864
1860 if (!result) 1865 if (!result)
1861 lpfc_rampup_queue_depth(vport, sdev); 1866 lpfc_rampup_queue_depth(vport, queue_depth);
1862 1867
1863 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) && 1868 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
1864 ((jiffies - pnode->last_ramp_up_time) > 1869 ((jiffies - pnode->last_ramp_up_time) >
1865 LPFC_Q_RAMP_UP_INTERVAL * HZ) && 1870 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1866 ((jiffies - pnode->last_q_full_time) > 1871 ((jiffies - pnode->last_q_full_time) >
1867 LPFC_Q_RAMP_UP_INTERVAL * HZ) && 1872 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1868 (vport->cfg_lun_queue_depth > sdev->queue_depth)) { 1873 (vport->cfg_lun_queue_depth > queue_depth)) {
1869 shost_for_each_device(tmp_sdev, sdev->host) { 1874 shost_for_each_device(tmp_sdev, shost) {
1870 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){ 1875 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
1871 if (tmp_sdev->id != sdev->id) 1876 if (tmp_sdev->id != scsi_id)
1872 continue; 1877 continue;
1873 if (tmp_sdev->ordered_tags) 1878 if (tmp_sdev->ordered_tags)
1874 scsi_adjust_queue_depth(tmp_sdev, 1879 scsi_adjust_queue_depth(tmp_sdev,
@@ -1884,7 +1889,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1884 } 1889 }
1885 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode, 1890 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
1886 0xFFFFFFFF, 1891 0xFFFFFFFF,
1887 sdev->queue_depth - 1, sdev->queue_depth); 1892 queue_depth , queue_depth + 1);
1888 } 1893 }
1889 1894
1890 /* 1895 /*
@@ -1895,8 +1900,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1895 NLP_CHK_NODE_ACT(pnode)) { 1900 NLP_CHK_NODE_ACT(pnode)) {
1896 pnode->last_q_full_time = jiffies; 1901 pnode->last_q_full_time = jiffies;
1897 1902
1898 shost_for_each_device(tmp_sdev, sdev->host) { 1903 shost_for_each_device(tmp_sdev, shost) {
1899 if (tmp_sdev->id != sdev->id) 1904 if (tmp_sdev->id != scsi_id)
1900 continue; 1905 continue;
1901 depth = scsi_track_queue_full(tmp_sdev, 1906 depth = scsi_track_queue_full(tmp_sdev,
1902 tmp_sdev->queue_depth - 1); 1907 tmp_sdev->queue_depth - 1);
@@ -1908,7 +1913,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1908 * scsi_track_queue_full. 1913 * scsi_track_queue_full.
1909 */ 1914 */
1910 if (depth == -1) 1915 if (depth == -1)
1911 depth = sdev->host->cmd_per_lun; 1916 depth = shost->cmd_per_lun;
1912 1917
1913 if (depth) { 1918 if (depth) {
1914 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 1919 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
@@ -1924,11 +1929,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1924 * If there is a thread waiting for command completion 1929 * If there is a thread waiting for command completion
1925 * wake up the thread. 1930 * wake up the thread.
1926 */ 1931 */
1927 spin_lock_irqsave(sdev->host->host_lock, flags); 1932 spin_lock_irqsave(shost->host_lock, flags);
1928 lpfc_cmd->pCmd = NULL; 1933 lpfc_cmd->pCmd = NULL;
1929 if (lpfc_cmd->waitq) 1934 if (lpfc_cmd->waitq)
1930 wake_up(lpfc_cmd->waitq); 1935 wake_up(lpfc_cmd->waitq);
1931 spin_unlock_irqrestore(sdev->host->host_lock, flags); 1936 spin_unlock_irqrestore(shost->host_lock, flags);
1932 1937
1933 lpfc_release_scsi_buf(phba, lpfc_cmd); 1938 lpfc_release_scsi_buf(phba, lpfc_cmd);
1934} 1939}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 74cfa963dfc5..eb5c75c45ba4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -185,6 +185,38 @@ lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
185} 185}
186 186
187/** 187/**
188 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
189 * @phba: Pointer to HBA context object.
190 * @iocblist: List of IOCBs.
191 * @ulpstatus: ULP status in IOCB command field.
192 * @ulpWord4: ULP word-4 in IOCB command field.
193 *
194 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
195 * on the list by invoking the complete callback function associated with the
196 * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
197 * fields.
198 **/
199void
200lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
201 uint32_t ulpstatus, uint32_t ulpWord4)
202{
203 struct lpfc_iocbq *piocb;
204
205 while (!list_empty(iocblist)) {
206 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
207
208 if (!piocb->iocb_cmpl)
209 lpfc_sli_release_iocbq(phba, piocb);
210 else {
211 piocb->iocb.ulpStatus = ulpstatus;
212 piocb->iocb.un.ulpWord[4] = ulpWord4;
213 (piocb->iocb_cmpl) (phba, piocb, piocb);
214 }
215 }
216 return;
217}
218
219/**
188 * lpfc_sli_iocb_cmd_type - Get the iocb type 220 * lpfc_sli_iocb_cmd_type - Get the iocb type
189 * @iocb_cmnd: iocb command code. 221 * @iocb_cmnd: iocb command code.
190 * 222 *
@@ -818,8 +850,8 @@ static struct lpfc_hbq_init lpfc_els_hbq = {
818 .profile = 0, 850 .profile = 0,
819 .ring_mask = (1 << LPFC_ELS_RING), 851 .ring_mask = (1 << LPFC_ELS_RING),
820 .buffer_count = 0, 852 .buffer_count = 0,
821 .init_count = 20, 853 .init_count = 40,
822 .add_count = 5, 854 .add_count = 40,
823}; 855};
824 856
825/* HBQ for the extra ring if needed */ 857/* HBQ for the extra ring if needed */
@@ -1596,7 +1628,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1596 * Ring <ringno> handler: unexpected completion IoTag 1628 * Ring <ringno> handler: unexpected completion IoTag
1597 * <IoTag> 1629 * <IoTag>
1598 */ 1630 */
1599 lpfc_printf_vlog(cmdiocbp->vport, KERN_WARNING, LOG_SLI, 1631 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1600 "0322 Ring %d handler: " 1632 "0322 Ring %d handler: "
1601 "unexpected completion IoTag x%x " 1633 "unexpected completion IoTag x%x "
1602 "Data: x%x x%x x%x x%x\n", 1634 "Data: x%x x%x x%x x%x\n",
@@ -2324,7 +2356,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2324{ 2356{
2325 LIST_HEAD(completions); 2357 LIST_HEAD(completions);
2326 struct lpfc_iocbq *iocb, *next_iocb; 2358 struct lpfc_iocbq *iocb, *next_iocb;
2327 IOCB_t *cmd = NULL;
2328 2359
2329 if (pring->ringno == LPFC_ELS_RING) { 2360 if (pring->ringno == LPFC_ELS_RING) {
2330 lpfc_fabric_abort_hba(phba); 2361 lpfc_fabric_abort_hba(phba);
@@ -2343,19 +2374,9 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2343 2374
2344 spin_unlock_irq(&phba->hbalock); 2375 spin_unlock_irq(&phba->hbalock);
2345 2376
2346 while (!list_empty(&completions)) { 2377 /* Cancel all the IOCBs from the completions list */
2347 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 2378 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
2348 cmd = &iocb->iocb; 2379 IOERR_SLI_ABORTED);
2349 list_del_init(&iocb->list);
2350
2351 if (!iocb->iocb_cmpl)
2352 lpfc_sli_release_iocbq(phba, iocb);
2353 else {
2354 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2355 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2356 (iocb->iocb_cmpl) (phba, iocb, iocb);
2357 }
2358 }
2359} 2380}
2360 2381
2361/** 2382/**
@@ -2373,8 +2394,6 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2373{ 2394{
2374 LIST_HEAD(txq); 2395 LIST_HEAD(txq);
2375 LIST_HEAD(txcmplq); 2396 LIST_HEAD(txcmplq);
2376 struct lpfc_iocbq *iocb;
2377 IOCB_t *cmd = NULL;
2378 struct lpfc_sli *psli = &phba->sli; 2397 struct lpfc_sli *psli = &phba->sli;
2379 struct lpfc_sli_ring *pring; 2398 struct lpfc_sli_ring *pring;
2380 2399
@@ -2392,34 +2411,12 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2392 spin_unlock_irq(&phba->hbalock); 2411 spin_unlock_irq(&phba->hbalock);
2393 2412
2394 /* Flush the txq */ 2413 /* Flush the txq */
2395 while (!list_empty(&txq)) { 2414 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
2396 iocb = list_get_first(&txq, struct lpfc_iocbq, list); 2415 IOERR_SLI_DOWN);
2397 cmd = &iocb->iocb;
2398 list_del_init(&iocb->list);
2399
2400 if (!iocb->iocb_cmpl)
2401 lpfc_sli_release_iocbq(phba, iocb);
2402 else {
2403 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2404 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2405 (iocb->iocb_cmpl) (phba, iocb, iocb);
2406 }
2407 }
2408 2416
2409 /* Flush the txcmpq */ 2417 /* Flush the txcmpq */
2410 while (!list_empty(&txcmplq)) { 2418 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
2411 iocb = list_get_first(&txcmplq, struct lpfc_iocbq, list); 2419 IOERR_SLI_DOWN);
2412 cmd = &iocb->iocb;
2413 list_del_init(&iocb->list);
2414
2415 if (!iocb->iocb_cmpl)
2416 lpfc_sli_release_iocbq(phba, iocb);
2417 else {
2418 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2419 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2420 (iocb->iocb_cmpl) (phba, iocb, iocb);
2421 }
2422 }
2423} 2420}
2424 2421
2425/** 2422/**
@@ -3251,6 +3248,21 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3251 struct lpfc_sli *psli = &phba->sli; 3248 struct lpfc_sli *psli = &phba->sli;
3252 struct lpfc_sli_ring *pring; 3249 struct lpfc_sli_ring *pring;
3253 3250
3251 /* Check the pmbox pointer first. There is a race condition
3252 * between the mbox timeout handler getting executed in the
3253 * worklist and the mailbox actually completing. When this
3254 * race condition occurs, the mbox_active will be NULL.
3255 */
3256 spin_lock_irq(&phba->hbalock);
3257 if (pmbox == NULL) {
3258 lpfc_printf_log(phba, KERN_WARNING,
3259 LOG_MBOX | LOG_SLI,
3260 "0353 Active Mailbox cleared - mailbox timeout "
3261 "exiting\n");
3262 spin_unlock_irq(&phba->hbalock);
3263 return;
3264 }
3265
3254 /* Mbox cmd <mbxCommand> timeout */ 3266 /* Mbox cmd <mbxCommand> timeout */
3255 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 3267 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3256 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 3268 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
@@ -3258,6 +3270,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3258 phba->pport->port_state, 3270 phba->pport->port_state,
3259 phba->sli.sli_flag, 3271 phba->sli.sli_flag,
3260 phba->sli.mbox_active); 3272 phba->sli.mbox_active);
3273 spin_unlock_irq(&phba->hbalock);
3261 3274
3262 /* Setting state unknown so lpfc_sli_abort_iocb_ring 3275 /* Setting state unknown so lpfc_sli_abort_iocb_ring
3263 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 3276 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
@@ -3364,6 +3377,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3364 goto out_not_finished; 3377 goto out_not_finished;
3365 } 3378 }
3366 3379
3380 /* If HBA has a deferred error attention, fail the iocb. */
3381 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
3382 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3383 goto out_not_finished;
3384 }
3385
3367 psli = &phba->sli; 3386 psli = &phba->sli;
3368 3387
3369 mb = &pmbox->mb; 3388 mb = &pmbox->mb;
@@ -3728,6 +3747,10 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3728 if (unlikely(pci_channel_offline(phba->pcidev))) 3747 if (unlikely(pci_channel_offline(phba->pcidev)))
3729 return IOCB_ERROR; 3748 return IOCB_ERROR;
3730 3749
3750 /* If HBA has a deferred error attention, fail the iocb. */
3751 if (unlikely(phba->hba_flag & DEFER_ERATT))
3752 return IOCB_ERROR;
3753
3731 /* 3754 /*
3732 * We should never get an IOCB if we are in a < LINK_DOWN state 3755 * We should never get an IOCB if we are in a < LINK_DOWN state
3733 */ 3756 */
@@ -3906,6 +3929,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
3906 uint16_t temp; 3929 uint16_t temp;
3907 struct temp_event temp_event_data; 3930 struct temp_event temp_event_data;
3908 struct Scsi_Host *shost; 3931 struct Scsi_Host *shost;
3932 uint32_t *iocb_w;
3909 3933
3910 icmd = &iocbq->iocb; 3934 icmd = &iocbq->iocb;
3911 evt_code = icmd->un.asyncstat.evt_code; 3935 evt_code = icmd->un.asyncstat.evt_code;
@@ -3913,13 +3937,23 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
3913 3937
3914 if ((evt_code != ASYNC_TEMP_WARN) && 3938 if ((evt_code != ASYNC_TEMP_WARN) &&
3915 (evt_code != ASYNC_TEMP_SAFE)) { 3939 (evt_code != ASYNC_TEMP_SAFE)) {
3940 iocb_w = (uint32_t *) icmd;
3916 lpfc_printf_log(phba, 3941 lpfc_printf_log(phba,
3917 KERN_ERR, 3942 KERN_ERR,
3918 LOG_SLI, 3943 LOG_SLI,
3919 "0346 Ring %d handler: unexpected ASYNC_STATUS" 3944 "0346 Ring %d handler: unexpected ASYNC_STATUS"
3920 " evt_code 0x%x\n", 3945 " evt_code 0x%x \n"
3946 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
3947 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
3948 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
3949 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
3921 pring->ringno, 3950 pring->ringno,
3922 icmd->un.asyncstat.evt_code); 3951 icmd->un.asyncstat.evt_code,
3952 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
3953 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
3954 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
3955 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
3956
3923 return; 3957 return;
3924 } 3958 }
3925 temp_event_data.data = (uint32_t)temp; 3959 temp_event_data.data = (uint32_t)temp;
@@ -4178,17 +4212,9 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
4178 4212
4179 spin_unlock_irqrestore(&phba->hbalock, flags); 4213 spin_unlock_irqrestore(&phba->hbalock, flags);
4180 4214
4181 while (!list_empty(&completions)) { 4215 /* Cancel all the IOCBs from the completions list */
4182 list_remove_head(&completions, iocb, struct lpfc_iocbq, list); 4216 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4183 4217 IOERR_SLI_DOWN);
4184 if (!iocb->iocb_cmpl)
4185 lpfc_sli_release_iocbq(phba, iocb);
4186 else {
4187 iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
4188 iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
4189 (iocb->iocb_cmpl) (phba, iocb, iocb);
4190 }
4191 }
4192 return 1; 4218 return 1;
4193} 4219}
4194 4220
@@ -4215,8 +4241,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4215 struct lpfc_sli_ring *pring; 4241 struct lpfc_sli_ring *pring;
4216 struct lpfc_dmabuf *buf_ptr; 4242 struct lpfc_dmabuf *buf_ptr;
4217 LPFC_MBOXQ_t *pmb; 4243 LPFC_MBOXQ_t *pmb;
4218 struct lpfc_iocbq *iocb;
4219 IOCB_t *cmd = NULL;
4220 int i; 4244 int i;
4221 unsigned long flags = 0; 4245 unsigned long flags = 0;
4222 4246
@@ -4244,18 +4268,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4244 } 4268 }
4245 spin_unlock_irqrestore(&phba->hbalock, flags); 4269 spin_unlock_irqrestore(&phba->hbalock, flags);
4246 4270
4247 while (!list_empty(&completions)) { 4271 /* Cancel all the IOCBs from the completions list */
4248 list_remove_head(&completions, iocb, struct lpfc_iocbq, list); 4272 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4249 cmd = &iocb->iocb; 4273 IOERR_SLI_DOWN);
4250
4251 if (!iocb->iocb_cmpl)
4252 lpfc_sli_release_iocbq(phba, iocb);
4253 else {
4254 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4255 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
4256 (iocb->iocb_cmpl) (phba, iocb, iocb);
4257 }
4258 }
4259 4274
4260 spin_lock_irqsave(&phba->hbalock, flags); 4275 spin_lock_irqsave(&phba->hbalock, flags);
4261 list_splice_init(&phba->elsbuf, &completions); 4276 list_splice_init(&phba->elsbuf, &completions);
@@ -5137,11 +5152,31 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5137 return 0; 5152 return 0;
5138 } 5153 }
5139 5154
5155 /*
5156 * If there is deferred error attention, do not check for error
5157 * attention
5158 */
5159 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5160 spin_unlock_irq(&phba->hbalock);
5161 return 0;
5162 }
5163
5140 /* Read chip Host Attention (HA) register */ 5164 /* Read chip Host Attention (HA) register */
5141 ha_copy = readl(phba->HAregaddr); 5165 ha_copy = readl(phba->HAregaddr);
5142 if (ha_copy & HA_ERATT) { 5166 if (ha_copy & HA_ERATT) {
5143 /* Read host status register to retrieve error event */ 5167 /* Read host status register to retrieve error event */
5144 lpfc_sli_read_hs(phba); 5168 lpfc_sli_read_hs(phba);
5169
5170 /* Check if a deferred error condition is active */
5171 if ((HS_FFER1 & phba->work_hs) &&
5172 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5173 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5174 phba->hba_flag |= DEFER_ERATT;
5175 /* Clear all interrupt enable conditions */
5176 writel(0, phba->HCregaddr);
5177 readl(phba->HCregaddr);
5178 }
5179
5145 /* Set the driver HA work bitmap */ 5180 /* Set the driver HA work bitmap */
5146 phba->work_ha |= HA_ERATT; 5181 phba->work_ha |= HA_ERATT;
5147 /* Indicate polling handles this ERATT */ 5182 /* Indicate polling handles this ERATT */
@@ -5230,6 +5265,16 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5230 /* Indicate interrupt handler handles ERATT */ 5265 /* Indicate interrupt handler handles ERATT */
5231 phba->hba_flag |= HBA_ERATT_HANDLED; 5266 phba->hba_flag |= HBA_ERATT_HANDLED;
5232 } 5267 }
5268
5269 /*
5270 * If there is deferred error attention, do not check for any
5271 * interrupt.
5272 */
5273 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5274 spin_unlock_irq(&phba->hbalock);
5275 return IRQ_NONE;
5276 }
5277
5233 /* Clear up only attention source related to slow-path */ 5278 /* Clear up only attention source related to slow-path */
5234 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 5279 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
5235 phba->HAregaddr); 5280 phba->HAregaddr);
@@ -5301,8 +5346,22 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5301 } 5346 }
5302 } 5347 }
5303 spin_lock_irqsave(&phba->hbalock, iflag); 5348 spin_lock_irqsave(&phba->hbalock, iflag);
5304 if (work_ha_copy & HA_ERATT) 5349 if (work_ha_copy & HA_ERATT) {
5305 lpfc_sli_read_hs(phba); 5350 lpfc_sli_read_hs(phba);
5351 /*
5352 * Check if a deferred error condition is
5353 * active
5354 */
5355 if ((HS_FFER1 & phba->work_hs) &&
5356 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5357 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5358 phba->hba_flag |= DEFER_ERATT;
5359 /* Clear all interrupt enable conditions */
5360 writel(0, phba->HCregaddr);
5361 readl(phba->HCregaddr);
5362 }
5363 }
5364
5306 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 5365 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
5307 pmb = phba->sli.mbox_active; 5366 pmb = phba->sli.mbox_active;
5308 pmbox = &pmb->mb; 5367 pmbox = &pmb->mb;
@@ -5466,6 +5525,14 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5466 ha_copy = readl(phba->HAregaddr); 5525 ha_copy = readl(phba->HAregaddr);
5467 /* Clear up only attention source related to fast-path */ 5526 /* Clear up only attention source related to fast-path */
5468 spin_lock_irqsave(&phba->hbalock, iflag); 5527 spin_lock_irqsave(&phba->hbalock, iflag);
5528 /*
5529 * If there is deferred error attention, do not check for
5530 * any interrupt.
5531 */
5532 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5533 spin_unlock_irq(&phba->hbalock);
5534 return IRQ_NONE;
5535 }
5469 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 5536 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
5470 phba->HAregaddr); 5537 phba->HAregaddr);
5471 readl(phba->HAregaddr); /* flush */ 5538 readl(phba->HAregaddr); /* flush */
@@ -5558,6 +5625,14 @@ lpfc_intr_handler(int irq, void *dev_id)
5558 phba->hba_flag |= HBA_ERATT_HANDLED; 5625 phba->hba_flag |= HBA_ERATT_HANDLED;
5559 } 5626 }
5560 5627
5628 /*
5629 * If there is deferred error attention, do not check for any interrupt.
5630 */
5631 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5632 spin_unlock_irq(&phba->hbalock);
5633 return IRQ_NONE;
5634 }
5635
5561 /* Clear attention sources except link and error attentions */ 5636 /* Clear attention sources except link and error attentions */
5562 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 5637 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
5563 readl(phba->HAregaddr); /* flush */ 5638 readl(phba->HAregaddr); /* flush */