Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  372
1 file changed, 242 insertions, 130 deletions
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c81c2b3228d6..dc042bd97baa 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -57,6 +57,7 @@ static uint8_t lpfcAlpaArray[] = {
 };
 
 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
+static void lpfc_disc_flush_list(struct lpfc_vport *vport);
 
 void
 lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -107,20 +108,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
         struct lpfc_nodelist * ndlp;
         struct lpfc_vport *vport;
         struct lpfc_hba *phba;
-        struct completion devloss_compl;
         struct lpfc_work_evt *evtp;
+        int put_node;
+        int put_rport;
 
         rdata = rport->dd_data;
         ndlp = rdata->pnode;
-
-        if (!ndlp) {
-                if (rport->scsi_target_id != -1) {
-                        printk(KERN_ERR "Cannot find remote node"
-                        " for rport in dev_loss_tmo_callbk x%x\n",
-                        rport->port_id);
-                }
+        if (!ndlp)
                 return;
-        }
 
         vport = ndlp->vport;
         phba = vport->phba;
@@ -129,15 +124,35 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                 "rport devlosscb: sid:x%x did:x%x flg:x%x",
                 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
 
-        init_completion(&devloss_compl);
+        /* Don't defer this if we are in the process of deleting the vport
+         * or unloading the driver. The unload will cleanup the node
+         * appropriately we just need to cleanup the ndlp rport info here.
+         */
+        if (vport->load_flag & FC_UNLOADING) {
+                put_node = rdata->pnode != NULL;
+                put_rport = ndlp->rport != NULL;
+                rdata->pnode = NULL;
+                ndlp->rport = NULL;
+                if (put_node)
+                        lpfc_nlp_put(ndlp);
+                if (put_rport)
+                        put_device(&rport->dev);
+                return;
+        }
+
+        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+                return;
+
         evtp = &ndlp->dev_loss_evt;
 
         if (!list_empty(&evtp->evt_listp))
                 return;
 
         spin_lock_irq(&phba->hbalock);
-        evtp->evt_arg1 = ndlp;
-        evtp->evt_arg2 = &devloss_compl;
+        /* We need to hold the node by incrementing the reference
+         * count until this queued work is done
+         */
+        evtp->evt_arg1 = lpfc_nlp_get(ndlp);
         evtp->evt = LPFC_EVT_DEV_LOSS;
         list_add_tail(&evtp->evt_listp, &phba->work_list);
         if (phba->work_wait)
@@ -145,8 +160,6 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 
         spin_unlock_irq(&phba->hbalock);
 
-        wait_for_completion(&devloss_compl);
-
         return;
 }
 
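The hunks above replace the completion-based handshake (init_completion() ... wait_for_completion()) with a node reference held across the queued event: lpfc_dev_loss_tmo_callbk() now takes lpfc_nlp_get(ndlp) before queuing LPFC_EVT_DEV_LOSS, and the worker thread drops that reference after the handler runs, so the transport callback no longer blocks and the ndlp cannot be freed while the work item is pending. Below is a minimal userspace sketch of that pattern, not lpfc code: node, node_get, node_put and the single work slot are hypothetical stand-ins for ndlp, lpfc_nlp_get, lpfc_nlp_put and the phba work list, with C11 atomics in place of the kernel's kref.

/* Sketch: hold a reference on an object for as long as a work item
 * referring to it sits on a queue.  Build with: cc -std=c11 sketch.c */
#include <stdatomic.h>
#include <stdlib.h>

struct node {
        atomic_int refcnt;              /* plays the role of ndlp->kref */
};

static struct node *node_get(struct node *n)
{
        if (n)
                atomic_fetch_add(&n->refcnt, 1);
        return n;
}

static void node_put(struct node *n)
{
        /* last put frees the object, like kref_put -> lpfc_nlp_release */
        if (n && atomic_fetch_sub(&n->refcnt, 1) == 1)
                free(n);
}

static struct node *queued_work;        /* stands in for phba->work_list */

static void queue_dev_loss(struct node *n)
{
        queued_work = node_get(n);      /* reference held for the queued work */
}

static void worker(void)
{
        struct node *n = queued_work;

        queued_work = NULL;
        /* ... handle the dev-loss event ... */
        node_put(n);                    /* drop the queued-work reference */
}

int main(void)
{
        struct node *n = calloc(1, sizeof(*n));

        atomic_init(&n->refcnt, 1);     /* creator's reference */
        queue_dev_loss(n);
        node_put(n);                    /* creator may drop its reference early */
        worker();                       /* object stays valid until this put */
        return 0;
}

The pairing rule is the same one the patch enforces in the driver: the code that queues the work takes the reference, and only the code that consumes the work item puts it.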
@@ -154,7 +167,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
  * This function is called from the worker thread when dev_loss_tmo
  * expire.
  */
-void
+static void
 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 {
         struct lpfc_rport_data *rdata;
@@ -162,6 +175,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
         struct lpfc_vport *vport;
         struct lpfc_hba *phba;
         uint8_t *name;
+        int put_node;
+        int put_rport;
         int warn_on = 0;
 
         rport = ndlp->rport;
@@ -178,14 +193,32 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                 "rport devlosstmo:did:x%x type:x%x id:x%x",
                 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
 
-        if (!(vport->load_flag & FC_UNLOADING) &&
-            ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+        /* Don't defer this if we are in the process of deleting the vport
+         * or unloading the driver. The unload will cleanup the node
+         * appropriately we just need to cleanup the ndlp rport info here.
+         */
+        if (vport->load_flag & FC_UNLOADING) {
+                if (ndlp->nlp_sid != NLP_NO_SID) {
+                        /* flush the target */
+                        lpfc_sli_abort_iocb(vport,
+                                        &phba->sli.ring[phba->sli.fcp_ring],
+                                        ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+                }
+                put_node = rdata->pnode != NULL;
+                put_rport = ndlp->rport != NULL;
+                rdata->pnode = NULL;
+                ndlp->rport = NULL;
+                if (put_node)
+                        lpfc_nlp_put(ndlp);
+                if (put_rport)
+                        put_device(&rport->dev);
                 return;
+        }
 
-        if (ndlp->nlp_type & NLP_FABRIC) {
-                int put_node;
-                int put_rport;
+        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+                return;
 
+        if (ndlp->nlp_type & NLP_FABRIC) {
                 /* We will clean up these Nodes in linkup */
                 put_node = rdata->pnode != NULL;
                 put_rport = ndlp->rport != NULL;
@@ -227,23 +260,20 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                                  ndlp->nlp_state, ndlp->nlp_rpi);
         }
 
+        put_node = rdata->pnode != NULL;
+        put_rport = ndlp->rport != NULL;
+        rdata->pnode = NULL;
+        ndlp->rport = NULL;
+        if (put_node)
+                lpfc_nlp_put(ndlp);
+        if (put_rport)
+                put_device(&rport->dev);
+
         if (!(vport->load_flag & FC_UNLOADING) &&
             !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
             !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
+            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) {
                 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
-        else {
-                int put_node;
-                int put_rport;
-
-                put_node = rdata->pnode != NULL;
-                put_rport = ndlp->rport != NULL;
-                rdata->pnode = NULL;
-                ndlp->rport = NULL;
-                if (put_node)
-                        lpfc_nlp_put(ndlp);
-                if (put_rport)
-                        put_device(&rport->dev);
         }
 }
 
@@ -260,7 +290,6 @@ lpfc_work_list_done(struct lpfc_hba *phba)
 {
         struct lpfc_work_evt *evtp = NULL;
         struct lpfc_nodelist *ndlp;
-        struct lpfc_vport *vport;
         int free_evt;
 
         spin_lock_irq(&phba->hbalock);
@@ -270,35 +299,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
                 spin_unlock_irq(&phba->hbalock);
                 free_evt = 1;
                 switch (evtp->evt) {
-                case LPFC_EVT_DEV_LOSS_DELAY:
-                        free_evt = 0; /* evt is part of ndlp */
-                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
-                        vport = ndlp->vport;
-                        if (!vport)
-                                break;
-
-                        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
-                                "rport devlossdly:did:x%x flg:x%x",
-                                ndlp->nlp_DID, ndlp->nlp_flag, 0);
-
-                        if (!(vport->load_flag & FC_UNLOADING) &&
-                            !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
-                            !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
-                                lpfc_disc_state_machine(vport, ndlp, NULL,
-                                        NLP_EVT_DEVICE_RM);
-                        }
-                        break;
                 case LPFC_EVT_ELS_RETRY:
                         ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                         lpfc_els_retry_delay_handler(ndlp);
                         free_evt = 0; /* evt is part of ndlp */
+                        /* decrement the node reference count held
+                         * for this queued work
+                         */
+                        lpfc_nlp_put(ndlp);
                         break;
                 case LPFC_EVT_DEV_LOSS:
                         ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-                        lpfc_nlp_get(ndlp);
                         lpfc_dev_loss_tmo_handler(ndlp);
                         free_evt = 0;
-                        complete((struct completion *)(evtp->evt_arg2));
+                        /* decrement the node reference count held for
+                         * this queued work
+                         */
                         lpfc_nlp_put(ndlp);
                         break;
                 case LPFC_EVT_ONLINE:
@@ -373,7 +389,7 @@ lpfc_work_done(struct lpfc_hba *phba)
                 lpfc_handle_latt(phba);
         vports = lpfc_create_vport_work_array(phba);
         if (vports != NULL)
-                for(i = 0; i < LPFC_MAX_VPORTS; i++) {
+                for(i = 0; i <= phba->max_vpi; i++) {
                         /*
                          * We could have no vports in array if unloading, so if
                          * this happens then just use the pport
@@ -405,14 +421,14 @@ lpfc_work_done(struct lpfc_hba *phba)
                 vport->work_port_events &= ~work_port_events;
                 spin_unlock_irq(&vport->work_port_lock);
         }
-        lpfc_destroy_vport_work_array(vports);
+        lpfc_destroy_vport_work_array(phba, vports);
 
         pring = &phba->sli.ring[LPFC_ELS_RING];
         status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
         status >>= (4*LPFC_ELS_RING);
         if ((status & HA_RXMASK)
                 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
-                if (pring->flag & LPFC_STOP_IOCB_MASK) {
+                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
                 } else {
                         lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -544,6 +560,7 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 void
 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 {
+        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
         struct lpfc_hba *phba = vport->phba;
         struct lpfc_nodelist *ndlp, *next_ndlp;
         int rc;
@@ -552,7 +569,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                         continue;
 
-                if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
+                if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
+                    ((vport->port_type == LPFC_NPIV_PORT) &&
+                     (ndlp->nlp_DID == NameServer_DID)))
                         lpfc_unreg_rpi(vport, ndlp);
 
                 /* Leave Fabric nodes alone on link down */
@@ -565,14 +584,30 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
         }
         if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
                 lpfc_mbx_unreg_vpi(vport);
+                spin_lock_irq(shost->host_lock);
                 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+                spin_unlock_irq(shost->host_lock);
         }
 }
 
+void
+lpfc_port_link_failure(struct lpfc_vport *vport)
+{
+        /* Cleanup any outstanding RSCN activity */
+        lpfc_els_flush_rscn(vport);
+
+        /* Cleanup any outstanding ELS commands */
+        lpfc_els_flush_cmd(vport);
+
+        lpfc_cleanup_rpis(vport, 0);
+
+        /* Turn off discovery timer if its running */
+        lpfc_can_disctmo(vport);
+}
+
 static void
 lpfc_linkdown_port(struct lpfc_vport *vport)
 {
-        struct lpfc_nodelist *ndlp, *next_ndlp;
         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
         fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
@@ -581,21 +616,8 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
                 "Link Down: state:x%x rtry:x%x flg:x%x",
                 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
 
-        /* Cleanup any outstanding RSCN activity */
-        lpfc_els_flush_rscn(vport);
-
-        /* Cleanup any outstanding ELS commands */
-        lpfc_els_flush_cmd(vport);
+        lpfc_port_link_failure(vport);
 
-        lpfc_cleanup_rpis(vport, 0);
-
-        /* free any ndlp's on unused list */
-        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
-                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
-                        lpfc_drop_node(vport, ndlp);
-
-        /* Turn off discovery timer if its running */
-        lpfc_can_disctmo(vport);
 }
 
 int
@@ -618,18 +640,18 @@ lpfc_linkdown(struct lpfc_hba *phba)
         spin_unlock_irq(&phba->hbalock);
         vports = lpfc_create_vport_work_array(phba);
         if (vports != NULL)
-                for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+                for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
                         /* Issue a LINK DOWN event to all nodes */
                         lpfc_linkdown_port(vports[i]);
                 }
-        lpfc_destroy_vport_work_array(vports);
+        lpfc_destroy_vport_work_array(phba, vports);
         /* Clean up any firmware default rpi's */
         mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
         if (mb) {
                 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
                 mb->vport = vport;
                 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-                if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                     == MBX_NOT_FINISHED) {
                         mempool_free(mb, phba->mbox_mem_pool);
                 }
@@ -643,8 +665,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
                 lpfc_config_link(phba, mb);
                 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                 mb->vport = vport;
-                if (lpfc_sli_issue_mbox(phba, mb,
-                                        (MBX_NOWAIT | MBX_STOP_IOCB))
+                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                     == MBX_NOT_FINISHED) {
                         mempool_free(mb, phba->mbox_mem_pool);
                 }
@@ -686,7 +707,6 @@ static void
 lpfc_linkup_port(struct lpfc_vport *vport)
 {
         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-        struct lpfc_nodelist *ndlp, *next_ndlp;
         struct lpfc_hba *phba = vport->phba;
 
         if ((vport->load_flag & FC_UNLOADING) != 0)
@@ -713,11 +733,6 @@ lpfc_linkup_port(struct lpfc_vport *vport)
         if (vport->fc_flag & FC_LBIT)
                 lpfc_linkup_cleanup_nodes(vport);
 
-        /* free any ndlp's in unused state */
-        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
-                                 nlp_listp)
-                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
-                        lpfc_drop_node(vport, ndlp);
 }
 
 static int
@@ -734,9 +749,9 @@ lpfc_linkup(struct lpfc_hba *phba)
 
         vports = lpfc_create_vport_work_array(phba);
         if (vports != NULL)
-                for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
+                for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
                         lpfc_linkup_port(vports[i]);
-        lpfc_destroy_vport_work_array(vports);
+        lpfc_destroy_vport_work_array(phba, vports);
         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                 lpfc_issue_clear_la(phba, phba->pport);
 
@@ -749,7 +764,7 @@ lpfc_linkup(struct lpfc_hba *phba)
  * as the completion routine when the command is
  * handed off to the SLI layer.
  */
-void
+static void
 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
         struct lpfc_vport *vport = pmb->vport;
@@ -852,8 +867,6 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
          * LPFC_FLOGI while waiting for FLOGI cmpl
          */
         if (vport->port_state != LPFC_FLOGI) {
-                vport->port_state = LPFC_FLOGI;
-                lpfc_set_disctmo(vport);
                 lpfc_initial_flogi(vport);
         }
         return;
@@ -1022,8 +1035,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
                 lpfc_read_sparam(phba, sparam_mbox, 0);
                 sparam_mbox->vport = vport;
                 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
-                rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
-                                         (MBX_NOWAIT | MBX_STOP_IOCB));
+                rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
                 if (rc == MBX_NOT_FINISHED) {
                         mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
                         lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1040,8 +1052,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
                 lpfc_config_link(phba, cfglink_mbox);
                 cfglink_mbox->vport = vport;
                 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
-                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
-                                         (MBX_NOWAIT | MBX_STOP_IOCB));
+                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
                 if (rc != MBX_NOT_FINISHED)
                         return;
                 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
@@ -1174,6 +1185,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
         lpfc_mbuf_free(phba, mp->virt, mp->phys);
         kfree(mp);
         mempool_free(pmb, phba->mbox_mem_pool);
+        /* decrement the node reference count held for this callback
+         * function.
+         */
         lpfc_nlp_put(ndlp);
 
         return;
@@ -1219,7 +1233,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
         lpfc_unreg_vpi(phba, vport->vpi, mbox);
         mbox->vport = vport;
         mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
-        rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
         if (rc == MBX_NOT_FINISHED) {
                 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                  "1800 Could not issue unreg_vpi\n");
@@ -1319,7 +1333,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                 vports = lpfc_create_vport_work_array(phba);
                 if (vports != NULL)
                         for(i = 0;
-                            i < LPFC_MAX_VPORTS && vports[i] != NULL;
+                            i <= phba->max_vpi && vports[i] != NULL;
                             i++) {
                                 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
                                         continue;
@@ -1335,7 +1349,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                                                 "Fabric support\n");
                         }
                 }
-                lpfc_destroy_vport_work_array(vports);
+                lpfc_destroy_vport_work_array(phba, vports);
                 lpfc_do_scr_ns_plogi(phba, vport);
         }
 
@@ -1361,11 +1375,16 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
         if (mb->mbxStatus) {
 out:
+                /* decrement the node reference count held for this
+                 * callback function.
+                 */
                 lpfc_nlp_put(ndlp);
                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
                 kfree(mp);
                 mempool_free(pmb, phba->mbox_mem_pool);
-                lpfc_drop_node(vport, ndlp);
+
+                /* If no other thread is using the ndlp, free it */
+                lpfc_nlp_not_used(ndlp);
 
                 if (phba->fc_topology == TOPOLOGY_LOOP) {
                         /*
@@ -1410,6 +1429,9 @@ out:
                 goto out;
         }
 
+        /* decrement the node reference count held for this
+         * callback function.
+         */
         lpfc_nlp_put(ndlp);
         lpfc_mbuf_free(phba, mp->virt, mp->phys);
         kfree(mp);
@@ -1656,8 +1678,18 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 void
 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
+        /*
+         * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
+         * be used if we wish to issue the "last" lpfc_nlp_put() to remove
+         * the ndlp from the vport. The ndlp marked as UNUSED on the list
+         * until ALL other outstanding threads have completed. We check
+         * that the ndlp not already in the UNUSED state before we proceed.
+         */
+        if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+                return;
         lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
         lpfc_nlp_put(ndlp);
+        return;
 }
 
 /*
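The guard added to lpfc_drop_node() makes the "last put" idempotent: the first drop parks the node in the UNUSED state and surrenders the list's reference, and any later drop request must return without touching the count, or the node would be over-released while other threads still hold it. In the same userspace model as the earlier sketch (node and node_put as defined there; the state field and NODE_UNUSED are hypothetical stand-ins for nlp_state and NLP_STE_UNUSED_NODE):

/* Builds on the earlier sketch; assumes struct node gained "int state;". */
enum { NODE_ACTIVE, NODE_UNUSED };

static void node_drop(struct node *n)
{
        if (n->state == NODE_UNUSED)    /* already dropped once: no-op */
                return;
        n->state = NODE_UNUSED;         /* park until all holders finish */
        node_put(n);                    /* surrender the list's reference */
}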
@@ -1868,8 +1900,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                         lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
                         mbox->vport = vport;
                         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-                        rc = lpfc_sli_issue_mbox(phba, mbox,
-                                                 (MBX_NOWAIT | MBX_STOP_IOCB));
+                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                         if (rc == MBX_NOT_FINISHED)
                                 mempool_free(mbox, phba->mbox_mem_pool);
                 }
@@ -1892,8 +1923,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
                 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
                 mbox->vport = vport;
                 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-                rc = lpfc_sli_issue_mbox(phba, mbox,
-                                         (MBX_NOWAIT | MBX_STOP_IOCB));
+                mbox->context1 = NULL;
+                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
                 if (rc == MBX_NOT_FINISHED) {
                         mempool_free(mbox, phba->mbox_mem_pool);
                 }
@@ -1912,8 +1943,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
                 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
                 mbox->vport = vport;
                 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-                rc = lpfc_sli_issue_mbox(phba, mbox,
-                                         (MBX_NOWAIT | MBX_STOP_IOCB));
+                mbox->context1 = NULL;
+                rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
                 if (rc == MBX_NOT_FINISHED) {
                         lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                          "1815 Could not issue "
@@ -1981,11 +2012,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
         if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
                 list_del_init(&ndlp->dev_loss_evt.evt_listp);
 
-        if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
-                list_del_init(&ndlp->dev_loss_evt.evt_listp);
-                complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
-        }
-
         lpfc_unreg_rpi(vport, ndlp);
 
         return 0;
@@ -1999,12 +2025,39 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 static void
 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
+        struct lpfc_hba *phba = vport->phba;
         struct lpfc_rport_data *rdata;
+        LPFC_MBOXQ_t *mbox;
+        int rc;
 
         if (ndlp->nlp_flag & NLP_DELAY_TMO) {
                 lpfc_cancel_retry_delay_tmo(vport, ndlp);
         }
 
+        if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
+                /* For this case we need to cleanup the default rpi
+                 * allocated by the firmware.
+                 */
+                if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
+                        != NULL) {
+                        rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
+                            (uint8_t *) &vport->fc_sparam, mbox, 0);
+                        if (rc) {
+                                mempool_free(mbox, phba->mbox_mem_pool);
+                        }
+                        else {
+                                mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+                                mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+                                mbox->vport = vport;
+                                mbox->context2 = NULL;
+                                rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+                                if (rc == MBX_NOT_FINISHED) {
+                                        mempool_free(mbox, phba->mbox_mem_pool);
+                                }
+                        }
+                }
+        }
+
         lpfc_cleanup_node(vport, ndlp);
 
         /*
@@ -2132,6 +2185,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
         }
         if (vport->fc_flag & FC_RSCN_MODE) {
                 if (lpfc_rscn_payload_check(vport, did)) {
+                        /* If we've already recieved a PLOGI from this NPort
+                         * we don't need to try to discover it again.
+                         */
+                        if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+                                return NULL;
+
                         spin_lock_irq(shost->host_lock);
                         ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                         spin_unlock_irq(shost->host_lock);
@@ -2144,8 +2203,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
                 } else
                         ndlp = NULL;
         } else {
+                /* If we've already recieved a PLOGI from this NPort,
+                 * or we are already in the process of discovery on it,
+                 * we don't need to try to discover it again.
+                 */
                 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
-                    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
+                    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+                    ndlp->nlp_flag & NLP_RCV_PLOGI)
                         return NULL;
                 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                 spin_lock_irq(shost->host_lock);
@@ -2220,8 +2284,7 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
                 lpfc_clear_la(phba, mbox);
                 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
                 mbox->vport = vport;
-                rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
-                                                      MBX_STOP_IOCB));
+                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                 if (rc == MBX_NOT_FINISHED) {
                         mempool_free(mbox, phba->mbox_mem_pool);
                         lpfc_disc_flush_list(vport);
@@ -2244,8 +2307,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
         lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
         regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
         regvpimbox->vport = vport;
-        if (lpfc_sli_issue_mbox(phba, regvpimbox,
-                                (MBX_NOWAIT | MBX_STOP_IOCB))
+        if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
             == MBX_NOT_FINISHED) {
                 mempool_free(regvpimbox, phba->mbox_mem_pool);
         }
@@ -2414,7 +2476,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
         }
 }
 
-void
+static void
 lpfc_disc_flush_list(struct lpfc_vport *vport)
 {
         struct lpfc_nodelist *ndlp, *next_ndlp;
@@ -2426,7 +2488,6 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
                         if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
                             ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
                                 lpfc_free_tx(phba, ndlp);
-                                lpfc_nlp_put(ndlp);
                         }
                 }
         }
@@ -2516,6 +2577,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
                         if (ndlp->nlp_type & NLP_FABRIC) {
                                 /* Clean up the ndlp on Fabric connections */
                                 lpfc_drop_node(vport, ndlp);
+
                         } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                                 /* Fail outstanding IO now since device
                                  * is marked for PLOGI.
@@ -2524,9 +2586,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
                         }
                 }
                 if (vport->port_state != LPFC_FLOGI) {
-                        vport->port_state = LPFC_FLOGI;
-                        lpfc_set_disctmo(vport);
                         lpfc_initial_flogi(vport);
+                        return;
                 }
                 break;
 
@@ -2536,7 +2597,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
                 /* Initial FLOGI timeout */
                 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                  "0222 Initial %s timeout\n",
-                                 vport->vpi ? "FLOGI" : "FDISC");
+                                 vport->vpi ? "FDISC" : "FLOGI");
 
                 /* Assume no Fabric and go on with discovery.
                  * Check for outstanding ELS FLOGI to abort.
@@ -2558,10 +2619,10 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
                 /* Next look for NameServer ndlp */
                 ndlp = lpfc_findnode_did(vport, NameServer_DID);
                 if (ndlp)
-                        lpfc_nlp_put(ndlp);
-                /* Start discovery */
-                lpfc_disc_start(vport);
-                break;
+                        lpfc_els_abort(phba, ndlp);
+
+                /* ReStart discovery */
+                goto restart_disc;
 
         case LPFC_NS_QRY:
                 /* Check for wait for NameServer Rsp timeout */
@@ -2580,6 +2641,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
                 }
                 vport->fc_ns_retry = 0;
 
+restart_disc:
                 /*
                  * Discovery is over.
                  * set port_state to PORT_READY if SLI2.
@@ -2608,8 +2670,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
                 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
                 initlinkmbox->vport = vport;
                 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-                rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
-                                         (MBX_NOWAIT | MBX_STOP_IOCB));
+                rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
                 lpfc_set_loopback_flag(phba);
                 if (rc == MBX_NOT_FINISHED)
                         mempool_free(initlinkmbox, phba->mbox_mem_pool);
@@ -2664,12 +2725,14 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
                 clrlaerr = 1;
                 break;
 
+        case LPFC_LINK_UP:
+                lpfc_issue_clear_la(phba, vport);
+                /* Drop thru */
         case LPFC_LINK_UNKNOWN:
         case LPFC_WARM_START:
         case LPFC_INIT_START:
         case LPFC_INIT_MBX_CMDS:
         case LPFC_LINK_DOWN:
-        case LPFC_LINK_UP:
         case LPFC_HBA_ERROR:
                 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                  "0230 Unexpected timeout, hba link "
@@ -2723,7 +2786,9 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
         else
                 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
 
-        /* Mailbox took a reference to the node */
+        /* decrement the node reference count held for this callback
+         * function.
+         */
         lpfc_nlp_put(ndlp);
         lpfc_mbuf_free(phba, mp->virt, mp->phys);
         kfree(mp);
@@ -2747,19 +2812,19 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
                        sizeof(ndlp->nlp_portname)) == 0;
 }
 
-struct lpfc_nodelist *
+static struct lpfc_nodelist *
 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
 {
         struct lpfc_nodelist *ndlp;
 
         list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
-                if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
-                    filter(ndlp, param))
+                if (filter(ndlp, param))
                         return ndlp;
         }
         return NULL;
 }
 
+#if 0
 /*
  * Search node lists for a remote port matching filter criteria
  * Caller needs to hold host_lock before calling this routine.
@@ -2775,6 +2840,7 @@ lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
         spin_unlock_irq(shost->host_lock);
         return ndlp;
 }
+#endif /* 0 */
 
 /*
  * This routine looks up the ndlp lists for the given RPI. If rpi found it
@@ -2786,6 +2852,7 @@ __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
         return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
 }
 
+#if 0
 struct lpfc_nodelist *
 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
 {
@@ -2797,6 +2864,7 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
         spin_unlock_irq(shost->host_lock);
         return ndlp;
 }
+#endif /* 0 */
 
 /*
  * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
@@ -2837,6 +2905,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
         return;
 }
 
+/* This routine releases all resources associated with a specifc NPort's ndlp
+ * and mempool_free's the nodelist.
+ */
 static void
 lpfc_nlp_release(struct kref *kref)
 {
@@ -2851,16 +2922,57 @@ lpfc_nlp_release(struct kref *kref)
         mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
 }
 
+/* This routine bumps the reference count for a ndlp structure to ensure
+ * that one discovery thread won't free a ndlp while another discovery thread
+ * is using it.
+ */
 struct lpfc_nodelist *
 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
 {
-        if (ndlp)
+        if (ndlp) {
+                lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+                        "node get: did:x%x flg:x%x refcnt:x%x",
+                        ndlp->nlp_DID, ndlp->nlp_flag,
+                        atomic_read(&ndlp->kref.refcount));
                 kref_get(&ndlp->kref);
+        }
         return ndlp;
 }
 
+
+/* This routine decrements the reference count for a ndlp structure. If the
+ * count goes to 0, this indicates the the associated nodelist should be freed.
+ */
 int
 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
 {
+        if (ndlp) {
+                lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+                        "node put: did:x%x flg:x%x refcnt:x%x",
+                        ndlp->nlp_DID, ndlp->nlp_flag,
+                        atomic_read(&ndlp->kref.refcount));
+        }
         return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
 }
+
+/* This routine free's the specified nodelist if it is not in use
+ * by any other discovery thread. This routine returns 1 if the ndlp
+ * is not being used by anyone and has been freed. A return value of
+ * 0 indicates it is being used by another discovery thread and the
+ * refcount is left unchanged.
+ */
+int
+lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
+{
+        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+                "node not used: did:x%x flg:x%x refcnt:x%x",
+                ndlp->nlp_DID, ndlp->nlp_flag,
+                atomic_read(&ndlp->kref.refcount));
+
+        if (atomic_read(&ndlp->kref.refcount) == 1) {
+                lpfc_nlp_put(ndlp);
+                return 1;
+        }
+        return 0;
+}
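The new lpfc_nlp_not_used() frees a node only when the caller holds the sole remaining reference; otherwise the count is left unchanged and the release happens at the other holder's eventual put. Sketched in the same userspace model as the first example (node and node_put as defined there), with the same caveat that a refcount == 1 test is only meaningful where no other thread can take a new reference concurrently, which is how the driver uses it:

/* Returns 1 if the caller held the last reference and the node was freed,
 * 0 if other holders remain (refcount left unchanged). */
static int node_not_used(struct node *n)
{
        if (atomic_load(&n->refcnt) == 1) {
                node_put(n);            /* final put frees the node */
                return 1;
        }
        return 0;
}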