path: root/drivers/scsi/lpfc/lpfc_hbadisc.c
author    James Smart <James.Smart@Emulex.Com>    2007-06-17 20:56:38 -0400
committer James Bottomley <jejb@mulgrave.il.steeleye.com>    2007-06-17 23:27:39 -0400
commit    92d7f7b0cde3ad2260e7462b40867b57efd49851 (patch)
tree      fadb1d8f1a817c2f85937b5e9c3b830bdecb5555 /drivers/scsi/lpfc/lpfc_hbadisc.c
parent    ed957684294618602b48f1950b0c9bbcb036583f (diff)
[SCSI] lpfc: NPIV: add NPIV support on top of SLI-3
NPIV support is added to the driver. It uses the fc transport's interfaces for the creation and deletion of vports. Within the driver, a new Scsi_Host is created for each NPIV instance and is paired with a new instance of an FC port. This allows N FC port elements to share a single adapter.

Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
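The "interfaces of the fc transport" referred to above are the NPIV vport hooks in struct fc_function_template. The sketch below is a hypothetical illustration only, assuming the stock fc transport vport_create/vport_delete hooks: the handler names and bodies are placeholders, not the actual lpfc implementation, which lives outside this file.

#include <scsi/scsi_transport_fc.h>

/*
 * Hypothetical sketch, not the lpfc implementation: how an FC low-level
 * driver typically advertises NPIV vport create/delete handlers to the
 * fc transport class.
 */
static int example_vport_create(struct fc_vport *fc_vport, bool disable)
{
        /* Allocate a new Scsi_Host for the vport, pair it with a new
         * driver-side FC port object, then FDISC into the fabric to
         * obtain an N_Port_ID for this virtual port. */
        return 0;
}

static int example_vport_delete(struct fc_vport *fc_vport)
{
        /* Log the virtual port out of the fabric, unregister its VPI
         * and tear down the paired Scsi_Host. */
        return 0;
}

static struct fc_function_template example_fc_transport_template = {
        /* other template fields (attribute show flags, dd_* sizes) elided */
        .vport_create = example_vport_create,
        .vport_delete = example_vport_delete,
};

When a vport is created or deleted through the fc_host interface, the transport calls into these entry points; the driver then pairs each fc_vport with its own Scsi_Host and per-port discovery state, which is what the hunks below adjust throughout the discovery path.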
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
-rw-r--r--    drivers/scsi/lpfc/lpfc_hbadisc.c    1140
1 file changed, 745 insertions, 395 deletions
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 20b2a4905da..94ee9675b5b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -36,6 +36,7 @@
36#include "lpfc.h" 36#include "lpfc.h"
37#include "lpfc_logmsg.h" 37#include "lpfc_logmsg.h"
38#include "lpfc_crtn.h" 38#include "lpfc_crtn.h"
39#include "lpfc_vport.h"
39 40
40/* AlpaArray for assignment of scsid for scan-down and bind_method */ 41/* AlpaArray for assignment of scsid for scan-down and bind_method */
41static uint8_t lpfcAlpaArray[] = { 42static uint8_t lpfcAlpaArray[] = {
@@ -96,50 +97,68 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
96 int warn_on = 0; 97 int warn_on = 0;
97 struct lpfc_hba *phba; 98 struct lpfc_hba *phba;
98 struct lpfc_vport *vport; 99 struct lpfc_vport *vport;
100 int put_node;
101 int put_rport;
99 102
100 rdata = rport->dd_data; 103 rdata = rport->dd_data;
101 ndlp = rdata->pnode; 104 ndlp = rdata->pnode;
102 105
103 if (!ndlp) { 106 if (!ndlp) {
104 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) 107 if (rport->scsi_target_id != -1) {
105 printk(KERN_ERR "Cannot find remote node" 108 printk(KERN_ERR "Cannot find remote node"
106 " for rport in dev_loss_tmo_callbk x%x\n", 109 " for rport in dev_loss_tmo_callbk x%x\n",
107 rport->port_id); 110 rport->port_id);
111 }
108 return; 112 return;
109 } 113 }
110 114
111 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) 115 if (ndlp->nlp_type & NLP_FABRIC) {
116 /* We will clean up these Nodes in linkup */
117 put_node = rdata->pnode != NULL;
118 put_rport = ndlp->rport != NULL;
119 rdata->pnode = NULL;
120 ndlp->rport = NULL;
121 if (put_node)
122 lpfc_nlp_put(ndlp);
123 if (put_rport)
124 put_device(&rport->dev);
112 return; 125 return;
126 }
113 127
114 name = (uint8_t *)&ndlp->nlp_portname; 128 name = (uint8_t *)&ndlp->nlp_portname;
115 vport = ndlp->vport; 129 vport = ndlp->vport;
116 phba = vport->phba; 130 phba = vport->phba;
117 131
132 if (!(vport->load_flag & FC_UNLOADING) &&
133 ndlp->nlp_state == NLP_STE_MAPPED_NODE)
134 return;
135
136
118 if (ndlp->nlp_sid != NLP_NO_SID) { 137 if (ndlp->nlp_sid != NLP_NO_SID) {
119 warn_on = 1; 138 warn_on = 1;
120 /* flush the target */ 139 /* flush the target */
121 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 140 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
122 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 141 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
123 } 142 }
124 if (vport->load_flag & FC_UNLOADING) 143 if (vport->load_flag & FC_UNLOADING)
125 warn_on = 0; 144 warn_on = 0;
126 145
127 if (warn_on) { 146 if (warn_on) {
128 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 147 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
129 "%d:0203 Devloss timeout on " 148 "%d (%d):0203 Devloss timeout on "
130 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 149 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
131 "NPort x%x Data: x%x x%x x%x\n", 150 "NPort x%x Data: x%x x%x x%x\n",
132 phba->brd_no, 151 phba->brd_no, vport->vpi,
133 *name, *(name+1), *(name+2), *(name+3), 152 *name, *(name+1), *(name+2), *(name+3),
134 *(name+4), *(name+5), *(name+6), *(name+7), 153 *(name+4), *(name+5), *(name+6), *(name+7),
135 ndlp->nlp_DID, ndlp->nlp_flag, 154 ndlp->nlp_DID, ndlp->nlp_flag,
136 ndlp->nlp_state, ndlp->nlp_rpi); 155 ndlp->nlp_state, ndlp->nlp_rpi);
137 } else { 156 } else {
138 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 157 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
139 "%d:0204 Devloss timeout on " 158 "%d (%d):0204 Devloss timeout on "
140 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 159 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
141 "NPort x%x Data: x%x x%x x%x\n", 160 "NPort x%x Data: x%x x%x x%x\n",
142 phba->brd_no, 161 phba->brd_no, vport->vpi,
143 *name, *(name+1), *(name+2), *(name+3), 162 *name, *(name+1), *(name+2), *(name+3),
144 *(name+4), *(name+5), *(name+6), *(name+7), 163 *(name+4), *(name+5), *(name+6), *(name+7),
145 ndlp->nlp_DID, ndlp->nlp_flag, 164 ndlp->nlp_DID, ndlp->nlp_flag,
@@ -152,12 +171,23 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
152 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 171 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
153 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 172 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
154 else { 173 else {
174 put_node = rdata->pnode != NULL;
175 put_rport = ndlp->rport != NULL;
155 rdata->pnode = NULL; 176 rdata->pnode = NULL;
156 ndlp->rport = NULL; 177 ndlp->rport = NULL;
157 lpfc_nlp_put(ndlp); 178 if (put_node)
158 put_device(&rport->dev); 179 lpfc_nlp_put(ndlp);
180 if (put_rport)
181 put_device(&rport->dev);
159 } 182 }
183 return;
184}
160 185
186
187void
188lpfc_worker_wake_up(struct lpfc_hba *phba)
189{
190 wake_up(phba->work_wait);
161 return; 191 return;
162} 192}
163 193
@@ -166,6 +196,7 @@ lpfc_work_list_done(struct lpfc_hba *phba)
166{ 196{
167 struct lpfc_work_evt *evtp = NULL; 197 struct lpfc_work_evt *evtp = NULL;
168 struct lpfc_nodelist *ndlp; 198 struct lpfc_nodelist *ndlp;
199 struct lpfc_vport *vport;
169 int free_evt; 200 int free_evt;
170 201
171 spin_lock_irq(&phba->hbalock); 202 spin_lock_irq(&phba->hbalock);
@@ -175,10 +206,23 @@ lpfc_work_list_done(struct lpfc_hba *phba)
175 spin_unlock_irq(&phba->hbalock); 206 spin_unlock_irq(&phba->hbalock);
176 free_evt = 1; 207 free_evt = 1;
177 switch (evtp->evt) { 208 switch (evtp->evt) {
209 case LPFC_EVT_DEV_LOSS:
210 free_evt = 0; /* evt is part of ndlp */
211 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
212 vport = ndlp->vport;
213 if (!vport)
214 break;
215 if (!(vport->load_flag & FC_UNLOADING) &&
216 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
217 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
218 lpfc_disc_state_machine(vport, ndlp, NULL,
219 NLP_EVT_DEVICE_RM);
220 }
221 break;
178 case LPFC_EVT_ELS_RETRY: 222 case LPFC_EVT_ELS_RETRY:
179 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); 223 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
180 lpfc_els_retry_delay_handler(ndlp); 224 lpfc_els_retry_delay_handler(ndlp);
181 free_evt = 0; 225 free_evt = 0; /* evt is part of ndlp */
182 break; 226 break;
183 case LPFC_EVT_ONLINE: 227 case LPFC_EVT_ONLINE:
184 if (phba->link_state < LPFC_LINK_DOWN) 228 if (phba->link_state < LPFC_LINK_DOWN)
@@ -250,24 +294,43 @@ lpfc_work_done(struct lpfc_hba *phba)
250 if (ha_copy & HA_LATT) 294 if (ha_copy & HA_LATT)
251 lpfc_handle_latt(phba); 295 lpfc_handle_latt(phba);
252 296
253 vport = phba->pport; 297 spin_lock_irq(&phba->hbalock);
298 list_for_each_entry(vport, &phba->port_list, listentry) {
299 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
254 300
255 work_port_events = vport->work_port_events; 301 if (!scsi_host_get(shost)) {
302 continue;
303 }
304 spin_unlock_irq(&phba->hbalock);
305 work_port_events = vport->work_port_events;
256 306
257 if (work_port_events & WORKER_DISC_TMO) 307 if (work_port_events & WORKER_DISC_TMO)
258 lpfc_disc_timeout_handler(vport); 308 lpfc_disc_timeout_handler(vport);
259 309
260 if (work_port_events & WORKER_ELS_TMO) 310 if (work_port_events & WORKER_ELS_TMO)
261 lpfc_els_timeout_handler(vport); 311 lpfc_els_timeout_handler(vport);
262 312
263 if (work_port_events & WORKER_MBOX_TMO) 313 if (work_port_events & WORKER_MBOX_TMO)
264 lpfc_mbox_timeout_handler(phba); 314 lpfc_mbox_timeout_handler(phba);
265 315
266 if (work_port_events & WORKER_FDMI_TMO) 316 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
267 lpfc_fdmi_timeout_handler(vport); 317 lpfc_unblock_fabric_iocbs(phba);
268 318
269 spin_lock_irq(&phba->hbalock); 319 if (work_port_events & WORKER_FDMI_TMO)
270 vport->work_port_events &= ~work_port_events; 320 lpfc_fdmi_timeout_handler(vport);
321
322 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
323 lpfc_ramp_down_queue_handler(phba);
324
325 if (work_port_events & WORKER_RAMP_UP_QUEUE)
326 lpfc_ramp_up_queue_handler(phba);
327
328 spin_lock_irq(&vport->work_port_lock);
329 vport->work_port_events &= ~work_port_events;
330 spin_unlock_irq(&vport->work_port_lock);
331 scsi_host_put(shost);
332 spin_lock_irq(&phba->hbalock);
333 }
271 spin_unlock_irq(&phba->hbalock); 334 spin_unlock_irq(&phba->hbalock);
272 335
273 for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) { 336 for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
@@ -300,24 +363,41 @@ lpfc_work_done(struct lpfc_hba *phba)
300static int 363static int
301check_work_wait_done(struct lpfc_hba *phba) 364check_work_wait_done(struct lpfc_hba *phba)
302{ 365{
303 struct lpfc_vport *vport = phba->pport; 366 struct lpfc_vport *vport;
304 int rc = 0; 367 struct lpfc_sli_ring *pring;
305 368 int i, rc = 0;
306 if (!vport)
307 return 0;
308 369
309 spin_lock_irq(&phba->hbalock); 370 spin_lock_irq(&phba->hbalock);
371 list_for_each_entry(vport, &phba->port_list, listentry) {
372 if (vport->work_port_events) {
373 rc = 1;
374 goto exit;
375 }
376 }
310 377
311 if (phba->work_ha || 378 if (phba->work_ha || (!list_empty(&phba->work_list)) ||
312 vport->work_port_events || 379 kthread_should_stop()) {
313 (!list_empty(&phba->work_list)) ||
314 kthread_should_stop())
315 rc = 1; 380 rc = 1;
381 goto exit;
382 }
383 for (i = 0; i < phba->sli.num_rings; i++) {
384 pring = &phba->sli.ring[i];
385 if (pring->flag & LPFC_DEFERRED_RING_EVENT) {
386 rc = 1;
387 goto exit;
388 }
389 }
390exit:
391 if (rc)
392 phba->work_found++;
393 else
394 phba->work_found = 0;
316 395
317 spin_unlock_irq(&phba->hbalock); 396 spin_unlock_irq(&phba->hbalock);
318 return rc; 397 return rc;
319} 398}
320 399
400
321int 401int
322lpfc_do_work(void *p) 402lpfc_do_work(void *p)
323{ 403{
@@ -327,11 +407,13 @@ lpfc_do_work(void *p)
327 407
328 set_user_nice(current, -20); 408 set_user_nice(current, -20);
329 phba->work_wait = &work_waitq; 409 phba->work_wait = &work_waitq;
410 phba->work_found = 0;
330 411
331 while (1) { 412 while (1) {
332 413
333 rc = wait_event_interruptible(work_waitq, 414 rc = wait_event_interruptible(work_waitq,
334 check_work_wait_done(phba)); 415 check_work_wait_done(phba));
416
335 BUG_ON(rc); 417 BUG_ON(rc);
336 418
337 if (kthread_should_stop()) 419 if (kthread_should_stop())
@@ -339,6 +421,17 @@ lpfc_do_work(void *p)
339 421
340 lpfc_work_done(phba); 422 lpfc_work_done(phba);
341 423
424 /* If there is alot of slow ring work, like during link up
425 * check_work_wait_done() may cause this thread to not give
426 * up the CPU for very long periods of time. This may cause
427 * soft lockups or other problems. To avoid these situations
428 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
429 * consecutive iterations.
430 */
431 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
432 phba->work_found = 0;
433 schedule();
434 }
342 } 435 }
343 phba->work_wait = NULL; 436 phba->work_wait = NULL;
344 return 0; 437 return 0;
@@ -360,7 +453,7 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
360 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will 453 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
361 * be queued to worker thread for processing 454 * be queued to worker thread for processing
362 */ 455 */
363 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL); 456 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
364 if (!evtp) 457 if (!evtp)
365 return 0; 458 return 0;
366 459
@@ -371,37 +464,94 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
371 spin_lock_irqsave(&phba->hbalock, flags); 464 spin_lock_irqsave(&phba->hbalock, flags);
372 list_add_tail(&evtp->evt_listp, &phba->work_list); 465 list_add_tail(&evtp->evt_listp, &phba->work_list);
373 if (phba->work_wait) 466 if (phba->work_wait)
374 wake_up(phba->work_wait); 467 lpfc_worker_wake_up(phba);
375 spin_unlock_irqrestore(&phba->hbalock, flags); 468 spin_unlock_irqrestore(&phba->hbalock, flags);
376 469
377 return 1; 470 return 1;
378} 471}
379 472
473void
474lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
475{
476 struct lpfc_hba *phba = vport->phba;
477 struct lpfc_nodelist *ndlp, *next_ndlp;
478 int rc;
479
480 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
481 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
482 continue;
483
484 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
485 lpfc_unreg_rpi(vport, ndlp);
486
487 /* Leave Fabric nodes alone on link down */
488 if (!remove && ndlp->nlp_type & NLP_FABRIC)
489 continue;
490 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
491 remove
492 ? NLP_EVT_DEVICE_RM
493 : NLP_EVT_DEVICE_RECOVERY);
494 }
495 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
496 lpfc_mbx_unreg_vpi(vport);
497 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
498 }
499}
500
501static void
502lpfc_linkdown_port(struct lpfc_vport *vport)
503{
504 struct lpfc_nodelist *ndlp, *next_ndlp;
505 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
506
507 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
508
509 /* Cleanup any outstanding RSCN activity */
510 lpfc_els_flush_rscn(vport);
511
512 /* Cleanup any outstanding ELS commands */
513 lpfc_els_flush_cmd(vport);
514
515 lpfc_cleanup_rpis(vport, 0);
516
517 /* free any ndlp's on unused list */
518 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
519 /* free any ndlp's in unused state */
520 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
521 lpfc_drop_node(vport, ndlp);
522
523 /* Turn off discovery timer if its running */
524 lpfc_can_disctmo(vport);
525}
526
380int 527int
381lpfc_linkdown(struct lpfc_hba *phba) 528lpfc_linkdown(struct lpfc_hba *phba)
382{ 529{
383 struct lpfc_vport *vport = phba->pport; 530 struct lpfc_vport *vport = phba->pport;
384 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 531 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
385 struct lpfc_sli *psli; 532 struct lpfc_vport *port_iterator;
386 struct lpfc_nodelist *ndlp, *next_ndlp;
387 LPFC_MBOXQ_t *mb; 533 LPFC_MBOXQ_t *mb;
388 int rc;
389 534
390 psli = &phba->sli;
391 if (phba->link_state == LPFC_LINK_DOWN) { 535 if (phba->link_state == LPFC_LINK_DOWN) {
392 return 0; 536 return 0;
393 } 537 }
394 spin_lock_irq(&phba->hbalock); 538 spin_lock_irq(&phba->hbalock);
395 if (phba->link_state > LPFC_LINK_DOWN) 539 if (phba->link_state > LPFC_LINK_DOWN) {
396 phba->link_state = LPFC_LINK_DOWN; 540 phba->link_state = LPFC_LINK_DOWN;
541 phba->pport->fc_flag &= ~FC_LBIT;
542 }
397 spin_unlock_irq(&phba->hbalock); 543 spin_unlock_irq(&phba->hbalock);
398 544
399 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); 545 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
546
547 /* Issue a LINK DOWN event to all nodes */
548 lpfc_linkdown_port(port_iterator);
549 }
400 550
401 /* Clean up any firmware default rpi's */ 551 /* Clean up any firmware default rpi's */
402 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 552 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
403 if (mb) { 553 if (mb) {
404 lpfc_unreg_did(phba, 0xffffffff, mb); 554 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
405 mb->vport = vport; 555 mb->vport = vport;
406 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 556 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
407 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) 557 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
@@ -410,31 +560,13 @@ lpfc_linkdown(struct lpfc_hba *phba)
410 } 560 }
411 } 561 }
412 562
413 /* Cleanup any outstanding RSCN activity */
414 lpfc_els_flush_rscn(vport);
415
416 /* Cleanup any outstanding ELS commands */
417 lpfc_els_flush_cmd(vport);
418
419 /*
420 * Issue a LINK DOWN event to all nodes.
421 */
422 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
423 /* free any ndlp's on unused state */
424 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
425 lpfc_drop_node(vport, ndlp);
426 else /* otherwise, force node recovery. */
427 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
428 NLP_EVT_DEVICE_RECOVERY);
429 }
430
431 /* Setup myDID for link up if we are in pt2pt mode */ 563 /* Setup myDID for link up if we are in pt2pt mode */
432 if (vport->fc_flag & FC_PT2PT) { 564 if (phba->pport->fc_flag & FC_PT2PT) {
433 vport->fc_myDID = 0; 565 phba->pport->fc_myDID = 0;
434 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 566 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
435 if (mb) { 567 if (mb) {
436 lpfc_config_link(phba, mb); 568 lpfc_config_link(phba, mb);
437 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 569 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
438 mb->vport = vport; 570 mb->vport = vport;
439 if (lpfc_sli_issue_mbox(phba, mb, 571 if (lpfc_sli_issue_mbox(phba, mb,
440 (MBX_NOWAIT | MBX_STOP_IOCB)) 572 (MBX_NOWAIT | MBX_STOP_IOCB))
@@ -443,66 +575,88 @@ lpfc_linkdown(struct lpfc_hba *phba)
443 } 575 }
444 } 576 }
445 spin_lock_irq(shost->host_lock); 577 spin_lock_irq(shost->host_lock);
446 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); 578 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
447 spin_unlock_irq(shost->host_lock); 579 spin_unlock_irq(shost->host_lock);
448 } 580 }
449 581
450 spin_lock_irq(shost->host_lock); 582 return 0;
451 vport->fc_flag &= ~FC_LBIT; 583}
452 spin_unlock_irq(shost->host_lock);
453 584
454 /* Turn off discovery timer if its running */ 585static void
455 lpfc_can_disctmo(vport); 586lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
587{
588 struct lpfc_nodelist *ndlp;
456 589
457 /* Must process IOCBs on all rings to handle ABORTed I/Os */ 590 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
458 return 0; 591 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
592 continue;
593
594 if (ndlp->nlp_type & NLP_FABRIC) {
595 /* On Linkup its safe to clean up the ndlp
596 * from Fabric connections.
597 */
598 if (ndlp->nlp_DID != Fabric_DID)
599 lpfc_unreg_rpi(vport, ndlp);
600 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
601 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
602 /* Fail outstanding IO now since device is
603 * marked for PLOGI.
604 */
605 lpfc_unreg_rpi(vport, ndlp);
606 }
607 }
459} 608}
460 609
461static int 610static void
462lpfc_linkup(struct lpfc_hba *phba) 611lpfc_linkup_port(struct lpfc_vport *vport)
463{ 612{
464 struct lpfc_vport *vport = phba->pport; 613 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
465 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
466 struct lpfc_nodelist *ndlp, *next_ndlp; 614 struct lpfc_nodelist *ndlp, *next_ndlp;
615 struct lpfc_hba *phba = vport->phba;
616
617 if ((vport->load_flag & FC_UNLOADING) != 0)
618 return;
619
620 /* If NPIV is not enabled, only bring the physical port up */
621 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
622 (vport != phba->pport))
623 return;
467 624
468 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); 625 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
469 626
470 spin_lock_irq(shost->host_lock); 627 spin_lock_irq(shost->host_lock);
471 phba->link_state = LPFC_LINK_UP;
472 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | 628 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
473 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); 629 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
474 vport->fc_flag |= FC_NDISC_ACTIVE; 630 vport->fc_flag |= FC_NDISC_ACTIVE;
475 vport->fc_ns_retry = 0; 631 vport->fc_ns_retry = 0;
476 spin_unlock_irq(shost->host_lock); 632 spin_unlock_irq(shost->host_lock);
477 633
634 if (vport->fc_flag & FC_LBIT)
635 lpfc_linkup_cleanup_nodes(vport);
478 636
479 if (vport->fc_flag & FC_LBIT) { 637 /* free any ndlp's in unused state */
480 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
481 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) {
482 if (ndlp->nlp_type & NLP_FABRIC) {
483 /*
484 * On Linkup its safe to clean up the
485 * ndlp from Fabric connections.
486 */
487 lpfc_nlp_set_state(vport, ndlp,
488 NLP_STE_UNUSED_NODE);
489 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
490 /*
491 * Fail outstanding IO now since
492 * device is marked for PLOGI.
493 */
494 lpfc_unreg_rpi(vport, ndlp);
495 }
496 }
497 }
498 }
499
500 /* free any ndlp's in unused state */
501 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 638 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
502 nlp_listp) { 639 nlp_listp)
503 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 640 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
504 lpfc_drop_node(vport, ndlp); 641 lpfc_drop_node(vport, ndlp);
642}
643
644static int
645lpfc_linkup(struct lpfc_hba *phba)
646{
647 struct lpfc_vport *vport;
648
649 phba->link_state = LPFC_LINK_UP;
650
651 /* Unblock fabric iocbs if they are blocked */
652 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
653 del_timer_sync(&phba->fabric_block_timer);
654
655 list_for_each_entry(vport, &phba->port_list, listentry) {
656 lpfc_linkup_port(vport);
505 } 657 }
658 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
659 lpfc_issue_clear_la(phba, phba->pport);
506 660
507 return 0; 661 return 0;
508} 662}
@@ -529,18 +683,28 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
529 683
530 /* Check for error */ 684 /* Check for error */
531 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { 685 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
532 /* CLEAR_LA mbox error <mbxStatus> state <port_state> */ 686 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
533 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 687 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
534 "%d:0320 CLEAR_LA mbxStatus error x%x hba " 688 "%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
535 "state x%x\n", 689 "state x%x\n",
536 phba->brd_no, mb->mbxStatus, vport->port_state); 690 phba->brd_no, vport->vpi, mb->mbxStatus,
691 vport->port_state);
537 692
538 phba->link_state = LPFC_HBA_ERROR; 693 phba->link_state = LPFC_HBA_ERROR;
539 goto out; 694 goto out;
540 } 695 }
541 696
542 if (vport->fc_flag & FC_ABORT_DISCOVERY) 697 if (vport->port_type == LPFC_PHYSICAL_PORT)
543 goto out; 698 phba->link_state = LPFC_HBA_READY;
699
700 spin_lock_irq(&phba->hbalock);
701 psli->sli_flag |= LPFC_PROCESS_LA;
702 control = readl(phba->HCregaddr);
703 control |= HC_LAINT_ENA;
704 writel(control, phba->HCregaddr);
705 readl(phba->HCregaddr); /* flush */
706 spin_unlock_irq(&phba->hbalock);
707 return;
544 708
545 vport->num_disc_nodes = 0; 709 vport->num_disc_nodes = 0;
546 /* go thru NPR nodes and issue ELS PLOGIs */ 710 /* go thru NPR nodes and issue ELS PLOGIs */
@@ -558,8 +722,8 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
558out: 722out:
559 /* Device Discovery completes */ 723 /* Device Discovery completes */
560 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 724 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
561 "%d:0225 Device Discovery completes\n", 725 "%d (%d):0225 Device Discovery completes\n",
562 phba->brd_no); 726 phba->brd_no, vport->vpi);
563 727
564 mempool_free(pmb, phba->mbox_mem_pool); 728 mempool_free(pmb, phba->mbox_mem_pool);
565 729
@@ -589,8 +753,6 @@ static void
589lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 753lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
590{ 754{
591 struct lpfc_vport *vport = pmb->vport; 755 struct lpfc_vport *vport = pmb->vport;
592 struct lpfc_sli *psli = &phba->sli;
593 int rc;
594 756
595 if (pmb->mb.mbxStatus) 757 if (pmb->mb.mbxStatus)
596 goto out; 758 goto out;
@@ -606,49 +768,40 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
606 */ 768 */
607 lpfc_set_disctmo(vport); 769 lpfc_set_disctmo(vport);
608 return; 770 return;
609 } 771 }
610 772
611 /* Start discovery by sending a FLOGI. port_state is identically 773 /* Start discovery by sending a FLOGI. port_state is identically
612 * LPFC_FLOGI while waiting for FLOGI cmpl 774 * LPFC_FLOGI while waiting for FLOGI cmpl
613 */ 775 */
614 vport->port_state = LPFC_FLOGI; 776 if (vport->port_state != LPFC_FLOGI) {
615 lpfc_set_disctmo(vport); 777 vport->port_state = LPFC_FLOGI;
616 lpfc_initial_flogi(vport); 778 lpfc_set_disctmo(vport);
779 lpfc_initial_flogi(vport);
780 }
617 return; 781 return;
618 782
619out: 783out:
620 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 784 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
621 "%d:0306 CONFIG_LINK mbxStatus error x%x " 785 "%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
622 "HBA state x%x\n", 786 "HBA state x%x\n",
623 phba->brd_no, pmb->mb.mbxStatus, vport->port_state); 787 phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
788 vport->port_state);
624 789
625 lpfc_linkdown(phba); 790 mempool_free(pmb, phba->mbox_mem_pool);
626 791
627 phba->link_state = LPFC_HBA_ERROR; 792 lpfc_linkdown(phba);
628 793
629 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 794 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
630 "%d:0200 CONFIG_LINK bad hba state x%x\n", 795 "%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
631 phba->brd_no, vport->port_state); 796 phba->brd_no, vport->vpi, vport->port_state);
632 797
633 lpfc_clear_la(phba, pmb); 798 lpfc_issue_clear_la(phba, vport);
634 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
635 pmb->vport = vport;
636 rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
637 if (rc == MBX_NOT_FINISHED) {
638 mempool_free(pmb, phba->mbox_mem_pool);
639 lpfc_disc_flush_list(vport);
640 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
641 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
642 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
643 vport->port_state = LPFC_VPORT_READY;
644 }
645 return; 799 return;
646} 800}
647 801
648static void 802static void
649lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 803lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
650{ 804{
651 struct lpfc_sli *psli = &phba->sli;
652 MAILBOX_t *mb = &pmb->mb; 805 MAILBOX_t *mb = &pmb->mb;
653 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 806 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
654 struct lpfc_vport *vport = pmb->vport; 807 struct lpfc_vport *vport = pmb->vport;
@@ -658,12 +811,12 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
658 if (mb->mbxStatus) { 811 if (mb->mbxStatus) {
659 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 812 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
660 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 813 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
661 "%d:0319 READ_SPARAM mbxStatus error x%x " 814 "%d (%d):0319 READ_SPARAM mbxStatus error x%x "
662 "hba state x%x>\n", 815 "hba state x%x>\n",
663 phba->brd_no, mb->mbxStatus, vport->port_state); 816 phba->brd_no, vport->vpi, mb->mbxStatus,
817 vport->port_state);
664 818
665 lpfc_linkdown(phba); 819 lpfc_linkdown(phba);
666 phba->link_state = LPFC_HBA_ERROR;
667 goto out; 820 goto out;
668 } 821 }
669 822
@@ -675,12 +828,15 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
675 if (phba->cfg_soft_wwpn) 828 if (phba->cfg_soft_wwpn)
676 u64_to_wwn(phba->cfg_soft_wwpn, 829 u64_to_wwn(phba->cfg_soft_wwpn,
677 vport->fc_sparam.portName.u.wwn); 830 vport->fc_sparam.portName.u.wwn);
678 memcpy((uint8_t *) &vport->fc_nodename, 831 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
679 (uint8_t *) &vport->fc_sparam.nodeName, 832 sizeof(vport->fc_nodename));
680 sizeof (struct lpfc_name)); 833 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
681 memcpy((uint8_t *) &vport->fc_portname, 834 sizeof(vport->fc_portname));
682 (uint8_t *) &vport->fc_sparam.portName, 835 if (vport->port_type == LPFC_PHYSICAL_PORT) {
683 sizeof (struct lpfc_name)); 836 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
837 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
838 }
839
684 lpfc_mbuf_free(phba, mp->virt, mp->phys); 840 lpfc_mbuf_free(phba, mp->virt, mp->phys);
685 kfree(mp); 841 kfree(mp);
686 mempool_free(pmb, phba->mbox_mem_pool); 842 mempool_free(pmb, phba->mbox_mem_pool);
@@ -690,35 +846,15 @@ out:
690 pmb->context1 = NULL; 846 pmb->context1 = NULL;
691 lpfc_mbuf_free(phba, mp->virt, mp->phys); 847 lpfc_mbuf_free(phba, mp->virt, mp->phys);
692 kfree(mp); 848 kfree(mp);
693 if (phba->link_state != LPFC_CLEAR_LA) { 849 lpfc_issue_clear_la(phba, vport);
694 struct lpfc_sli_ring *extra_ring = 850 mempool_free(pmb, phba->mbox_mem_pool);
695 &psli->ring[psli->extra_ring];
696 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
697 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
698
699 lpfc_clear_la(phba, pmb);
700 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
701 pmb->vport = vport;
702 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
703 == MBX_NOT_FINISHED) {
704 mempool_free(pmb, phba->mbox_mem_pool);
705 lpfc_disc_flush_list(vport);
706 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
707 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
708 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
709 vport->port_state = LPFC_VPORT_READY;
710 }
711 } else {
712 mempool_free(pmb, phba->mbox_mem_pool);
713 }
714 return; 851 return;
715} 852}
716 853
717static void 854static void
718lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la) 855lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
719{ 856{
720 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 857 struct lpfc_vport *vport = phba->pport;
721 struct lpfc_hba *phba = vport->phba;
722 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 858 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
723 int i; 859 int i;
724 struct lpfc_dmabuf *mp; 860 struct lpfc_dmabuf *mp;
@@ -727,30 +863,32 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
727 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 863 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
728 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 864 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
729 865
730 spin_lock_irq(shost->host_lock); 866 spin_lock_irq(&phba->hbalock);
731 switch (la->UlnkSpeed) { 867 switch (la->UlnkSpeed) {
732 case LA_1GHZ_LINK: 868 case LA_1GHZ_LINK:
733 phba->fc_linkspeed = LA_1GHZ_LINK; 869 phba->fc_linkspeed = LA_1GHZ_LINK;
734 break; 870 break;
735 case LA_2GHZ_LINK: 871 case LA_2GHZ_LINK:
736 phba->fc_linkspeed = LA_2GHZ_LINK; 872 phba->fc_linkspeed = LA_2GHZ_LINK;
737 break; 873 break;
738 case LA_4GHZ_LINK: 874 case LA_4GHZ_LINK:
739 phba->fc_linkspeed = LA_4GHZ_LINK; 875 phba->fc_linkspeed = LA_4GHZ_LINK;
740 break; 876 break;
741 case LA_8GHZ_LINK: 877 case LA_8GHZ_LINK:
742 phba->fc_linkspeed = LA_8GHZ_LINK; 878 phba->fc_linkspeed = LA_8GHZ_LINK;
743 break; 879 break;
744 default: 880 default:
745 phba->fc_linkspeed = LA_UNKNW_LINK; 881 phba->fc_linkspeed = LA_UNKNW_LINK;
746 break; 882 break;
747 } 883 }
748 884
749 phba->fc_topology = la->topology; 885 phba->fc_topology = la->topology;
886 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
750 887
751 if (phba->fc_topology == TOPOLOGY_LOOP) { 888 if (phba->fc_topology == TOPOLOGY_LOOP) {
752 /* Get Loop Map information */ 889 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
753 890
891 /* Get Loop Map information */
754 if (la->il) 892 if (la->il)
755 vport->fc_flag |= FC_LBIT; 893 vport->fc_flag |= FC_LBIT;
756 894
@@ -784,30 +922,35 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
784 } 922 }
785 /* Link Up Event ALPA map */ 923 /* Link Up Event ALPA map */
786 lpfc_printf_log(phba, 924 lpfc_printf_log(phba,
787 KERN_WARNING, 925 KERN_WARNING,
788 LOG_LINK_EVENT, 926 LOG_LINK_EVENT,
789 "%d:1304 Link Up Event " 927 "%d:1304 Link Up Event "
790 "ALPA map Data: x%x " 928 "ALPA map Data: x%x "
791 "x%x x%x x%x\n", 929 "x%x x%x x%x\n",
792 phba->brd_no, 930 phba->brd_no,
793 un.pa.wd1, un.pa.wd2, 931 un.pa.wd1, un.pa.wd2,
794 un.pa.wd3, un.pa.wd4); 932 un.pa.wd3, un.pa.wd4);
795 } 933 }
796 } 934 }
797 } 935 }
798 } else { 936 } else {
937 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
938 if (phba->max_vpi && lpfc_npiv_enable &&
939 (phba->sli_rev == 3))
940 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
941 }
799 vport->fc_myDID = phba->fc_pref_DID; 942 vport->fc_myDID = phba->fc_pref_DID;
800 vport->fc_flag |= FC_LBIT; 943 vport->fc_flag |= FC_LBIT;
801 } 944 }
802 spin_unlock_irq(shost->host_lock); 945 spin_unlock_irq(&phba->hbalock);
803 946
804 lpfc_linkup(phba); 947 lpfc_linkup(phba);
805 if (sparam_mbox) { 948 if (sparam_mbox) {
806 lpfc_read_sparam(phba, sparam_mbox); 949 lpfc_read_sparam(phba, sparam_mbox, 0);
807 sparam_mbox->vport = vport; 950 sparam_mbox->vport = vport;
808 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 951 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
809 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, 952 rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
810 (MBX_NOWAIT | MBX_STOP_IOCB)); 953 (MBX_NOWAIT | MBX_STOP_IOCB));
811 if (rc == MBX_NOT_FINISHED) { 954 if (rc == MBX_NOT_FINISHED) {
812 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 955 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
813 lpfc_mbuf_free(phba, mp->virt, mp->phys); 956 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -815,7 +958,7 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
815 mempool_free(sparam_mbox, phba->mbox_mem_pool); 958 mempool_free(sparam_mbox, phba->mbox_mem_pool);
816 if (cfglink_mbox) 959 if (cfglink_mbox)
817 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 960 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
818 return; 961 goto out;
819 } 962 }
820 } 963 }
821 964
@@ -825,10 +968,20 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
825 cfglink_mbox->vport = vport; 968 cfglink_mbox->vport = vport;
826 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 969 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
827 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, 970 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
828 (MBX_NOWAIT | MBX_STOP_IOCB)); 971 (MBX_NOWAIT | MBX_STOP_IOCB));
829 if (rc == MBX_NOT_FINISHED) 972 if (rc != MBX_NOT_FINISHED)
830 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 973 return;
974 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
831 } 975 }
976out:
977 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
978 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
979 "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
980 phba->brd_no, vport->vpi,
981 vport->port_state, sparam_mbox, cfglink_mbox);
982
983 lpfc_issue_clear_la(phba, vport);
984 return;
832} 985}
833 986
834static void 987static void
@@ -886,12 +1039,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
886 spin_unlock_irq(shost->host_lock); 1039 spin_unlock_irq(shost->host_lock);
887 1040
888 if (((phba->fc_eventTag + 1) < la->eventTag) || 1041 if (((phba->fc_eventTag + 1) < la->eventTag) ||
889 (phba->fc_eventTag == la->eventTag)) { 1042 (phba->fc_eventTag == la->eventTag)) {
890 phba->fc_stat.LinkMultiEvent++; 1043 phba->fc_stat.LinkMultiEvent++;
891 if (la->attType == AT_LINK_UP) 1044 if (la->attType == AT_LINK_UP)
892 if (phba->fc_eventTag != 0) 1045 if (phba->fc_eventTag != 0)
893 lpfc_linkdown(phba); 1046 lpfc_linkdown(phba);
894 } 1047 }
895 1048
896 phba->fc_eventTag = la->eventTag; 1049 phba->fc_eventTag = la->eventTag;
897 1050
@@ -912,7 +1065,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
912 la->granted_AL_PA, la->UlnkSpeed, 1065 la->granted_AL_PA, la->UlnkSpeed,
913 phba->alpa_map[0]); 1066 phba->alpa_map[0]);
914 } 1067 }
915 lpfc_mbx_process_link_up(vport, la); 1068 lpfc_mbx_process_link_up(phba, la);
916 } else { 1069 } else {
917 phba->fc_stat.LinkDown++; 1070 phba->fc_stat.LinkDown++;
918 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1071 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -940,7 +1093,7 @@ void
940lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1093lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
941{ 1094{
942 struct lpfc_vport *vport = pmb->vport; 1095 struct lpfc_vport *vport = pmb->vport;
943 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1096 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
944 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 1097 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
945 1098
946 pmb->context1 = NULL; 1099 pmb->context1 = NULL;
@@ -955,6 +1108,100 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
955 return; 1108 return;
956} 1109}
957 1110
1111static void
1112lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1113{
1114 MAILBOX_t *mb = &pmb->mb;
1115 struct lpfc_vport *vport = pmb->vport;
1116 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1117
1118 switch (mb->mbxStatus) {
1119 case 0x0011:
1120 case 0x0020:
1121 case 0x9700:
1122 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1123 "%d (%d):0911 cmpl_unreg_vpi, "
1124 "mb status = 0x%x\n",
1125 phba->brd_no, vport->vpi, mb->mbxStatus);
1126 break;
1127 default:
1128 phba->vpi_cnt--;
1129 }
1130 vport->unreg_vpi_cmpl = VPORT_OK;
1131 mempool_free(pmb, phba->mbox_mem_pool);
1132 /*
1133 * This shost reference might have been taken at the beginning of
1134 * lpfc_vport_delete()
1135 */
1136 if (vport->load_flag & FC_UNLOADING)
1137 scsi_host_put(shost);
1138}
1139
1140void
1141lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1142{
1143 struct lpfc_hba *phba = vport->phba;
1144 LPFC_MBOXQ_t *mbox;
1145 int rc;
1146
1147 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1148 if (!mbox)
1149 return;
1150
1151 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1152 mbox->vport = vport;
1153 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1154 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1155 if (rc == MBX_NOT_FINISHED) {
1156 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1157 "%d (%d):1800 Could not issue unreg_vpi\n",
1158 phba->brd_no, vport->vpi);
1159 mempool_free(mbox, phba->mbox_mem_pool);
1160 vport->unreg_vpi_cmpl = VPORT_ERROR;
1161 }
1162}
1163
1164static void
1165lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1166{
1167 struct lpfc_vport *vport = pmb->vport;
1168 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1169 MAILBOX_t *mb = &pmb->mb;
1170
1171 switch (mb->mbxStatus) {
1172 case 0x0011:
1173 case 0x9601:
1174 case 0x9602:
1175 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1176 "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
1177 phba->brd_no, vport->vpi, mb->mbxStatus);
1178 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1179 spin_lock_irq(shost->host_lock);
1180 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1181 spin_unlock_irq(shost->host_lock);
1182 vport->fc_myDID = 0;
1183 goto out;
1184 }
1185 phba->vpi_cnt++;
1186
1187 vport->num_disc_nodes = 0;
1188 /* go thru NPR list and issue ELS PLOGIs */
1189 if (vport->fc_npr_cnt)
1190 lpfc_els_disc_plogi(vport);
1191
1192 if (!vport->num_disc_nodes) {
1193 spin_lock_irq(shost->host_lock);
1194 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1195 spin_unlock_irq(shost->host_lock);
1196 lpfc_can_disctmo(vport);
1197 }
1198 vport->port_state = LPFC_VPORT_READY;
1199
1200out:
1201 mempool_free(pmb, phba->mbox_mem_pool);
1202 return;
1203}
1204
958/* 1205/*
959 * This routine handles processing a Fabric REG_LOGIN mailbox 1206 * This routine handles processing a Fabric REG_LOGIN mailbox
960 * command upon completion. It is setup in the LPFC_MBOXQ 1207 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -964,10 +1211,11 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
964void 1211void
965lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1212lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
966{ 1213{
967 struct lpfc_vport *vport = pmb->vport; 1214 struct lpfc_vport *vport = pmb->vport;
1215 struct lpfc_vport *next_vport;
968 MAILBOX_t *mb = &pmb->mb; 1216 MAILBOX_t *mb = &pmb->mb;
969 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1217 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
970 struct lpfc_nodelist *ndlp, *ndlp_fdmi; 1218 struct lpfc_nodelist *ndlp;
971 ndlp = (struct lpfc_nodelist *) pmb->context2; 1219 ndlp = (struct lpfc_nodelist *) pmb->context2;
972 1220
973 pmb->context1 = NULL; 1221 pmb->context1 = NULL;
@@ -979,11 +1227,20 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
979 mempool_free(pmb, phba->mbox_mem_pool); 1227 mempool_free(pmb, phba->mbox_mem_pool);
980 lpfc_nlp_put(ndlp); 1228 lpfc_nlp_put(ndlp);
981 1229
982 /* FLOGI failed, so just use loop map to make discovery list */ 1230 if (phba->fc_topology == TOPOLOGY_LOOP) {
983 lpfc_disc_list_loopmap(vport); 1231 /* FLOGI failed, use loop map to make discovery list */
1232 lpfc_disc_list_loopmap(vport);
1233
1234 /* Start discovery */
1235 lpfc_disc_start(vport);
1236 return;
1237 }
1238
1239 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1240 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1241 "%d (%d):0258 Register Fabric login error: 0x%x\n",
1242 phba->brd_no, vport->vpi, mb->mbxStatus);
984 1243
985 /* Start discovery */
986 lpfc_disc_start(vport);
987 return; 1244 return;
988 } 1245 }
989 1246
@@ -994,47 +1251,25 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
994 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ 1251 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
995 1252
996 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 1253 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
997 /* This NPort has been assigned an NPort_ID by the fabric as a 1254 list_for_each_entry(next_vport, &phba->port_list, listentry) {
998 * result of the completed fabric login. Issue a State Change 1255 if (next_vport->port_type == LPFC_PHYSICAL_PORT)
999 * Registration (SCR) ELS request to the fabric controller 1256 continue;
1000 * (SCR_DID) so that this NPort gets RSCN events from the
1001 * fabric.
1002 */
1003 lpfc_issue_els_scr(vport, SCR_DID, 0);
1004
1005 ndlp = lpfc_findnode_did(vport, NameServer_DID);
1006 if (!ndlp) {
1007 /* Allocate a new node instance. If the pool is empty,
1008 * start the discovery process and skip the Nameserver
1009 * login process. This is attempted again later on.
1010 * Otherwise, issue a Port Login (PLOGI) to
1011 * the NameServer
1012 */
1013 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1014 if (!ndlp) {
1015 lpfc_disc_start(vport);
1016 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1017 kfree(mp);
1018 mempool_free(pmb, phba->mbox_mem_pool);
1019 return;
1020 } else {
1021 lpfc_nlp_init(vport, ndlp, NameServer_DID);
1022 ndlp->nlp_type |= NLP_FABRIC;
1023 }
1024 }
1025 1257
1026 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1258 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1027 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 1259 lpfc_initial_fdisc(next_vport);
1028 if (phba->cfg_fdmi_on) { 1260 else {
1029 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, 1261 if (phba->sli3_options &
1030 GFP_KERNEL); 1262 LPFC_SLI3_NPIV_ENABLED) {
1031 if (ndlp_fdmi) { 1263 lpfc_vport_set_state(vport,
1032 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); 1264 FC_VPORT_NO_FABRIC_SUPP);
1033 ndlp_fdmi->nlp_type |= NLP_FABRIC; 1265 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1034 ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE; 1266 "%d (%d):0259 No NPIV Fabric "
1035 lpfc_issue_els_plogi(vport, FDMI_DID, 0); 1267 "support\n",
1268 phba->brd_no, vport->vpi);
1269 }
1036 } 1270 }
1037 } 1271 }
1272 lpfc_do_scr_ns_plogi(phba, vport);
1038 } 1273 }
1039 1274
1040 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1275 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1058,20 +1293,28 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1058 struct lpfc_vport *vport = pmb->vport; 1293 struct lpfc_vport *vport = pmb->vport;
1059 1294
1060 if (mb->mbxStatus) { 1295 if (mb->mbxStatus) {
1296out:
1061 lpfc_nlp_put(ndlp); 1297 lpfc_nlp_put(ndlp);
1062 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1298 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1063 kfree(mp); 1299 kfree(mp);
1064 mempool_free(pmb, phba->mbox_mem_pool); 1300 mempool_free(pmb, phba->mbox_mem_pool);
1065 lpfc_drop_node(vport, ndlp); 1301 lpfc_drop_node(vport, ndlp);
1066 1302
1067 /* 1303 if (phba->fc_topology == TOPOLOGY_LOOP) {
1068 * RegLogin failed, so just use loop map to make discovery 1304 /*
1069 * list 1305 * RegLogin failed, use loop map to make discovery
1070 */ 1306 * list
1071 lpfc_disc_list_loopmap(vport); 1307 */
1308 lpfc_disc_list_loopmap(vport);
1072 1309
1073 /* Start discovery */ 1310 /* Start discovery */
1074 lpfc_disc_start(vport); 1311 lpfc_disc_start(vport);
1312 return;
1313 }
1314 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1315 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1316 "%d (%d):0260 Register NameServer error: 0x%x\n",
1317 phba->brd_no, vport->vpi, mb->mbxStatus);
1075 return; 1318 return;
1076 } 1319 }
1077 1320
@@ -1083,17 +1326,21 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1083 1326
1084 if (vport->port_state < LPFC_VPORT_READY) { 1327 if (vport->port_state < LPFC_VPORT_READY) {
1085 /* Link up discovery requires Fabric registration. */ 1328 /* Link up discovery requires Fabric registration. */
1086 lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RNN_ID); 1329 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1087 lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RSNN_NN); 1330 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1088 lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RFT_ID); 1331 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1089 lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RFF_ID); 1332 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1333 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1334
1335 /* Issue SCR just before NameServer GID_FT Query */
1336 lpfc_issue_els_scr(vport, SCR_DID, 0);
1090 } 1337 }
1091 1338
1092 vport->fc_ns_retry = 0; 1339 vport->fc_ns_retry = 0;
1093 /* Good status, issue CT Request to NameServer */ 1340 /* Good status, issue CT Request to NameServer */
1094 if (lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT)) { 1341 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
1095 /* Cannot issue NameServer Query, so finish up discovery */ 1342 /* Cannot issue NameServer Query, so finish up discovery */
1096 lpfc_disc_start(vport); 1343 goto out;
1097 } 1344 }
1098 1345
1099 lpfc_nlp_put(ndlp); 1346 lpfc_nlp_put(ndlp);
@@ -1127,7 +1374,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1127 * registered the port. 1374 * registered the port.
1128 */ 1375 */
1129 if (ndlp->rport && ndlp->rport->dd_data && 1376 if (ndlp->rport && ndlp->rport->dd_data &&
1130 *(struct lpfc_rport_data **) ndlp->rport->dd_data) { 1377 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
1131 lpfc_nlp_put(ndlp); 1378 lpfc_nlp_put(ndlp);
1132 } 1379 }
1133 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); 1380 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
@@ -1147,16 +1394,16 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1147 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 1394 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1148 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 1395 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1149 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 1396 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1397 del_timer_sync(&ndlp->nlp_initiator_tmr);
1150 1398
1151 1399
1152 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 1400 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1153 fc_remote_port_rolechg(rport, rport_ids.roles); 1401 fc_remote_port_rolechg(rport, rport_ids.roles);
1154 1402
1155 if ((rport->scsi_target_id != -1) && 1403 if ((rport->scsi_target_id != -1) &&
1156 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 1404 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1157 ndlp->nlp_sid = rport->scsi_target_id; 1405 ndlp->nlp_sid = rport->scsi_target_id;
1158 } 1406 }
1159
1160 return; 1407 return;
1161} 1408}
1162 1409
@@ -1164,14 +1411,6 @@ static void
1164lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) 1411lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1165{ 1412{
1166 struct fc_rport *rport = ndlp->rport; 1413 struct fc_rport *rport = ndlp->rport;
1167 struct lpfc_rport_data *rdata = rport->dd_data;
1168
1169 if (rport->scsi_target_id == -1) {
1170 ndlp->rport = NULL;
1171 rdata->pnode = NULL;
1172 lpfc_nlp_put(ndlp);
1173 put_device(&rport->dev);
1174 }
1175 1414
1176 fc_remote_port_delete(rport); 1415 fc_remote_port_delete(rport);
1177 1416
@@ -1377,9 +1616,9 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
1377 1616
1378 /* Start Discovery Timer state <hba_state> */ 1617 /* Start Discovery Timer state <hba_state> */
1379 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1618 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1380 "%d:0247 Start Discovery Timer state x%x " 1619 "%d (%d):0247 Start Discovery Timer state x%x "
1381 "Data: x%x x%lx x%x x%x\n", 1620 "Data: x%x x%lx x%x x%x\n",
1382 phba->brd_no, vport->port_state, tmo, 1621 phba->brd_no, vport->vpi, vport->port_state, tmo,
1383 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt, 1622 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1384 vport->fc_adisc_cnt); 1623 vport->fc_adisc_cnt);
1385 1624
@@ -1409,10 +1648,11 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
1409 1648
1410 /* Cancel Discovery Timer state <hba_state> */ 1649 /* Cancel Discovery Timer state <hba_state> */
1411 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1650 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1412 "%d:0248 Cancel Discovery Timer state x%x " 1651 "%d (%d):0248 Cancel Discovery Timer state x%x "
1413 "Data: x%x x%x x%x\n", 1652 "Data: x%x x%x x%x\n",
1414 phba->brd_no, vport->port_state, vport->fc_flag, 1653 phba->brd_no, vport->vpi, vport->port_state,
1415 vport->fc_plogi_cnt, vport->fc_adisc_cnt); 1654 vport->fc_flag, vport->fc_plogi_cnt,
1655 vport->fc_adisc_cnt);
1416 1656
1417 return 0; 1657 return 0;
1418} 1658}
@@ -1429,6 +1669,11 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1429{ 1669{
1430 struct lpfc_sli *psli = &phba->sli; 1670 struct lpfc_sli *psli = &phba->sli;
1431 IOCB_t *icmd = &iocb->iocb; 1671 IOCB_t *icmd = &iocb->iocb;
1672 struct lpfc_vport *vport = ndlp->vport;
1673
1674 if (iocb->vport != vport)
1675 return 0;
1676
1432 if (pring->ringno == LPFC_ELS_RING) { 1677 if (pring->ringno == LPFC_ELS_RING) {
1433 switch (icmd->ulpCommand) { 1678 switch (icmd->ulpCommand) {
1434 case CMD_GEN_REQUEST64_CR: 1679 case CMD_GEN_REQUEST64_CR:
@@ -1446,7 +1691,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1446 } else if (pring->ringno == psli->fcp_ring) { 1691 } else if (pring->ringno == psli->fcp_ring) {
1447 /* Skip match check if waiting to relogin to FCP target */ 1692 /* Skip match check if waiting to relogin to FCP target */
1448 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 1693 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1449 (ndlp->nlp_flag & NLP_DELAY_TMO)) { 1694 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1450 return 0; 1695 return 0;
1451 } 1696 }
1452 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { 1697 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
@@ -1472,6 +1717,8 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1472 IOCB_t *icmd; 1717 IOCB_t *icmd;
1473 uint32_t rpi, i; 1718 uint32_t rpi, i;
1474 1719
1720 lpfc_fabric_abort_nport(ndlp);
1721
1475 /* 1722 /*
1476 * Everything that matches on txcmplq will be returned 1723 * Everything that matches on txcmplq will be returned
1477 * by firmware with a no rpi error. 1724 * by firmware with a no rpi error.
@@ -1490,8 +1737,8 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1490 * Check to see if iocb matches the nport we are 1737 * Check to see if iocb matches the nport we are
1491 * looking for 1738 * looking for
1492 */ 1739 */
1493 if ((lpfc_check_sli_ndlp 1740 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1494 (phba, pring, iocb, ndlp))) { 1741 ndlp))) {
1495 /* It matches, so deque and call compl 1742 /* It matches, so deque and call compl
1496 with an error */ 1743 with an error */
1497 list_move_tail(&iocb->list, 1744 list_move_tail(&iocb->list,
@@ -1505,7 +1752,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1505 1752
1506 while (!list_empty(&completions)) { 1753 while (!list_empty(&completions)) {
1507 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 1754 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1508 list_del(&iocb->list); 1755 list_del_init(&iocb->list);
1509 1756
1510 if (!iocb->iocb_cmpl) 1757 if (!iocb->iocb_cmpl)
1511 lpfc_sli_release_iocbq(phba, iocb); 1758 lpfc_sli_release_iocbq(phba, iocb);
@@ -1539,11 +1786,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1539 if (ndlp->nlp_rpi) { 1786 if (ndlp->nlp_rpi) {
1540 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1787 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1541 if (mbox) { 1788 if (mbox) {
1542 lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox); 1789 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1543 mbox->vport = vport; 1790 mbox->vport = vport;
1544 mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 1791 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1545 rc = lpfc_sli_issue_mbox 1792 rc = lpfc_sli_issue_mbox(phba, mbox,
1546 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 1793 (MBX_NOWAIT | MBX_STOP_IOCB));
1547 if (rc == MBX_NOT_FINISHED) 1794 if (rc == MBX_NOT_FINISHED)
1548 mempool_free(mbox, phba->mbox_mem_pool); 1795 mempool_free(mbox, phba->mbox_mem_pool);
1549 } 1796 }
@@ -1554,6 +1801,50 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1554 return 0; 1801 return 0;
1555} 1802}
1556 1803
1804void
1805lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1806{
1807 struct lpfc_hba *phba = vport->phba;
1808 LPFC_MBOXQ_t *mbox;
1809 int rc;
1810
1811 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1812 if (mbox) {
1813 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1814 mbox->vport = vport;
1815 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1816 rc = lpfc_sli_issue_mbox(phba, mbox,
1817 (MBX_NOWAIT | MBX_STOP_IOCB));
1818 if (rc == MBX_NOT_FINISHED) {
1819 mempool_free(mbox, phba->mbox_mem_pool);
1820 }
1821 }
1822}
1823
1824void
1825lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1826{
1827 struct lpfc_hba *phba = vport->phba;
1828 LPFC_MBOXQ_t *mbox;
1829 int rc;
1830
1831 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1832 if (mbox) {
1833 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1834 mbox->vport = vport;
1835 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1836 rc = lpfc_sli_issue_mbox(phba, mbox,
1837 (MBX_NOWAIT | MBX_STOP_IOCB));
1838 if (rc == MBX_NOT_FINISHED) {
1839 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1840 "%d (%d):1815 Could not issue "
1841 "unreg_did (default rpis)\n",
1842 phba->brd_no, vport->vpi);
1843 mempool_free(mbox, phba->mbox_mem_pool);
1844 }
1845 }
1846}
1847
1557/* 1848/*
1558 * Free resources associated with LPFC_NODELIST entry 1849 * Free resources associated with LPFC_NODELIST entry
1559 * so it can be freed. 1850 * so it can be freed.
@@ -1568,9 +1859,9 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1568 1859
1569 /* Cleanup node for NPort <nlp_DID> */ 1860 /* Cleanup node for NPort <nlp_DID> */
1570 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1861 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1571 "%d:0900 Cleanup node for NPort x%x " 1862 "%d (%d):0900 Cleanup node for NPort x%x "
1572 "Data: x%x x%x x%x\n", 1863 "Data: x%x x%x x%x\n",
1573 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 1864 phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
1574 ndlp->nlp_state, ndlp->nlp_rpi); 1865 ndlp->nlp_state, ndlp->nlp_rpi);
1575 1866
1576 lpfc_dequeue_node(vport, ndlp); 1867 lpfc_dequeue_node(vport, ndlp);
@@ -1587,7 +1878,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1587 spin_lock_irq(&phba->hbalock); 1878 spin_lock_irq(&phba->hbalock);
1588 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1879 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1589 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1880 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1590 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1881 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1591 mp = (struct lpfc_dmabuf *) (mb->context1); 1882 mp = (struct lpfc_dmabuf *) (mb->context1);
1592 if (mp) { 1883 if (mp) {
1593 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 1884 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1607,9 +1898,12 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1607 1898
1608 ndlp->nlp_last_elscmd = 0; 1899 ndlp->nlp_last_elscmd = 0;
1609 del_timer_sync(&ndlp->nlp_delayfunc); 1900 del_timer_sync(&ndlp->nlp_delayfunc);
1901 del_timer_sync(&ndlp->nlp_initiator_tmr);
1610 1902
1611 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 1903 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1612 list_del_init(&ndlp->els_retry_evt.evt_listp); 1904 list_del_init(&ndlp->els_retry_evt.evt_listp);
1905 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
1906 list_del_init(&ndlp->dev_loss_evt.evt_listp);
1613 1907
1614 lpfc_unreg_rpi(vport, ndlp); 1908 lpfc_unreg_rpi(vport, ndlp);
1615 1909
@@ -1633,12 +1927,11 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1633 lpfc_cleanup_node(vport, ndlp); 1927 lpfc_cleanup_node(vport, ndlp);
1634 1928
1635 /* 1929 /*
1636 * We should never get here with a non-NULL ndlp->rport. But 1930 * We can get here with a non-NULL ndlp->rport because when we
1637 * if we do, drop the reference to the rport. That seems the 1931 * unregister a rport we don't break the rport/node linkage. So if we
1638 * intelligent thing to do. 1932 * do, make sure we don't leave any dangling pointers behind.
1639 */ 1933 */
1640 if (ndlp->rport && !(vport->load_flag & FC_UNLOADING)) { 1934 if (ndlp->rport) {
1641 put_device(&ndlp->rport->dev);
1642 rdata = ndlp->rport->dd_data; 1935 rdata = ndlp->rport->dd_data;
1643 rdata->pnode = NULL; 1936 rdata->pnode = NULL;
1644 ndlp->rport = NULL; 1937 ndlp->rport = NULL;
@@ -1709,9 +2002,9 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
1709 ((uint32_t) ndlp->nlp_type << 8) | 2002 ((uint32_t) ndlp->nlp_type << 8) |
1710 ((uint32_t) ndlp->nlp_rpi & 0xff)); 2003 ((uint32_t) ndlp->nlp_rpi & 0xff));
1711 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 2004 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1712 "%d:0929 FIND node DID " 2005 "%d (%d):0929 FIND node DID "
1713 " Data: x%p x%x x%x x%x\n", 2006 " Data: x%p x%x x%x x%x\n",
1714 phba->brd_no, 2007 phba->brd_no, vport->vpi,
1715 ndlp, ndlp->nlp_DID, 2008 ndlp, ndlp->nlp_DID,
1716 ndlp->nlp_flag, data1); 2009 ndlp->nlp_flag, data1);
1717 return ndlp; 2010 return ndlp;
@@ -1720,8 +2013,8 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
1720 2013
1721 /* FIND node did <did> NOT FOUND */ 2014 /* FIND node did <did> NOT FOUND */
1722 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 2015 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1723 "%d:0932 FIND node did x%x NOT FOUND.\n", 2016 "%d (%d):0932 FIND node did x%x NOT FOUND.\n",
1724 phba->brd_no, did); 2017 phba->brd_no, vport->vpi, did);
1725 return NULL; 2018 return NULL;
1726} 2019}
1727 2020
@@ -1835,6 +2128,14 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
1835 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring]; 2128 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
1836 int rc; 2129 int rc;
1837 2130
2131 /*
2132 * If it's not a physical port or if we already sent
2133 * clear_la, then don't send it.
2134 */
2135 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2136 (vport->port_type != LPFC_PHYSICAL_PORT))
2137 return;
2138
1838 /* Link up discovery */ 2139 /* Link up discovery */
1839 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { 2140 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
1840 phba->link_state = LPFC_CLEAR_LA; 2141 phba->link_state = LPFC_CLEAR_LA;
@@ -1849,7 +2150,26 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
1849 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 2150 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
1850 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 2151 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
1851 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 2152 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
1852 vport->port_state = LPFC_VPORT_READY; 2153 phba->link_state = LPFC_HBA_ERROR;
2154 }
2155 }
2156}
2157
2158/* Reg_vpi to tell firmware to resume normal operations */
2159void
2160lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2161{
2162 LPFC_MBOXQ_t *regvpimbox;
2163
2164 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2165 if (regvpimbox) {
2166 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2167 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2168 regvpimbox->vport = vport;
2169 if (lpfc_sli_issue_mbox(phba, regvpimbox,
2170 (MBX_NOWAIT | MBX_STOP_IOCB))
2171 == MBX_NOT_FINISHED) {
2172 mempool_free(regvpimbox, phba->mbox_mem_pool);
1853 } 2173 }
1854 } 2174 }
1855} 2175}
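Taken together, lpfc_issue_clear_la() and the new lpfc_issue_reg_vpi() give discovery two ways to tell the firmware it is finishing up: a physical port on a pre-NPIV (SLI-2 style) link sends CLEAR_LA, while an NPIV-enabled SLI-3 port registers its VPI and lets lpfc_mbx_cmpl_reg_vpi move port_state to READY. A rough sketch of the selection logic as the later hunks of this patch apply it (the helper itself is illustrative; only the called functions and flags come from the patch):

	/* Illustration only: choose the "discovery is done" mailbox. */
	static void example_finish_discovery(struct lpfc_hba *phba,
					     struct lpfc_vport *vport)
	{
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			/* SLI-3/NPIV: cmpl_reg_vpi sets port_state to READY */
			lpfc_issue_reg_vpi(phba, vport);
		} else {
			/* SLI-2: CLEAR_LA (physical port only), then READY */
			lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		}
	}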
@@ -1860,7 +2180,6 @@ lpfc_disc_start(struct lpfc_vport *vport)
1860{ 2180{
1861 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2181 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1862 struct lpfc_hba *phba = vport->phba; 2182 struct lpfc_hba *phba = vport->phba;
1863 struct lpfc_nodelist *ndlp, *next_ndlp;
1864 uint32_t num_sent; 2183 uint32_t num_sent;
1865 uint32_t clear_la_pending; 2184 uint32_t clear_la_pending;
1866 int did_changed; 2185 int did_changed;
@@ -1888,21 +2207,11 @@ lpfc_disc_start(struct lpfc_vport *vport)
1888 2207
1889 /* Start Discovery state <hba_state> */ 2208 /* Start Discovery state <hba_state> */
1890 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2209 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1891 "%d:0202 Start Discovery hba state x%x " 2210 "%d (%d):0202 Start Discovery hba state x%x "
1892 "Data: x%x x%x x%x\n", 2211 "Data: x%x x%x x%x\n",
1893 phba->brd_no, vport->port_state, vport->fc_flag, 2212 phba->brd_no, vport->vpi, vport->port_state,
1894 vport->fc_plogi_cnt, vport->fc_adisc_cnt); 2213 vport->fc_flag, vport->fc_plogi_cnt,
1895 2214 vport->fc_adisc_cnt);
1896 /* If our did changed, we MUST do PLOGI */
1897 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1898 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
1899 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
1900 did_changed) {
1901 spin_lock_irq(shost->host_lock);
1902 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1903 spin_unlock_irq(shost->host_lock);
1904 }
1905 }
1906 2215
1907 /* First do ADISCs - if any */ 2216 /* First do ADISCs - if any */
1908 num_sent = lpfc_els_disc_adisc(vport); 2217 num_sent = lpfc_els_disc_adisc(vport);
@@ -1910,12 +2219,26 @@ lpfc_disc_start(struct lpfc_vport *vport)
1910 if (num_sent) 2219 if (num_sent)
1911 return; 2220 return;
1912 2221
2222 /*
2223 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2224 * continue discovery.
2225 */
2226 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2227 !(vport->fc_flag & FC_RSCN_MODE)) {
2228 lpfc_issue_reg_vpi(phba, vport);
2229 return;
2230 }
2231
2232 /*
2233 * For SLI2, we need to set port_state to READY and continue
2234 * discovery.
2235 */
1913 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { 2236 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
1914 if (vport->port_type == LPFC_PHYSICAL_PORT) {
1915 /* If we get here, there is nothing to ADISC */ 2237 /* If we get here, there is nothing to ADISC */
2238 if (vport->port_type == LPFC_PHYSICAL_PORT)
1916 lpfc_issue_clear_la(phba, vport); 2239 lpfc_issue_clear_la(phba, vport);
1917 } else if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1918 2240
2241 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1919 vport->num_disc_nodes = 0; 2242 vport->num_disc_nodes = 0;
1920 /* go thru NPR nodes and issue ELS PLOGIs */ 2243 /* go thru NPR nodes and issue ELS PLOGIs */
1921 if (vport->fc_npr_cnt) 2244 if (vport->fc_npr_cnt)
@@ -1925,9 +2248,10 @@ lpfc_disc_start(struct lpfc_vport *vport)
1925 spin_lock_irq(shost->host_lock); 2248 spin_lock_irq(shost->host_lock);
1926 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2249 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1927 spin_unlock_irq(shost->host_lock); 2250 spin_unlock_irq(shost->host_lock);
2251 lpfc_can_disctmo(vport);
1928 } 2252 }
1929 vport->port_state = LPFC_VPORT_READY;
1930 } 2253 }
2254 vport->port_state = LPFC_VPORT_READY;
1931 } else { 2255 } else {
1932 /* Next do PLOGIs - if any */ 2256 /* Next do PLOGIs - if any */
1933 num_sent = lpfc_els_disc_plogi(vport); 2257 num_sent = lpfc_els_disc_plogi(vport);
@@ -1944,6 +2268,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
1944 spin_lock_irq(shost->host_lock); 2268 spin_lock_irq(shost->host_lock);
1945 vport->fc_flag &= ~FC_RSCN_MODE; 2269 vport->fc_flag &= ~FC_RSCN_MODE;
1946 spin_unlock_irq(shost->host_lock); 2270 spin_unlock_irq(shost->host_lock);
2271 lpfc_can_disctmo(vport);
1947 } else 2272 } else
1948 lpfc_els_handle_rscn(vport); 2273 lpfc_els_handle_rscn(vport);
1949 } 2274 }
@@ -1999,7 +2324,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1999 2324
2000 while (!list_empty(&completions)) { 2325 while (!list_empty(&completions)) {
2001 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 2326 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2002 list_del(&iocb->list); 2327 list_del_init(&iocb->list);
2003 2328
2004 if (!iocb->iocb_cmpl) 2329 if (!iocb->iocb_cmpl)
2005 lpfc_sli_release_iocbq(phba, iocb); 2330 lpfc_sli_release_iocbq(phba, iocb);
@@ -2030,6 +2355,14 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
2030 } 2355 }
2031} 2356}
2032 2357
2358void
2359lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2360{
2361 lpfc_els_flush_rscn(vport);
2362 lpfc_els_flush_cmd(vport);
2363 lpfc_disc_flush_list(vport);
2364}
2365
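lpfc_cleanup_discovery_resources() bundles the three flushes a dying port needs: pending RSCNs, outstanding ELS commands, and the discovery node list. A hedged usage sketch; the real call site is expected to be the vport teardown path added elsewhere in this series and is not shown in this hunk:

	/* Illustration only: flush discovery state before deleting a vport. */
	static void example_vport_teardown(struct lpfc_vport *vport)
	{
		lpfc_cleanup_discovery_resources(vport);
		/* ... unregister RPIs and the VPI, then free the vport ... */
	}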
2033/*****************************************************************************/ 2366/*****************************************************************************/
2034/* 2367/*
2035 * NAME: lpfc_disc_timeout 2368 * NAME: lpfc_disc_timeout
@@ -2060,8 +2393,10 @@ lpfc_disc_timeout(unsigned long ptr)
2060 vport->work_port_events |= WORKER_DISC_TMO; 2393 vport->work_port_events |= WORKER_DISC_TMO;
2061 spin_unlock_irqrestore(&vport->work_port_lock, flags); 2394 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2062 2395
2396 spin_lock_irqsave(&phba->hbalock, flags);
2063 if (phba->work_wait) 2397 if (phba->work_wait)
2064 wake_up(phba->work_wait); 2398 lpfc_worker_wake_up(phba);
2399 spin_unlock_irqrestore(&phba->hbalock, flags);
2065 } 2400 }
2066 return; 2401 return;
2067} 2402}
@@ -2073,7 +2408,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2073 struct lpfc_hba *phba = vport->phba; 2408 struct lpfc_hba *phba = vport->phba;
2074 struct lpfc_sli *psli = &phba->sli; 2409 struct lpfc_sli *psli = &phba->sli;
2075 struct lpfc_nodelist *ndlp, *next_ndlp; 2410 struct lpfc_nodelist *ndlp, *next_ndlp;
2076 LPFC_MBOXQ_t *clearlambox, *initlinkmbox; 2411 LPFC_MBOXQ_t *initlinkmbox;
2077 int rc, clrlaerr = 0; 2412 int rc, clrlaerr = 0;
2078 2413
2079 if (!(vport->fc_flag & FC_DISC_TMO)) 2414 if (!(vport->fc_flag & FC_DISC_TMO))
@@ -2091,8 +2426,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2091 */ 2426 */
2092 /* FAN timeout */ 2427 /* FAN timeout */
2093 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY, 2428 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
2094 "%d:0221 FAN timeout\n", 2429 "%d (%d):0221 FAN timeout\n",
2095 phba->brd_no); 2430 phba->brd_no, vport->vpi);
2096 2431
2097 /* Start discovery by sending FLOGI, clean up old rpis */ 2432 /* Start discovery by sending FLOGI, clean up old rpis */
2098 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 2433 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
@@ -2109,17 +2444,21 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2109 lpfc_unreg_rpi(vport, ndlp); 2444 lpfc_unreg_rpi(vport, ndlp);
2110 } 2445 }
2111 } 2446 }
2112 vport->port_state = LPFC_FLOGI; 2447 if (vport->port_state != LPFC_FLOGI) {
2113 lpfc_set_disctmo(vport); 2448 vport->port_state = LPFC_FLOGI;
2114 lpfc_initial_flogi(vport); 2449 lpfc_set_disctmo(vport);
2450 lpfc_initial_flogi(vport);
2451 }
2115 break; 2452 break;
2116 2453
2454 case LPFC_FDISC:
2117 case LPFC_FLOGI: 2455 case LPFC_FLOGI:
2118 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 2456 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2119 /* Initial FLOGI timeout */ 2457 /* Initial FLOGI timeout */
2120 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2458 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2121 "%d:0222 Initial FLOGI timeout\n", 2459 "%d (%d):0222 Initial %s timeout\n",
2122 phba->brd_no); 2460 phba->brd_no, vport->vpi,
2461 vport->vpi ? "FDISC" : "FLOGI");
2123 2462
2124 /* Assume no Fabric and go on with discovery. 2463 /* Assume no Fabric and go on with discovery.
2125 * Check for outstanding ELS FLOGI to abort. 2464 * Check for outstanding ELS FLOGI to abort.
@@ -2136,8 +2475,9 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2136 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for 2475 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2137 NameServer login */ 2476 NameServer login */
2138 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2477 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2139 "%d:0223 Timeout while waiting for NameServer " 2478 "%d (%d):0223 Timeout while waiting for "
2140 "login\n", phba->brd_no); 2479 "NameServer login\n",
2480 phba->brd_no, vport->vpi);
2141 2481
2142 /* Next look for NameServer ndlp */ 2482 /* Next look for NameServer ndlp */
2143 ndlp = lpfc_findnode_did(vport, NameServer_DID); 2483 ndlp = lpfc_findnode_did(vport, NameServer_DID);
@@ -2150,53 +2490,40 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2150 case LPFC_NS_QRY: 2490 case LPFC_NS_QRY:
2151 /* Check for wait for NameServer Rsp timeout */ 2491 /* Check for wait for NameServer Rsp timeout */
2152 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2492 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2153 "%d:0224 NameServer Query timeout " 2493 "%d (%d):0224 NameServer Query timeout "
2154 "Data: x%x x%x\n", 2494 "Data: x%x x%x\n",
2155 phba->brd_no, 2495 phba->brd_no, vport->vpi,
2156 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); 2496 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2157 2497
2158 ndlp = lpfc_findnode_did(vport, NameServer_DID); 2498 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2159 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 2499 /* Try it one more time */
2160 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 2500 vport->fc_ns_retry++;
2161 /* Try it one more time */ 2501 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2162 rc = lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT); 2502 vport->fc_ns_retry, 0);
2163 if (rc == 0) 2503 if (rc == 0)
2164 break; 2504 break;
2165 }
2166 vport->fc_ns_retry = 0;
2167 }
2168
2169 /* Nothing to authenticate, so CLEAR_LA right now */
2170 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2171 if (!clearlambox) {
2172 clrlaerr = 1;
2173 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2174 "%d:0226 Device Discovery "
2175 "completion error\n",
2176 phba->brd_no);
2177 phba->link_state = LPFC_HBA_ERROR;
2178 break;
2179 } 2505 }
2506 vport->fc_ns_retry = 0;
2180 2507
2181 phba->link_state = LPFC_CLEAR_LA; 2508 /*
2182 lpfc_clear_la(phba, clearlambox); 2509 * Discovery is over.
2183 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 2510 * set port_state to PORT_READY if SLI2.
2184 clearlambox->vport = vport; 2511 * cmpl_reg_vpi will set port_state to READY for SLI3.
2185 rc = lpfc_sli_issue_mbox(phba, clearlambox, 2512 */
2186 (MBX_NOWAIT | MBX_STOP_IOCB)); 2513 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2187 if (rc == MBX_NOT_FINISHED) { 2514 lpfc_issue_reg_vpi(phba, vport);
2188 mempool_free(clearlambox, phba->mbox_mem_pool); 2515 else { /* NPIV Not enabled */
2189 clrlaerr = 1; 2516 lpfc_issue_clear_la(phba, vport);
2190 break; 2517 vport->port_state = LPFC_VPORT_READY;
2191 } 2518 }
2192 2519
2193 /* Setup and issue mailbox INITIALIZE LINK command */ 2520 /* Setup and issue mailbox INITIALIZE LINK command */
2194 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2521 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2195 if (!initlinkmbox) { 2522 if (!initlinkmbox) {
2196 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2523 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2197 "%d:0206 Device Discovery " 2524 "%d (%d):0206 Device Discovery "
2198 "completion error\n", 2525 "completion error\n",
2199 phba->brd_no); 2526 phba->brd_no, vport->vpi);
2200 phba->link_state = LPFC_HBA_ERROR; 2527 phba->link_state = LPFC_HBA_ERROR;
2201 break; 2528 break;
2202 } 2529 }
@@ -2206,6 +2533,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2206 phba->cfg_link_speed); 2533 phba->cfg_link_speed);
2207 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 2534 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2208 initlinkmbox->vport = vport; 2535 initlinkmbox->vport = vport;
2536 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2209 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, 2537 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2210 (MBX_NOWAIT | MBX_STOP_IOCB)); 2538 (MBX_NOWAIT | MBX_STOP_IOCB));
2211 lpfc_set_loopback_flag(phba); 2539 lpfc_set_loopback_flag(phba);
@@ -2217,37 +2545,28 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2217 case LPFC_DISC_AUTH: 2545 case LPFC_DISC_AUTH:
2218 /* Node Authentication timeout */ 2546 /* Node Authentication timeout */
2219 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2547 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2220 "%d:0227 Node Authentication timeout\n", 2548 "%d (%d):0227 Node Authentication timeout\n",
2221 phba->brd_no); 2549 phba->brd_no, vport->vpi);
2222 lpfc_disc_flush_list(vport); 2550 lpfc_disc_flush_list(vport);
2223 2551
2224 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2552 /*
2225 if (!clearlambox) { 2553 * set port_state to PORT_READY if SLI2.
2226 clrlaerr = 1; 2554 * cmpl_reg_vpi will set port_state to READY for SLI3.
2227 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2555 */
2228 "%d:0207 Device Discovery " 2556 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2229 "completion error\n", 2557 lpfc_issue_reg_vpi(phba, vport);
2230 phba->brd_no); 2558 else { /* NPIV Not enabled */
2231 phba->link_state = LPFC_HBA_ERROR; 2559 lpfc_issue_clear_la(phba, vport);
2232 break; 2560 vport->port_state = LPFC_VPORT_READY;
2233 }
2234 phba->link_state = LPFC_CLEAR_LA;
2235 lpfc_clear_la(phba, clearlambox);
2236 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2237 clearlambox->vport = vport;
2238 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2239 (MBX_NOWAIT | MBX_STOP_IOCB));
2240 if (rc == MBX_NOT_FINISHED) {
2241 mempool_free(clearlambox, phba->mbox_mem_pool);
2242 clrlaerr = 1;
2243 } 2561 }
2244 break; 2562 break;
2245 2563
2246 case LPFC_VPORT_READY: 2564 case LPFC_VPORT_READY:
2247 if (vport->fc_flag & FC_RSCN_MODE) { 2565 if (vport->fc_flag & FC_RSCN_MODE) {
2248 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2566 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2249 "%d:0231 RSCN timeout Data: x%x x%x\n", 2567 "%d (%d):0231 RSCN timeout Data: x%x "
2250 phba->brd_no, 2568 "x%x\n",
2569 phba->brd_no, vport->vpi,
2251 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); 2570 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2252 2571
2253 /* Cleanup any outstanding ELS commands */ 2572 /* Cleanup any outstanding ELS commands */
@@ -2258,23 +2577,21 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2258 } 2577 }
2259 break; 2578 break;
2260 2579
2261 case LPFC_STATE_UNKNOWN: 2580 default:
2262 case LPFC_NS_REG:
2263 case LPFC_BUILD_DISC_LIST:
2264 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2581 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2265 "%d:0229 Unexpected discovery timeout, vport " 2582 "%d (%d):0229 Unexpected discovery timeout, "
2266 "State x%x\n", 2583 "vport State x%x\n",
2267 vport->port_state, phba->brd_no); 2584 phba->brd_no, vport->vpi, vport->port_state);
2268 2585
2269 break; 2586 break;
2270 } 2587 }
2271 2588
2272 switch (phba->link_state) { 2589 switch (phba->link_state) {
2273 case LPFC_CLEAR_LA: 2590 case LPFC_CLEAR_LA:
2274 /* CLEAR LA timeout */ 2591 /* CLEAR LA timeout */
2275 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2592 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2276 "%d:0228 CLEAR LA timeout\n", 2593 "%d (%d):0228 CLEAR LA timeout\n",
2277 phba->brd_no); 2594 phba->brd_no, vport->vpi);
2278 clrlaerr = 1; 2595 clrlaerr = 1;
2279 break; 2596 break;
2280 2597
@@ -2286,11 +2603,14 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2286 case LPFC_LINK_UP: 2603 case LPFC_LINK_UP:
2287 case LPFC_HBA_ERROR: 2604 case LPFC_HBA_ERROR:
2288 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2605 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2289 "%d:0230 Unexpected timeout, hba link " 2606 "%d (%d):0230 Unexpected timeout, hba link "
2290 "state x%x\n", 2607 "state x%x\n",
2291 phba->brd_no, phba->link_state); 2608 phba->brd_no, vport->vpi, phba->link_state);
2292 clrlaerr = 1; 2609 clrlaerr = 1;
2293 break; 2610 break;
2611
2612 case LPFC_HBA_READY:
2613 break;
2294 } 2614 }
2295 2615
2296 if (clrlaerr) { 2616 if (clrlaerr) {
@@ -2374,7 +2694,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2374 2694
2375/* 2695/*
2376 * Search node lists for a remote port matching filter criteria 2696 * Search node lists for a remote port matching filter criteria
2377 * This routine is used when the caller does NOT have host_lock. 2697 * Caller needs to hold host_lock before calling this routine.
2378 */ 2698 */
2379struct lpfc_nodelist * 2699struct lpfc_nodelist *
2380lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) 2700lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
@@ -2427,11 +2747,41 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
2427} 2747}
2428 2748
2429void 2749void
2750lpfc_dev_loss_delay(unsigned long ptr)
2751{
2752 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2753 struct lpfc_vport *vport = ndlp->vport;
2754 struct lpfc_hba *phba = vport->phba;
2755 struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
2756 unsigned long flags;
2757
2758 evtp = &ndlp->dev_loss_evt;
2759
2760 spin_lock_irqsave(&phba->hbalock, flags);
2761 if (!list_empty(&evtp->evt_listp)) {
2762 spin_unlock_irqrestore(&phba->hbalock, flags);
2763 return;
2764 }
2765
2766 evtp->evt_arg1 = ndlp;
2767 evtp->evt = LPFC_EVT_DEV_LOSS;
2768 list_add_tail(&evtp->evt_listp, &phba->work_list);
2769 if (phba->work_wait)
2770 lpfc_worker_wake_up(phba);
2771 spin_unlock_irqrestore(&phba->hbalock, flags);
2772 return;
2773}
2774
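lpfc_dev_loss_delay() is the expiry handler for the new per-node nlp_initiator_tmr: holding hbalock, it queues the node's dev_loss_evt as an LPFC_EVT_DEV_LOSS work item (skipping the enqueue if one is already pending) and wakes the worker thread, so dev-loss processing runs in process context rather than in timer context. A minimal sketch of arming that timer, with an assumed one-second delay; the actual arm site and delay value are not part of this hunk:

	/* Illustration only: schedule deferred dev-loss handling for a node. */
	static void example_arm_initiator_tmr(struct lpfc_nodelist *ndlp)
	{
		unsigned long delay = msecs_to_jiffies(1000);	/* assumed */

		mod_timer(&ndlp->nlp_initiator_tmr, jiffies + delay);
	}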
2775void
2430lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2776lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2431 uint32_t did) 2777 uint32_t did)
2432{ 2778{
2433 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 2779 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2434 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 2780 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2781 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2782 init_timer(&ndlp->nlp_initiator_tmr);
2783 ndlp->nlp_initiator_tmr.function = lpfc_dev_loss_delay;
2784 ndlp->nlp_initiator_tmr.data = (unsigned long)ndlp;
2435 init_timer(&ndlp->nlp_delayfunc); 2785 init_timer(&ndlp->nlp_delayfunc);
2436 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 2786 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2437 ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 2787 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;