Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c	79
1 file changed, 49 insertions(+), 30 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9244aa64b3b..da9ba06ad58 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3300,10 +3300,10 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
 		if (!ndlp)
 			return 0;
 	}
-	if (phba->pport->port_state <= LPFC_FLOGI)
+	if (phba->pport->port_state < LPFC_FLOGI)
 		return NULL;
 	/* If virtual link is not yet instantiated ignore CVL */
-	if (vport->port_state <= LPFC_FDISC)
+	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC))
 		return NULL;
 	shost = lpfc_shost_from_vport(vport);
 	if (!shost)
@@ -3376,21 +3376,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3376 "evt_tag:x%x, fcf_index:x%x\n", 3376 "evt_tag:x%x, fcf_index:x%x\n",
3377 acqe_fcoe->event_tag, 3377 acqe_fcoe->event_tag,
3378 acqe_fcoe->index); 3378 acqe_fcoe->index);
3379 /* If the FCF discovery is in progress, do nothing. */ 3379 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3380 spin_lock_irq(&phba->hbalock);
3381 if (phba->hba_flag & FCF_DISC_INPROGRESS) {
3382 spin_unlock_irq(&phba->hbalock);
3383 break;
3384 }
3385 /* If fast FCF failover rescan event is pending, do nothing */
3386 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3387 spin_unlock_irq(&phba->hbalock);
3388 break;
3389 }
3390 spin_unlock_irq(&phba->hbalock);
3391
3392 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3393 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3394 /* 3380 /*
3395 * During period of FCF discovery, read the FCF 3381 * During period of FCF discovery, read the FCF
3396 * table record indexed by the event to update 3382 * table record indexed by the event to update
@@ -3404,13 +3390,26 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 					acqe_fcoe->index);
 			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
 		}
-		/* If the FCF has been in discovered state, do nothing. */
+
+		/* If the FCF discovery is in progress, do nothing. */
 		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+		/* If fast FCF failover rescan event is pending, do nothing */
+		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+
+		/* If the FCF has been in discovered state, do nothing. */
 		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
 		spin_unlock_irq(&phba->hbalock);
+
 		/* Otherwise, scan the entire FCF table and re-discover SAN */
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
 				"2770 Start FCF table scan due to new FCF "
@@ -3436,13 +3435,9 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3436 "2549 FCF disconnected from network index 0x%x" 3435 "2549 FCF disconnected from network index 0x%x"
3437 " tag 0x%x\n", acqe_fcoe->index, 3436 " tag 0x%x\n", acqe_fcoe->index,
3438 acqe_fcoe->event_tag); 3437 acqe_fcoe->event_tag);
3439 /* If the event is not for currently used fcf do nothing */ 3438 /*
3440 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3439 * If we are in the middle of FCF failover process, clear
3441 break; 3440 * the corresponding FCF bit in the roundrobin bitmap.
3442 /* We request port to rediscover the entire FCF table for
3443 * a fast recovery from case that the current FCF record
3444 * is no longer valid if we are not in the middle of FCF
3445 * failover process already.
3446 */ 3441 */
3447 spin_lock_irq(&phba->hbalock); 3442 spin_lock_irq(&phba->hbalock);
3448 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3443 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
@@ -3451,9 +3446,23 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
 			break;
 		}
+		spin_unlock_irq(&phba->hbalock);
+
+		/* If the event is not for currently used fcf do nothing */
+		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
+			break;
+
+		/*
+		 * Otherwise, request the port to rediscover the entire FCF
+		 * table for a fast recovery from case that the current FCF
+		 * is no longer valid as we are not in the middle of FCF
+		 * failover process already.
+		 */
+		spin_lock_irq(&phba->hbalock);
 		/* Mark the fast failover process in progress */
 		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
 		spin_unlock_irq(&phba->hbalock);
+
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
 			"2771 Start FCF fast failover process due to "
 			"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
@@ -3473,12 +3482,16 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			 * as a link down to FCF registration.
 			 */
 			lpfc_sli4_fcf_dead_failthrough(phba);
-		} else
-			/* Handling fast FCF failover to a DEAD FCF event
-			 * is considered equalivant to receiving CVL to all
-			 * vports.
+		} else {
+			/* Reset FCF roundrobin bmask for new discovery */
+			memset(phba->fcf.fcf_rr_bmask, 0,
+			       sizeof(*phba->fcf.fcf_rr_bmask));
+			/*
+			 * Handling fast FCF failover to a DEAD FCF event is
+			 * considered equalivant to receiving CVL to all vports.
 			 */
 			lpfc_sli4_perform_all_vport_cvl(phba);
+		}
 		break;
 	case LPFC_FCOE_EVENT_TYPE_CVL:
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
@@ -3553,7 +3566,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 				 * the current registered FCF entry.
 				 */
 				lpfc_retry_pport_discovery(phba);
-			}
+			} else
+				/*
+				 * Reset FCF roundrobin bmask for new
+				 * discovery.
+				 */
+				memset(phba->fcf.fcf_rr_bmask, 0,
+				       sizeof(*phba->fcf.fcf_rr_bmask));
 		}
 		break;
 	default: