author     James Smart <james.smart@emulex.com>        2010-02-26 14:15:57 -0500
committer  James Bottomley <James.Bottomley@suse.de>   2010-03-03 08:40:09 -0500
commit     0c9ab6f5cb28199ef5de84874d135ed44f64d92b (patch)
tree       51140c5edce1250e0c06b5a38b540b533b092247 /drivers/scsi/lpfc/lpfc_init.c
parent     fc2b989be9190f3311a5ae41289828e24897a20e (diff)
[SCSI] lpfc 8.3.10: Added round robin FCF failover
- Added round robin FCF failover on initial or FCF rediscovery FLOGI failure.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  123
1 file changed, 95 insertions(+), 28 deletions(-)
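
The heart of the change is phba->fcf.fcf_rr_bmask, a bitmap of FCF table indexes eligible for round robin failover: it is allocated in lpfc_sli4_driver_resource_setup() below, freed in lpfc_sli4_driver_resource_unset(), and trimmed by lpfc_sli4_fcf_rr_index_clear() when an FCF DEAD event arrives. The fragment below is only an illustrative sketch of how such a bmask can be maintained with the standard kernel bitmap primitives; the helper names fcf_rr_index_set() and fcf_rr_next_index() are hypothetical and are not part of this patch, while LPFC_SLI4_FCF_TBL_INDX_MAX comes from the driver headers.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Mark an FCF table index as eligible for round robin failover. */
static void fcf_rr_index_set(unsigned long *bmask, uint16_t fcf_index)
{
	if (fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX)
		set_bit(fcf_index, bmask);
}

/* Drop an index from the eligible set, e.g. after an FCF DEAD event. */
static void fcf_rr_index_clear(unsigned long *bmask, uint16_t fcf_index)
{
	if (fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX)
		clear_bit(fcf_index, bmask);
}

/*
 * Return the next eligible FCF index after the one currently in use,
 * wrapping around to the start of the table; this is the "round robin"
 * part of the failover.  A return value >= LPFC_SLI4_FCF_TBL_INDX_MAX
 * means no eligible FCF is left and the caller must handle that case.
 */
static uint16_t fcf_rr_next_index(unsigned long *bmask, uint16_t curr_index)
{
	unsigned long next;

	next = find_next_bit(bmask, LPFC_SLI4_FCF_TBL_INDX_MAX,
			     curr_index + 1);
	if (next >= LPFC_SLI4_FCF_TBL_INDX_MAX)
		next = find_first_bit(bmask, LPFC_SLI4_FCF_TBL_INDX_MAX);
	return (uint16_t)next;
}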
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ff45e336917a..ea44239eeb33 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2201,8 +2201,8 @@ __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 {
 	/* Clear pending FCF rediscovery wait and failover in progress flags */
 	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
-				FCF_DEAD_FOVER |
-				FCF_CVL_FOVER);
+				FCF_DEAD_DISC |
+				FCF_ACVL_DISC);
 	/* Now, try to stop the timer */
 	del_timer(&phba->fcf.redisc_wait);
 }
@@ -2943,6 +2943,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
 	/* FCF rediscovery event to worker thread */
 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
 	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2776 FCF rediscover wait timer expired, post "
+			"a worker thread event for FCF table scan\n");
 	/* wake up worker thread */
 	lpfc_worker_wake_up(phba);
 }
@@ -3300,10 +3303,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
 	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2546 New FCF found index 0x%x tag 0x%x\n",
-			acqe_fcoe->index,
-			acqe_fcoe->event_tag);
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2546 New FCF found/FCF parameter modified event: "
+			"evt_tag:x%x, fcf_index:x%x\n",
+			acqe_fcoe->event_tag, acqe_fcoe->index);
+
 		spin_lock_irq(&phba->hbalock);
 		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
 		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3314,6 +3318,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
+
 		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
 			/*
 			 * If fast FCF failover rescan event is pending,
@@ -3324,12 +3329,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		}
 		spin_unlock_irq(&phba->hbalock);
 
-		/* Read the FCF table and re-discover SAN. */
-		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+			/*
+			 * During period of FCF discovery, read the FCF
+			 * table record indexed by the event to update
+			 * FCF round robin failover eligible FCF bmask.
+			 */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2779 Read new FCF record with "
+					"fcf_index:x%x for updating FCF "
+					"round robin failover bmask\n",
+					acqe_fcoe->index);
+			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
+		}
+
+		/* Otherwise, scan the entire FCF table and re-discover SAN */
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2770 Start FCF table scan due to new FCF "
+				"event: evt_tag:x%x, fcf_index:x%x\n",
+				acqe_fcoe->event_tag, acqe_fcoe->index);
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
-			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-				"2547 Read FCF record failed 0x%x\n",
-				rc);
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2547 Issue FCF scan read FCF mailbox "
+				"command failed 0x%x\n", rc);
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3340,7 +3366,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2549 FCF disconnected from network index 0x%x"
 			" tag 0x%x\n", acqe_fcoe->index,
 			acqe_fcoe->event_tag);
@@ -3349,21 +3375,32 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			break;
 		/* We request port to rediscover the entire FCF table for
 		 * a fast recovery from case that the current FCF record
-		 * is no longer valid if the last CVL event hasn't already
-		 * triggered process.
+		 * is no longer valid if we are not in the middle of FCF
+		 * failover process already.
 		 */
 		spin_lock_irq(&phba->hbalock);
-		if (phba->fcf.fcf_flag & FCF_CVL_FOVER) {
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 			spin_unlock_irq(&phba->hbalock);
+			/* Update FLOGI FCF failover eligible FCF bmask */
+			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
 			break;
 		}
 		/* Mark the fast failover process in progress */
-		phba->fcf.fcf_flag |= FCF_DEAD_FOVER;
+		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
 		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2771 Start FCF fast failover process due to "
+			"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+			"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
 		rc = lpfc_sli4_redisc_fcf_table(phba);
 		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2772 Issue FCF rediscover mabilbox "
+					"command failed, fail through to FCF "
+					"dead event\n");
 			spin_lock_irq(&phba->hbalock);
-			phba->fcf.fcf_flag &= ~FCF_DEAD_FOVER;
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
 			spin_unlock_irq(&phba->hbalock);
 			/*
 			 * Last resort will fail over by treating this
@@ -3378,7 +3415,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		lpfc_sli4_perform_all_vport_cvl(phba);
 		break;
 	case LPFC_FCOE_EVENT_TYPE_CVL:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2718 Clear Virtual Link Received for VPI 0x%x"
 			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
 		vport = lpfc_find_vport_by_vpid(phba,
@@ -3419,21 +3456,31 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			 * Otherwise, we request port to rediscover
 			 * the entire FCF table for a fast recovery
 			 * from possible case that the current FCF
-			 * is no longer valid if the FCF_DEAD event
-			 * hasn't already triggered process.
+			 * is no longer valid if we are not already
+			 * in the FCF failover process.
 			 */
 			spin_lock_irq(&phba->hbalock);
-			if (phba->fcf.fcf_flag & FCF_DEAD_FOVER) {
+			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 				spin_unlock_irq(&phba->hbalock);
 				break;
 			}
 			/* Mark the fast failover process in progress */
-			phba->fcf.fcf_flag |= FCF_CVL_FOVER;
+			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
 			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2773 Start FCF fast failover due "
+					"to CVL event: evt_tag:x%x\n",
+					acqe_fcoe->event_tag);
 			rc = lpfc_sli4_redisc_fcf_table(phba);
 			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+						LOG_DISCOVERY,
+						"2774 Issue FCF rediscover "
+						"mabilbox command failed, "
+						"through to CVL event\n");
 				spin_lock_irq(&phba->hbalock);
-				phba->fcf.fcf_flag &= ~FCF_CVL_FOVER;
+				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
 				spin_unlock_irq(&phba->hbalock);
 				/*
 				 * Last resort will be re-try on the
@@ -3537,11 +3584,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Scan FCF table from the first entry to re-discover SAN */
-	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2777 Start FCF table scan after FCF "
+			"rediscovery quiescent period over\n");
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 	if (rc)
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2747 Post FCF rediscovery read FCF record "
-			"failed 0x%x\n", rc);
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2747 Issue FCF scan read FCF mailbox "
+			"command failed 0x%x\n", rc);
 }
 
 /**
@@ -3833,6 +3883,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
+	int longs;
 
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
@@ -4009,13 +4060,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_active_sgl;
 	}
 
+	/* Allocate eligible FCF bmask memory for FCF round robin failover */
+	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+					 GFP_KERNEL);
+	if (!phba->fcf.fcf_rr_bmask) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2759 Failed allocate memory for FCF round "
+				"robin failover bmask\n");
+		goto out_remove_rpi_hdrs;
+	}
+
 	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
 				phba->cfg_fcp_eq_count), GFP_KERNEL);
 	if (!phba->sli4_hba.fcp_eq_hdl) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2572 Failed allocate memory for fast-path "
 				"per-EQ handle array\n");
-		goto out_remove_rpi_hdrs;
+		goto out_free_fcf_rr_bmask;
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -4068,6 +4130,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 out_free_fcp_eq_hdl:
 	kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_fcf_rr_bmask:
+	kfree(phba->fcf.fcf_rr_bmask);
 out_remove_rpi_hdrs:
 	lpfc_sli4_remove_rpi_hdrs(phba);
 out_free_active_sgl:
@@ -4113,6 +4177,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	lpfc_sli4_remove_rpi_hdrs(phba);
 	lpfc_sli4_remove_rpis(phba);
 
+	/* Free eligible FCF index bmask */
+	kfree(phba->fcf.fcf_rr_bmask);
+
 	/* Free the ELS sgl list */
 	lpfc_free_active_sgl(phba);
 	lpfc_free_sgl_list(phba);
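
A small aside on the allocation in lpfc_sli4_driver_resource_setup() above: the patch sizes the bmask by rounding LPFC_SLI4_FCF_TBL_INDX_MAX up to a whole number of unsigned longs by hand. The snippet below is not part of the patch; it only notes that the same arithmetic is what the kernel's BITS_TO_LONGS() macro expresses.

	/* Equivalent sizing to the open-coded round-up in the hunk above. */
	longs = BITS_TO_LONGS(LPFC_SLI4_FCF_TBL_INDX_MAX);
	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
					 GFP_KERNEL);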