Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
 drivers/scsi/lpfc/lpfc_init.c | 278 +++++++++++++++++++++++++++++++++-------
 1 file changed, 229 insertions(+), 49 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d29ac7c317d9..774663e8e1fe 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -29,6 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/ctype.h>
 #include <linux/aer.h>
+#include <linux/slab.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -350,7 +351,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	mb = &pmb->u.mb;
 
 	/* Get login parameters for NID. */
-	lpfc_read_sparam(phba, pmb, 0);
+	rc = lpfc_read_sparam(phba, pmb, 0);
+	if (rc) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+
 	pmb->vport = vport;
 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -359,7 +365,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 				mb->mbxCommand, mb->mbxStatus);
 		phba->link_state = LPFC_HBA_ERROR;
 		mp = (struct lpfc_dmabuf *) pmb->context1;
-		mempool_free( pmb, phba->mbox_mem_pool);
+		mempool_free(pmb, phba->mbox_mem_pool);
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		return -EIO;
@@ -544,7 +550,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 			mempool_free(pmb, phba->mbox_mem_pool);
 			return -EIO;
 		}
-	} else if (phba->cfg_suppress_link_up == 0) {
+	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
 		lpfc_init_link(phba, pmb, phba->cfg_topology,
 			phba->cfg_link_speed);
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -571,6 +577,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	}
 	/* MBOX buffer will be freed in mbox compl */
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
 	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
 	pmb->mbox_cmpl = lpfc_config_async_cmpl;
 	pmb->vport = phba->pport;
@@ -588,6 +599,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
 	/* Get Option rom version */
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
 	lpfc_dump_wakeup_param(phba, pmb);
 	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
 	pmb->vport = phba->pport;
@@ -652,7 +668,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
 		mempool_free(pmb, phba->mbox_mem_pool);
 		return -EIO;
 	}
-	phba->cfg_suppress_link_up = 0;
+	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
 
 	return 0;
 }
@@ -807,6 +823,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	LIST_HEAD(aborts);
 	int ret;
 	unsigned long iflag = 0;
+	struct lpfc_sglq *sglq_entry = NULL;
+
 	ret = lpfc_hba_down_post_s3(phba);
 	if (ret)
 		return ret;
@@ -822,6 +840,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	 * list.
 	 */
 	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+	list_for_each_entry(sglq_entry,
+		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
+		sglq_entry->state = SGL_FREED;
+
 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
 			&phba->sli4_hba.lpfc_sgl_list);
 	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -2178,8 +2200,10 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
 void
 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 {
-	/* Clear pending FCF rediscovery wait timer */
-	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+	/* Clear pending FCF rediscovery wait and failover in progress flags */
+	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
+				FCF_DEAD_DISC |
+				FCF_ACVL_DISC);
 	/* Now, try to stop the timer */
 	del_timer(&phba->fcf.redisc_wait);
 }
@@ -2576,6 +2600,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	init_timer(&vport->els_tmofunc);
 	vport->els_tmofunc.function = lpfc_els_timeout;
 	vport->els_tmofunc.data = (unsigned long)vport;
+	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+		phba->menlo_flag |= HBA_MENLO_SUPPORT;
+		/* check for menlo minimum sg count */
+		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
+			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+			shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+		}
+	}
 
 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
@@ -2912,6 +2944,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
 	/* FCF rediscovery event to worker thread */
 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
 	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2776 FCF rediscover wait timer expired, post "
+			"a worker thread event for FCF table scan\n");
 	/* wake up worker thread */
 	lpfc_worker_wake_up(phba);
 }
@@ -3183,6 +3218,68 @@ out_free_pmb:
 }
 
 /**
+ * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
+ * @vport: pointer to vport data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on a vport in
+ * response to a CVL event.
+ *
+ * Return the pointer to the ndlp with the vport if successful, otherwise
+ * return NULL.
+ **/
+static struct lpfc_nodelist *
+lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	struct lpfc_hba *phba;
+
+	if (!vport)
+		return NULL;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp)
+		return NULL;
+	phba = vport->phba;
+	if (!phba)
+		return NULL;
+	if (phba->pport->port_state <= LPFC_FLOGI)
+		return NULL;
+	/* If virtual link is not yet instantiated ignore CVL */
+	if (vport->port_state <= LPFC_FDISC)
+		return NULL;
+	shost = lpfc_shost_from_vport(vport);
+	if (!shost)
+		return NULL;
+	lpfc_linkdown_port(vport);
+	lpfc_cleanup_pending_mbox(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_VPORT_CVL_RCVD;
+	spin_unlock_irq(shost->host_lock);
+
+	return ndlp;
+}
+
+/**
+ * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on all vports in
+ * response to an FCF dead event.
+ **/
+static void
+lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_sli4_perform_vport_cvl(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
  * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -3198,7 +3295,6 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 	struct lpfc_vport *vport;
 	struct lpfc_nodelist *ndlp;
 	struct Scsi_Host *shost;
-	uint32_t link_state;
 	int active_vlink_present;
 	struct lpfc_vport **vports;
 	int i;
@@ -3208,10 +3304,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
 	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2546 New FCF found index 0x%x tag 0x%x\n",
-			acqe_fcoe->index,
-			acqe_fcoe->event_tag);
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2546 New FCF found/FCF parameter modified event: "
+			"evt_tag:x%x, fcf_index:x%x\n",
+			acqe_fcoe->event_tag, acqe_fcoe->index);
+
 		spin_lock_irq(&phba->hbalock);
 		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
 		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3222,6 +3319,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
+
 		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
 			/*
 			 * If fast FCF failover rescan event is pending,
@@ -3232,12 +3330,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		}
 		spin_unlock_irq(&phba->hbalock);
 
-		/* Read the FCF table and re-discover SAN. */
-		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+			/*
+			 * During period of FCF discovery, read the FCF
+			 * table record indexed by the event to update
+			 * FCF round robin failover eligible FCF bmask.
+			 */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2779 Read new FCF record with "
+					"fcf_index:x%x for updating FCF "
+					"round robin failover bmask\n",
+					acqe_fcoe->index);
+			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
+		}
+
+		/* Otherwise, scan the entire FCF table and re-discover SAN */
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2770 Start FCF table scan due to new FCF "
+				"event: evt_tag:x%x, fcf_index:x%x\n",
+				acqe_fcoe->event_tag, acqe_fcoe->index);
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
-			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-				"2547 Read FCF record failed 0x%x\n",
-				rc);
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2547 Issue FCF scan read FCF mailbox "
+				"command failed 0x%x\n", rc);
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3248,47 +3367,63 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2549 FCF disconnected from network index 0x%x"
 			" tag 0x%x\n", acqe_fcoe->index,
 			acqe_fcoe->event_tag);
 		/* If the event is not for currently used fcf do nothing */
 		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
 			break;
-		/*
-		 * Currently, driver support only one FCF - so treat this as
-		 * a link down, but save the link state because we don't want
-		 * it to be changed to Link Down unless it is already down.
+		/* We request port to rediscover the entire FCF table for
+		 * a fast recovery from case that the current FCF record
+		 * is no longer valid if we are not in the middle of FCF
+		 * failover process already.
 		 */
-		link_state = phba->link_state;
-		lpfc_linkdown(phba);
-		phba->link_state = link_state;
-		/* Unregister FCF if no devices connected to it */
-		lpfc_unregister_unused_fcf(phba);
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			spin_unlock_irq(&phba->hbalock);
+			/* Update FLOGI FCF failover eligible FCF bmask */
+			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
+			break;
+		}
+		/* Mark the fast failover process in progress */
+		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2771 Start FCF fast failover process due to "
+			"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+			"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
+		rc = lpfc_sli4_redisc_fcf_table(phba);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2772 Issue FCF rediscover mailbox "
+					"command failed, fail through to FCF "
+					"dead event\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * Last resort will fail over by treating this
+			 * as a link down to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		} else
+			/* Handling fast FCF failover to a DEAD FCF event
+			 * is considered equivalent to receiving CVL to all
+			 * vports.
+			 */
+			lpfc_sli4_perform_all_vport_cvl(phba);
 		break;
 	case LPFC_FCOE_EVENT_TYPE_CVL:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2718 Clear Virtual Link Received for VPI 0x%x"
 			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
 		vport = lpfc_find_vport_by_vpid(phba,
 				acqe_fcoe->index - phba->vpi_base);
-		if (!vport)
-			break;
-		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		ndlp = lpfc_sli4_perform_vport_cvl(vport);
 		if (!ndlp)
 			break;
-		shost = lpfc_shost_from_vport(vport);
-		if (phba->pport->port_state <= LPFC_FLOGI)
-			break;
-		/* If virtual link is not yet instantiated ignore CVL */
-		if (vport->port_state <= LPFC_FDISC)
-			break;
-
-		lpfc_linkdown_port(vport);
-		lpfc_cleanup_pending_mbox(vport);
-		spin_lock_irq(shost->host_lock);
-		vport->fc_flag |= FC_VPORT_CVL_RCVD;
-		spin_unlock_irq(shost->host_lock);
 		active_vlink_present = 0;
 
 		vports = lpfc_create_vport_work_array(phba);
@@ -3311,6 +3446,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			 * re-instantiate the Vlink using FDISC.
 			 */
 			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+			shost = lpfc_shost_from_vport(vport);
 			spin_lock_irq(shost->host_lock);
 			ndlp->nlp_flag |= NLP_DELAY_TMO;
 			spin_unlock_irq(shost->host_lock);
@@ -3321,15 +3457,38 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			 * Otherwise, we request port to rediscover
 			 * the entire FCF table for a fast recovery
 			 * from possible case that the current FCF
-			 * is no longer valid.
+			 * is no longer valid if we are not already
+			 * in the FCF failover process.
 			 */
+			spin_lock_irq(&phba->hbalock);
+			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+				spin_unlock_irq(&phba->hbalock);
+				break;
+			}
+			/* Mark the fast failover process in progress */
+			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2773 Start FCF fast failover due "
+					"to CVL event: evt_tag:x%x\n",
+					acqe_fcoe->event_tag);
 			rc = lpfc_sli4_redisc_fcf_table(phba);
-			if (rc)
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+						LOG_DISCOVERY,
+						"2774 Issue FCF rediscover "
+						"mailbox command failed, "
+						"fail through to CVL event\n");
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+				spin_unlock_irq(&phba->hbalock);
 				/*
 				 * Last resort will be re-try on the
 				 * the current registered FCF entry.
 				 */
 				lpfc_retry_pport_discovery(phba);
+			}
 		}
 		break;
 	default:
@@ -3426,11 +3585,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Scan FCF table from the first entry to re-discover SAN */
-	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2777 Start FCF table scan after FCF "
+			"rediscovery quiescent period over\n");
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 	if (rc)
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2747 Post FCF rediscovery read FCF record "
-			"failed 0x%x\n", rc);
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2747 Issue FCF scan read FCF mailbox "
+			"command failed 0x%x\n", rc);
 }
 
 /**
@@ -3722,6 +3884,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
+	int longs;
 
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
@@ -3898,13 +4061,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_active_sgl;
 	}
 
+	/* Allocate eligible FCF bmask memory for FCF round robin failover */
+	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+					 GFP_KERNEL);
+	if (!phba->fcf.fcf_rr_bmask) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2759 Failed allocate memory for FCF round "
+				"robin failover bmask\n");
+		goto out_remove_rpi_hdrs;
+	}
+
 	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
 				    phba->cfg_fcp_eq_count), GFP_KERNEL);
 	if (!phba->sli4_hba.fcp_eq_hdl) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2572 Failed allocate memory for fast-path "
 				"per-EQ handle array\n");
-		goto out_remove_rpi_hdrs;
+		goto out_free_fcf_rr_bmask;
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -3957,6 +4131,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 out_free_fcp_eq_hdl:
 	kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_fcf_rr_bmask:
+	kfree(phba->fcf.fcf_rr_bmask);
 out_remove_rpi_hdrs:
 	lpfc_sli4_remove_rpi_hdrs(phba);
 out_free_active_sgl:
@@ -4002,6 +4178,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	lpfc_sli4_remove_rpi_hdrs(phba);
 	lpfc_sli4_remove_rpis(phba);
 
+	/* Free eligible FCF index bmask */
+	kfree(phba->fcf.fcf_rr_bmask);
+
 	/* Free the ELS sgl list */
 	lpfc_free_active_sgl(phba);
 	lpfc_free_sgl_list(phba);
@@ -4397,6 +4576,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
 
 		/* The list order is used by later block SGL registration */
 		spin_lock_irq(&phba->hbalock);
+		sglq_entry->state = SGL_FREED;
 		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
 		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
 		phba->sli4_hba.total_sglq_bufs++;