Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
 drivers/scsi/lpfc/lpfc_hbadisc.c (-rw-r--r-- -> -rwxr-xr-x) | 129
 1 files changed, 91 insertions, 38 deletions
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
old mode 100644
new mode 100755
index e6a47e25b218..3b9424427652
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,8 +525,6 @@ lpfc_work_done(struct lpfc_hba *phba)
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 		}
-		if (phba->hba_flag & HBA_RECEIVE_BUFFER)
-			lpfc_sli4_handle_received_buffer(phba);
 	}
 
 	vports = lpfc_create_vport_work_array(phba);
@@ -568,8 +566,9 @@ lpfc_work_done(struct lpfc_hba *phba)
 	pring = &phba->sli.ring[LPFC_ELS_RING];
 	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
 	status >>= (4*LPFC_ELS_RING);
-	if ((status & HA_RXMASK)
-		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+	if ((status & HA_RXMASK) ||
+	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
+	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
 			/* Set the lpfc data pending flag */
@@ -688,7 +687,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 		lpfc_unreg_rpi(vport, ndlp);
 
 		/* Leave Fabric nodes alone on link down */
-		if (!remove && ndlp->nlp_type & NLP_FABRIC)
+		if ((phba->sli_rev < LPFC_SLI_REV4) &&
+		    (!remove && ndlp->nlp_type & NLP_FABRIC))
 			continue;
 		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
 					     remove
@@ -706,6 +706,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 void
 lpfc_port_link_failure(struct lpfc_vport *vport)
 {
+	/* Cleanup any outstanding received buffers */
+	lpfc_cleanup_rcv_buffers(vport);
+
 	/* Cleanup any outstanding RSCN activity */
 	lpfc_els_flush_rscn(vport);
 
@@ -1015,13 +1018,12 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		mempool_free(mboxq, phba->mbox_mem_pool);
 		return;
 	}
-	if (vport->port_state != LPFC_FLOGI) {
-		spin_lock_irqsave(&phba->hbalock, flags);
-		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		lpfc_initial_flogi(vport);
-	}
+	spin_lock_irqsave(&phba->hbalock, flags);
+	phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (vport->port_state != LPFC_FLOGI)
+		lpfc_initial_flogi(vport);
 
 	mempool_free(mboxq, phba->mbox_mem_pool);
 	return;
@@ -1199,6 +1201,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 
 	/* If the FCF is not availabe do nothing. */
 	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
 	}
@@ -1216,15 +1219,23 @@ lpfc_register_fcf(struct lpfc_hba *phba)
 
 	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
 		GFP_KERNEL);
-	if (!fcf_mbxq)
+	if (!fcf_mbxq) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
+	}
 
 	lpfc_reg_fcfi(phba, fcf_mbxq);
 	fcf_mbxq->vport = phba->pport;
 	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
 	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
-	if (rc == MBX_NOT_FINISHED)
+	if (rc == MBX_NOT_FINISHED) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+	}
 
 	return;
 }
@@ -1253,13 +1264,27 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 			uint16_t *vlan_id)
 {
 	struct lpfc_fcf_conn_entry *conn_entry;
+	int i, j, fcf_vlan_id = 0;
+
+	/* Find the lowest VLAN id in the FCF record */
+	for (i = 0; i < 512; i++) {
+		if (new_fcf_record->vlan_bitmap[i]) {
+			fcf_vlan_id = i * 8;
+			j = 0;
+			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
+				j++;
+				fcf_vlan_id++;
+			}
+			break;
+		}
+	}
 
 	/* If FCF not available return 0 */
 	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
 	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
 		return 0;
 
-	if (!phba->cfg_enable_fip) {
+	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
 		*boot_flag = 0;
 		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
 				new_fcf_record);
@@ -1286,7 +1311,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 		if (*addr_mode & LPFC_FCF_FPMA)
 			*addr_mode = LPFC_FCF_FPMA;
 
-		*vlan_id = 0xFFFF;
+		/* If FCF record report a vlan id use that vlan id */
+		if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
+		else
+			*vlan_id = 0xFFFF;
 		return 1;
 	}
 
@@ -1384,8 +1413,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 		    (*addr_mode & LPFC_FCF_FPMA))
 			*addr_mode = LPFC_FCF_FPMA;
 
+		/* If matching connect list has a vlan id, use it */
 		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
 			*vlan_id = conn_entry->conn_rec.vlan_tag;
+		/*
+		 * If no vlan id is specified in connect list, use the vlan id
+		 * in the FCF record
+		 */
+		else if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
 		else
 			*vlan_id = 0xFFFF;
 
@@ -1423,6 +1459,15 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 
 	if (phba->link_state >= LPFC_LINK_UP)
 		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	else {
+		/*
+		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
+		 * flag
+		 */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
+	}
 
 	if (unreg_fcf) {
 		spin_lock_irq(&phba->hbalock);
@@ -1659,9 +1704,8 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		lpfc_initial_fdisc(vport);
 	else {
 		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
-		lpfc_printf_vlog(vport, KERN_ERR,
-			LOG_ELS,
-			"2606 No NPIV Fabric support\n");
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2606 No NPIV Fabric support\n");
 	}
 	return;
 }
@@ -1756,8 +1800,8 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 		goto fail_free_mem;
 	}
-	/* Mark the vport has registered with its VFI */
-	vport->vfi_state |= LPFC_VFI_REGISTERED;
+	/* The VPI is implicitly registered when the VFI is registered */
+	vport->vpi_state |= LPFC_VPI_REGISTERED;
 
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
 		lpfc_start_fdiscs(phba);
@@ -1861,7 +1905,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
-		if (phba->cfg_enable_npiv)
+		/* if npiv is enabled and this adapter supports npiv log
+		 * a message that npiv is not supported in this topology
+		 */
+		if (phba->cfg_enable_npiv && phba->max_vpi)
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 				"1309 Link Up Event npiv not supported in loop "
 				"topology\n");
@@ -1955,7 +2002,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 		 * is phase 1 implementation that support FCF index 0 and driver
 		 * defaults.
 		 */
-		if (phba->cfg_enable_fip == 0) {
+		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
 			fcf_record = kzalloc(sizeof(struct fcf_record),
 				GFP_KERNEL);
 			if (unlikely(!fcf_record)) {
@@ -2085,6 +2132,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	else
 		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 
+	phba->link_events++;
 	if (la->attType == AT_LINK_UP && (!la->mm)) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
@@ -2211,13 +2259,14 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 				 mb->mbxStatus);
 		break;
 	}
+	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
 	vport->unreg_vpi_cmpl = VPORT_OK;
 	mempool_free(pmb, phba->mbox_mem_pool);
 	/*
 	 * This shost reference might have been taken at the beginning of
 	 * lpfc_vport_delete()
 	 */
-	if (vport->load_flag & FC_UNLOADING)
+	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
 		scsi_host_put(shost);
 }
 
@@ -2268,6 +2317,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		goto out;
 	}
 
+	vport->vpi_state |= LPFC_VPI_REGISTERED;
 	vport->num_disc_nodes = 0;
 	/* go thru NPR list and issue ELS PLOGIs */
 	if (vport->fc_npr_cnt)
@@ -3077,7 +3127,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	struct lpfc_sli *psli;
 	struct lpfc_sli_ring *pring;
 	struct lpfc_iocbq *iocb, *next_iocb;
-	uint32_t rpi, i;
+	uint32_t i;
 
 	lpfc_fabric_abort_nport(ndlp);
 
@@ -3086,7 +3136,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	 * by firmware with a no rpi error.
 	 */
 	psli = &phba->sli;
-	rpi = ndlp->nlp_rpi;
 	if (ndlp->nlp_flag & NLP_RPI_VALID) {
 		/* Now process each ring */
 		for (i = 0; i < psli->num_rings; i++) {
@@ -4322,6 +4371,14 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 			ret = 1;
 			spin_unlock_irq(shost->host_lock);
 			goto out;
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+				"2624 RPI %x DID %x flg %x still "
+				"logged in\n",
+				ndlp->nlp_rpi, ndlp->nlp_DID,
+				ndlp->nlp_flag);
+			if (ndlp->nlp_flag & NLP_RPI_VALID)
+				ret = 1;
 		}
 	}
 	spin_unlock_irq(shost->host_lock);
@@ -4400,7 +4457,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 	 */
 	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
 	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
-	    (phba->cfg_enable_fip == 0)) {
+	    (!(phba->hba_flag & HBA_FIP_SUPPORT))) {
 		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
@@ -4409,6 +4466,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 	if (lpfc_fcf_inuse(phba))
 		return;
 
+	/* At this point, all discovery is aborted */
+	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 
 	/* Unregister VPIs */
 	vports = lpfc_create_vport_work_array(phba);
@@ -4416,8 +4475,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 	    (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 			lpfc_mbx_unreg_vpi(vports[i]);
-			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
 
@@ -4431,7 +4490,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
 		return;
 	}
 
-	lpfc_unreg_vfi(mbox, phba->pport->vfi);
+	lpfc_unreg_vfi(mbox, phba->pport);
 	mbox->vport = phba->pport;
 	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
 
@@ -4512,8 +4571,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
 
 	/* Free the current connect table */
 	list_for_each_entry_safe(conn_entry, next_conn_entry,
-		&phba->fcf_conn_rec_list, list)
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
 		kfree(conn_entry);
+	}
 
 	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
 	record_count = conn_hdr->length * sizeof(uint32_t)/
@@ -4569,14 +4630,6 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
 	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
 		return;
 
-	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
-		FIPP_MODE_ON)
-		phba->cfg_enable_fip = 1;
-
-	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
-		FIPP_MODE_OFF)
-		phba->cfg_enable_fip = 0;
-
 	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
 		phba->valid_vlan = 1;
 		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &