author		James Smart <james.smart@emulex.com>	2011-12-13 13:22:37 -0500
committer	James Bottomley <JBottomley@Parallels.com>	2011-12-15 01:57:45 -0500
commit		2e90f4b5a2a0ce5ab72c0c81c74269bd0a62522b
tree		8e131436d13de07d6ee771243384e1d91bbb68c4
parent		df9e1b59f9e4671930a7762b9518461df4ea85f5
[SCSI] lpfc 8.3.28: Critical Miscellaneous fixes
- Make lpfc_sli4_pci_mem_unset interface type aware (CR 124390)
- Convert byte count to word count when calling __iowrite32_copy (CR 122550); see the sketch below
- Check the ERR1 and ERR2 registers for error attention caused by the SLI Port state being affected by a forced debug dump (CR 122986, 122426, 124859)
- Use the lpfc_readl routine instead of readl for the port status register read in lpfc_handle_eratt_s4 (CR 125403)
- Call lpfc_sli4_queue_destroy inside lpfc_sli4_brdreset before doing a PCI function reset (CR 125124, 125168, 125572, 125622)
- Zero out the HBQ when it is allocated (CR 125663)
- Alter port reset log messages to indicate the error type (CR 125989)
- Add proper NULL pointer checks to all the places that access the queue memory (CR 125832)

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
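The byte-to-word conversion mentioned in the second item above is not visible in the lpfc_init.c diff below (that part of the patch touches other lpfc files), so here is a minimal, hypothetical sketch of the pattern being fixed: __iowrite32_copy() takes its third argument as a count of 32-bit words, not bytes, so a byte length must be converted before the call. The function and variable names below are illustrative only and are not lpfc symbols.

/* Hedged sketch only: illustrates the byte-count vs. word-count fix for
 * __iowrite32_copy(); names here are hypothetical, not taken from lpfc.
 */
#include <linux/io.h>
#include <linux/kernel.h>	/* ALIGN() */
#include <linux/types.h>	/* u32 */

static void example_copy_to_bar(void __iomem *dst, const void *src,
				size_t len_bytes)
{
	/* __iowrite32_copy() counts 32-bit words, so round the byte
	 * length up to a whole word and divide before calling it.
	 */
	size_t word_count = ALIGN(len_bytes, sizeof(u32)) / sizeof(u32);

	/* The buggy pre-fix form would have been:
	 *	__iowrite32_copy(dst, src, len_bytes);
	 * which copies four times too much data past the source buffer.
	 */
	__iowrite32_copy(dst, src, word_count);
}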
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c	164
 1 file changed, 116 insertions(+), 48 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6096c9a091d1..cb714d2342d4 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1417,7 +1417,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	uint32_t event_data;
 	struct Scsi_Host *shost;
 	uint32_t if_type;
-	struct lpfc_register portstat_reg;
+	struct lpfc_register portstat_reg = {0};
+	uint32_t reg_err1, reg_err2;
+	uint32_t uerrlo_reg, uemasklo_reg;
+	uint32_t pci_rd_rc1, pci_rd_rc2;
 	int rc;
 
 	/* If the pci channel is offline, ignore possible errors, since
@@ -1429,27 +1432,29 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	if (!phba->cfg_enable_hba_reset)
 		return;
 
-	/* Send an internal error event to mgmt application */
-	lpfc_board_errevt_to_mgmt(phba);
-
-	/* For now, the actual action for SLI4 device handling is not
-	 * specified yet, just treated it as adaptor hardware failure
-	 */
-	event_data = FC_REG_DUMP_EVENT;
-	shost = lpfc_shost_from_vport(vport);
-	fc_host_post_vendor_event(shost, fc_get_event_number(),
-				  sizeof(event_data), (char *) &event_data,
-				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 	switch (if_type) {
 	case LPFC_SLI_INTF_IF_TYPE_0:
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UERRLOregaddr,
+				&uerrlo_reg);
+		pci_rd_rc2 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+				&uemasklo_reg);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+			return;
 		lpfc_sli4_offline_eratt(phba);
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_2:
-		portstat_reg.word0 =
-			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type2.STATUSregaddr,
+				&portstat_reg.word0);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO)
+			return;
+		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
 		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
 			/* TODO: Register for Overtemp async events. */
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1459,8 +1464,20 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 			phba->over_temp_state = HBA_OVER_TEMP;
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_sli4_offline_eratt(phba);
-			return;
+			break;
 		}
+		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3143 Port Down: Firmware Restarted\n");
+		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3144 Port Down: Debug Dump\n");
+		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3145 Port Down: Provisioning\n");
 		/*
 		 * On error status condition, driver need to wait for port
 		 * ready before performing reset.
@@ -1469,14 +1486,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 		if (!rc) {
 			/* need reset: attempt for port recovery */
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2887 Port Error: Attempting "
-					"Port Recovery\n");
+					"2887 Reset Needed: Attempting Port "
+					"Recovery...\n");
 			lpfc_offline_prep(phba);
 			lpfc_offline(phba);
 			lpfc_sli_brdrestart(phba);
 			if (lpfc_online(phba) == 0) {
 				lpfc_unblock_mgmt_io(phba);
-				return;
+				/* don't report event on forced debug dump */
+				if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+				    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+					return;
+				else
+					break;
 			}
 			/* fall through for not able to recover */
 		}
@@ -1486,6 +1508,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	default:
 		break;
 	}
+	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"3123 Report dump event to upper layer\n");
+	/* Send an internal error event to mgmt application */
+	lpfc_board_errevt_to_mgmt(phba);
+
+	event_data = FC_REG_DUMP_EVENT;
+	shost = lpfc_shost_from_vport(vport);
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(event_data), (char *) &event_data,
+				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 }
 
 /**
@@ -6475,6 +6507,7 @@ out_free_fcp_wq:
 		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_wq);
+	phba->sli4_hba.fcp_wq = NULL;
 out_free_els_wq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
 	phba->sli4_hba.els_wq = NULL;
@@ -6487,6 +6520,7 @@ out_free_fcp_cq:
 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_cq);
+	phba->sli4_hba.fcp_cq = NULL;
 out_free_els_cq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -6499,6 +6533,7 @@ out_free_fp_eq:
 		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fp_eq);
+	phba->sli4_hba.fp_eq = NULL;
 out_free_sp_eq:
 	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
 	phba->sli4_hba.sp_eq = NULL;
@@ -6532,8 +6567,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	phba->sli4_hba.els_wq = NULL;
 
 	/* Release FCP work queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+	if (phba->sli4_hba.fcp_wq != NULL)
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+		     fcp_qidx++)
+			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
 	kfree(phba->sli4_hba.fcp_wq);
 	phba->sli4_hba.fcp_wq = NULL;
 
@@ -6553,15 +6590,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 
 	/* Release FCP response complete queue */
 	fcp_qidx = 0;
-	do
-		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-	while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	if (phba->sli4_hba.fcp_cq != NULL)
+		do
+			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+		while (++fcp_qidx < phba->cfg_fcp_eq_count);
 	kfree(phba->sli4_hba.fcp_cq);
 	phba->sli4_hba.fcp_cq = NULL;
 
 	/* Release fast-path event queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+	if (phba->sli4_hba.fp_eq != NULL)
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++)
+			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
 	kfree(phba->sli4_hba.fp_eq);
 	phba->sli4_hba.fp_eq = NULL;
 
@@ -6614,6 +6654,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 				phba->sli4_hba.sp_eq->queue_id);
 
 	/* Set up fast-path event queue */
+	if (!phba->sli4_hba.fp_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3147 Fast-path EQs not allocated\n");
+		goto out_destroy_sp_eq;
+	}
 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
 		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6678,6 +6723,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 				phba->sli4_hba.sp_eq->queue_id);
 
 	/* Set up fast-path FCP Response Complete Queue */
+	if (!phba->sli4_hba.fcp_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3148 Fast-path FCP CQ array not "
+				"allocated\n");
+		goto out_destroy_els_cq;
+	}
 	fcp_cqidx = 0;
 	do {
 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -6757,6 +6808,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 				phba->sli4_hba.els_cq->queue_id);
 
 	/* Set up fast-path FCP Work Queue */
+	if (!phba->sli4_hba.fcp_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3149 Fast-path FCP WQ array not "
+				"allocated\n");
+		goto out_destroy_els_wq;
+	}
 	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
 		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6818,18 +6875,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 out_destroy_fcp_wq:
 	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
 		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 out_destroy_fcp_cq:
 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 out_destroy_fp_eq:
 	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
 		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 out_error:
 	return rc;
@@ -6866,13 +6926,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	/* Unset ELS complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 	/* Unset FCP response complete queue */
-	fcp_qidx = 0;
-	do {
-		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
-	} while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	if (phba->sli4_hba.fcp_cq) {
+		fcp_qidx = 0;
+		do {
+			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	}
 	/* Unset fast-path event queue */
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+	if (phba->sli4_hba.fp_eq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++)
+			lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+	}
 	/* Unset slow-path event queue */
 	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 }
@@ -7411,22 +7476,25 @@ out:
 static void
 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
 {
-	struct pci_dev *pdev;
-
-	/* Obtain PCI device reference */
-	if (!phba->pcidev)
-		return;
-	else
-		pdev = phba->pcidev;
-
-	/* Free coherent DMA memory allocated */
-
-	/* Unmap I/O memory space */
-	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
-	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
-	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+	uint32_t if_type;
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 
-	return;
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		dev_printk(KERN_ERR, &phba->pcidev->dev,
+			   "FATAL - unsupported SLI4 interface type - %d\n",
+			   if_type);
+		break;
+	}
 }
 
 /**