path: root/drivers/scsi/lpfc/lpfc_init.c
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c | 515
1 file changed, 339 insertions(+), 176 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 562d8cee874b..0ba35a9a5c5f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -28,6 +28,7 @@
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/ctype.h>
+#include <linux/aer.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -852,12 +853,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 void
 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 {
+	struct lpfc_vport **vports;
 	LPFC_MBOXQ_t *pmboxq;
 	struct lpfc_dmabuf *buf_ptr;
-	int retval;
+	int retval, i;
 	struct lpfc_sli *psli = &phba->sli;
 	LIST_HEAD(completions);
 
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_rcv_seq_check_edtov(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+
 	if ((phba->link_state == LPFC_HBA_ERROR) ||
 	    (phba->pport->load_flag & FC_UNLOADING) ||
 	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
@@ -1521,10 +1529,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	int GE = 0;
 	int oneConnect = 0; /* default is not a oneConnect */
 	struct {
-		char * name;
-		int max_speed;
-		char * bus;
-	} m = {"<Unknown>", 0, ""};
+		char *name;
+		char *bus;
+		char *function;
+	} m = {"<Unknown>", "", ""};
 
 	if (mdp && mdp[0] != '\0'
 		&& descp && descp[0] != '\0')
@@ -1545,132 +1553,155 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 
 	switch (dev_id) {
 	case PCI_DEVICE_ID_FIREFLY:
-		m = (typeof(m)){"LP6000", max_speed, "PCI"};
+		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SUPERFLY:
 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
-			m = (typeof(m)){"LP7000", max_speed, "PCI"};
+			m = (typeof(m)){"LP7000", "PCI",
+					"Fibre Channel Adapter"};
 		else
-			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
+			m = (typeof(m)){"LP7000E", "PCI",
+					"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_DRAGONFLY:
-		m = (typeof(m)){"LP8000", max_speed, "PCI"};
+		m = (typeof(m)){"LP8000", "PCI",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_CENTAUR:
 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
-			m = (typeof(m)){"LP9002", max_speed, "PCI"};
+			m = (typeof(m)){"LP9002", "PCI",
+					"Fibre Channel Adapter"};
 		else
-			m = (typeof(m)){"LP9000", max_speed, "PCI"};
+			m = (typeof(m)){"LP9000", "PCI",
+					"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_RFLY:
-		m = (typeof(m)){"LP952", max_speed, "PCI"};
+		m = (typeof(m)){"LP952", "PCI",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PEGASUS:
-		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP9802", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_THOR:
-		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP10000", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_VIPER:
-		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
+		m = (typeof(m)){"LPX1000", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PFLY:
-		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP982", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_TFLY:
-		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP1050", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HELIOS:
-		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP11000", "PCI-X2",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HELIOS_SCSP:
-		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP11000-SP", "PCI-X2",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HELIOS_DCSP:
-		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP11002-SP", "PCI-X2",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE:
-		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
-		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
-		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_BMID:
-		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_BSMB:
-		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR:
-		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
-		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
-		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
+		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
 		GE = 1;
 		break;
 	case PCI_DEVICE_ID_ZMID:
-		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZSMB:
-		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP101:
-		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP10000S:
-		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
+		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP11000S:
-		m = (typeof(m)){"LP11000-S", max_speed,
-			"PCI-X2"};
+		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LPE11000S:
-		m = (typeof(m)){"LPe11000-S", max_speed,
-			"PCIe"};
+		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT:
-		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_MID:
-		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_SMB:
-		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_DCSP:
-		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_SCSP:
-		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_S:
-		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HORNET:
-		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
+		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
 		GE = 1;
 		break;
 	case PCI_DEVICE_ID_PROTEUS_VF:
-		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PROTEUS_PF:
-		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PROTEUS_S:
-		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
+		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_TIGERSHARK:
 		oneConnect = 1;
-		m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
+		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_TOMCAT:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_FALCON:
+		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
+				"EmulexSecure Fibre"};
 		break;
 	default:
-		m = (typeof(m)){ NULL };
+		m = (typeof(m)){"Unknown", "", ""};
 		break;
 	}
 
@@ -1682,17 +1713,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	if (descp && descp[0] == '\0') {
 		if (oneConnect)
 			snprintf(descp, 255,
-				"Emulex OneConnect %s, FCoE Initiator, Port %s",
-				m.name,
+				"Emulex OneConnect %s, %s Initiator, Port %s",
+				m.name, m.function,
 				phba->Port);
 		else
 			snprintf(descp, 255,
 				"Emulex %s %d%s %s %s",
-				m.name, m.max_speed,
-				(GE) ? "GE" : "Gb",
-				m.bus,
-				(GE) ? "FCoE Adapter" :
-					"Fibre Channel Adapter");
+				m.name, max_speed, (GE) ? "GE" : "Gb",
+				m.bus, m.function);
 	}
 }
 
@@ -2217,7 +2245,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
 
 		if (vports[i]->load_flag & FC_UNLOADING)
 			continue;
-		vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
+		vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
 		shost = lpfc_shost_from_vport(vports[i]);
 		list_for_each_entry_safe(ndlp, next_ndlp,
 					 &vports[i]->fc_nodes,
@@ -2308,6 +2336,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 
 	spin_lock_irq(&phba->hbalock);
 	/* Release all the lpfc_scsi_bufs maintained by this host. */
+	spin_lock(&phba->scsi_buf_list_lock);
 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
 		list_del(&sb->list);
 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
@@ -2315,6 +2344,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 		kfree(sb);
 		phba->total_scsi_bufs--;
 	}
+	spin_unlock(&phba->scsi_buf_list_lock);
 
 	/* Release all the lpfc_iocbq entries maintained by this host. */
 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2322,9 +2352,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 		kfree(io);
 		phba->total_iocbq_bufs--;
 	}
-
 	spin_unlock_irq(&phba->hbalock);
-
 	return 0;
 }
 
@@ -2408,7 +2436,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	vport->els_tmofunc.function = lpfc_els_timeout;
 	vport->els_tmofunc.data = (unsigned long)vport;
 
-	error = scsi_add_host(shost, dev);
+	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
 		goto out_put_shost;
 
@@ -2699,6 +2727,63 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function uses the QUERY_FW_CFG mailbox command to determine if the
+ * firmware loaded supports FCoE. A return of zero indicates that the mailbox
+ * was successful and the firmware supports FCoE. Any other return indicates
+ * a error. It is assumed that this function will be called before interrupts
+ * are enabled.
+ **/
+static int
+lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
+{
+	int rc = 0;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
+	uint32_t length;
+	uint32_t shdr_status, shdr_add_status;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2621 Failed to allocate mbox for "
+				"query firmware config cmd\n");
+		return -ENOMEM;
+	}
+	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
+	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+			 length, LPFC_SLI4_MBX_EMBED);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status,
+			     &query_fw_cfg->header.cfg_shdr.response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+				 &query_fw_cfg->header.cfg_shdr.response);
+	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2622 Query Firmware Config failed "
+				"mbx status x%x, status x%x add_status x%x\n",
+				rc, shdr_status, shdr_add_status);
+		return -EINVAL;
+	}
+	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2623 FCoE Function not supported by firmware. "
+				"Function mode = %08x\n",
+				query_fw_cfg->function_mode);
+		return -EINVAL;
+	}
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	return 0;
+}
+
+/**
  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async link completion queue entry.
@@ -2918,13 +3003,17 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 {
 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
 	int rc;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
 
+	phba->fc_eventTag = acqe_fcoe->event_tag;
 	phba->fcoe_eventtag = acqe_fcoe->event_tag;
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
 			"2546 New FCF found index 0x%x tag 0x%x\n",
-			acqe_fcoe->fcf_index,
+			acqe_fcoe->index,
 			acqe_fcoe->event_tag);
 		/*
 		 * If the current FCF is in discovered state, or
@@ -2939,12 +3028,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		spin_unlock_irq(&phba->hbalock);
 
 		/* Read the FCF table and re-discover SAN. */
-		rc = lpfc_sli4_read_fcf_record(phba,
-					       LPFC_FCOE_FCF_GET_FIRST);
+		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
 			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
 				"2547 Read FCF record failed 0x%x\n",
 				rc);
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -2956,11 +3044,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2549 FCF disconnected fron network index 0x%x"
-			" tag 0x%x\n", acqe_fcoe->fcf_index,
+			"2549 FCF disconnected from network index 0x%x"
+			" tag 0x%x\n", acqe_fcoe->index,
 			acqe_fcoe->event_tag);
 		/* If the event is not for currently used fcf do nothing */
-		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
+		if (phba->fcf.fcf_indx != acqe_fcoe->index)
 			break;
 		/*
 		 * Currently, driver support only one FCF - so treat this as
@@ -2970,7 +3058,28 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		/* Unregister FCF if no devices connected to it */
 		lpfc_unregister_unused_fcf(phba);
 		break;
-
+	case LPFC_FCOE_EVENT_TYPE_CVL:
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+			"2718 Clear Virtual Link Received for VPI 0x%x"
+			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
+		vport = lpfc_find_vport_by_vpid(phba,
+				acqe_fcoe->index - phba->vpi_base);
+		if (!vport)
+			break;
+		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		if (!ndlp)
+			break;
+		shost = lpfc_shost_from_vport(vport);
+		lpfc_linkdown_port(vport);
+		if (vport->port_type != LPFC_NPIV_PORT) {
+			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_DELAY_TMO;
+			spin_unlock_irq(shost->host_lock);
+			ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
+			vport->port_state = LPFC_FLOGI;
+		}
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0288 Unknown FCoE event type 0x%x event tag "
@@ -2990,6 +3099,7 @@ static void
 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
 			 struct lpfc_acqe_dcbx *acqe_dcbx)
 {
+	phba->fc_eventTag = acqe_dcbx->event_tag;
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0290 The SLI4 DCBX asynchronous event is not "
 			"handled yet\n");
@@ -3432,7 +3542,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Driver internel slow-path CQ Event pool */
 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
 	/* Response IOCB work queue list */
-	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
 	/* Asynchronous event CQ Event work queue list */
 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
 	/* Fast-path XRI aborted CQ Event work queue list */
@@ -3461,6 +3571,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	if (unlikely(rc))
 		goto out_free_bsmbx;
 
+	rc = lpfc_sli4_fw_cfg_check(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
 	/* Set up the hba's configuration parameters. */
 	rc = lpfc_sli4_read_config(phba);
 	if (unlikely(rc))
@@ -3594,8 +3708,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 
 	/* Free the current connect table */
 	list_for_each_entry_safe(conn_entry, next_conn_entry,
-		&phba->fcf_conn_rec_list, list)
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
 		kfree(conn_entry);
+	}
 
 	return;
 }
@@ -3824,7 +3940,7 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
 	rc = lpfc_sli4_remove_all_sgl_pages(phba);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-			"2005 Unable to deregister pages from HBA: %x", rc);
+			"2005 Unable to deregister pages from HBA: %x\n", rc);
 	}
 	kfree(phba->sli4_hba.lpfc_els_sgl_array);
 }
@@ -4273,7 +4389,8 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
 		_dump_buf_data =
 			(char *) __get_free_pages(GFP_KERNEL, pagecnt);
 		if (_dump_buf_data) {
-			printk(KERN_ERR "BLKGRD allocated %d pages for "
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9043 BLKGRD: allocated %d pages for "
 				"_dump_buf_data at 0x%p\n",
 				(1 << pagecnt), _dump_buf_data);
 			_dump_buf_data_order = pagecnt;
@@ -4284,17 +4401,20 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
 			--pagecnt;
 		}
 		if (!_dump_buf_data_order)
-			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9044 BLKGRD: ERROR unable to allocate "
 				"memory for hexdump\n");
 	} else
-		printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
 			"\n", _dump_buf_data);
 	if (!_dump_buf_dif) {
 		while (pagecnt) {
 			_dump_buf_dif =
 				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
 			if (_dump_buf_dif) {
-				printk(KERN_ERR "BLKGRD allocated %d pages for "
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9046 BLKGRD: allocated %d pages for "
 					"_dump_buf_dif at 0x%p\n",
 					(1 << pagecnt), _dump_buf_dif);
 				_dump_buf_dif_order = pagecnt;
@@ -4305,10 +4425,12 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
 			--pagecnt;
 		}
 		if (!_dump_buf_dif_order)
-			printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9047 BLKGRD: ERROR unable to allocate "
 				"memory for hexdump\n");
 	} else
-		printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
 			_dump_buf_dif);
 }
 
@@ -4512,7 +4634,6 @@ int
 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 {
 	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
-	uint32_t onlnreg0, onlnreg1;
 	int i, port_error = -ENODEV;
 
 	if (!phba->sli4_hba.STAregaddr)
@@ -4556,21 +4677,20 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 		bf_get(lpfc_scratchpad_slirev, &scratchpad),
 		bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
 		bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
-
+	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
+	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
 	/* With uncoverable error, log the error message and return error */
-	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
-	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
-	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
-		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
-		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
-		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1422 HBA Unrecoverable error: "
-				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
-				"online0_reg=0x%x, online1_reg=0x%x\n",
-				uerrlo_reg.word0, uerrhi_reg.word0,
-				onlnreg0, onlnreg1);
-		}
+	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
+	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
+	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
+	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1422 HBA Unrecoverable error: "
+				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
+				uerrlo_reg.word0, uerrhi_reg.word0,
+				phba->sli4_hba.ue_mask_lo,
+				phba->sli4_hba.ue_mask_hi);
 		return -ENODEV;
 	}
 
@@ -4591,10 +4711,10 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
 					LPFC_UERR_STATUS_LO;
 	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
 					LPFC_UERR_STATUS_HI;
-	phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_ONLINE0;
-	phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_ONLINE1;
+	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_UE_MASK_LO;
+	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_UE_MASK_HI;
 	phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
 					LPFC_SCRATCHPAD;
 }
@@ -4825,7 +4945,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 	phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
 	phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
 	phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
-	phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
+	phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
+		(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
 	phba->max_vports = phba->max_vpi;
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 			"2003 cfg params XRI(B:%d M:%d), "
@@ -4979,10 +5100,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	/* It does not make sense to have more EQs than WQs */
 	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"2593 The number of FCP EQs (%d) is more "
-				"than the number of FCP WQs (%d), take "
-				"the number of FCP EQs same as than of "
-				"WQs (%d)\n", cfg_fcp_eq_count,
+				"2593 The FCP EQ count(%d) cannot be greater "
+				"than the FCP WQ count(%d), limiting the "
+				"FCP EQ count to %d\n", cfg_fcp_eq_count,
 				phba->cfg_fcp_wq_count,
 				phba->cfg_fcp_wq_count);
 		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
@@ -5058,15 +5178,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	}
 	phba->sli4_hba.els_cq = qdesc;
 
-	/* Create slow-path Unsolicited Receive Complete Queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-				      phba->sli4_hba.cq_ecount);
-	if (!qdesc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0502 Failed allocate slow-path USOL RX CQ\n");
-		goto out_free_els_cq;
-	}
-	phba->sli4_hba.rxq_cq = qdesc;
 
 	/* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
 	phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
@@ -5075,7 +5186,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2577 Failed allocate memory for fast-path "
 				"CQ record array\n");
-		goto out_free_rxq_cq;
+		goto out_free_els_cq;
 	}
 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
 		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
@@ -5188,9 +5299,6 @@ out_free_fcp_cq:
 		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
 	}
 	kfree(phba->sli4_hba.fcp_cq);
-out_free_rxq_cq:
-	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-	phba->sli4_hba.rxq_cq = NULL;
 out_free_els_cq:
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -5247,10 +5355,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
 	phba->sli4_hba.dat_rq = NULL;
 
-	/* Release unsolicited receive complete queue */
-	lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-	phba->sli4_hba.rxq_cq = NULL;
-
 	/* Release ELS complete queue */
 	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
 	phba->sli4_hba.els_cq = NULL;
@@ -5383,25 +5487,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.els_cq->queue_id,
 			phba->sli4_hba.sp_eq->queue_id);
 
-	/* Set up slow-path Unsolicited Receive Complete Queue */
-	if (!phba->sli4_hba.rxq_cq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0532 USOL RX CQ not allocated\n");
-		goto out_destroy_els_cq;
-	}
-	rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
-			    LPFC_RCQ, LPFC_USOL);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0533 Failed setup of slow-path USOL RX CQ: "
-				"rc = 0x%x\n", rc);
-		goto out_destroy_els_cq;
-	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
-			phba->sli4_hba.rxq_cq->queue_id,
-			phba->sli4_hba.sp_eq->queue_id);
-
 	/* Set up fast-path FCP Response Complete Queue */
 	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
 		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -5507,7 +5592,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		goto out_destroy_fcp_wq;
 	}
 	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
-			    phba->sli4_hba.rxq_cq, LPFC_USOL);
+			    phba->sli4_hba.els_cq, LPFC_USOL);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0541 Failed setup of Receive Queue: "
@@ -5519,7 +5604,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5519 "parent cq-id=%d\n", 5604 "parent cq-id=%d\n",
5520 phba->sli4_hba.hdr_rq->queue_id, 5605 phba->sli4_hba.hdr_rq->queue_id,
5521 phba->sli4_hba.dat_rq->queue_id, 5606 phba->sli4_hba.dat_rq->queue_id,
5522 phba->sli4_hba.rxq_cq->queue_id); 5607 phba->sli4_hba.els_cq->queue_id);
5523 return 0; 5608 return 0;
5524 5609
5525out_destroy_fcp_wq: 5610out_destroy_fcp_wq:
@@ -5531,8 +5616,6 @@ out_destroy_mbx_wq:
 out_destroy_fcp_cq:
 	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
-	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
-out_destroy_els_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -5574,8 +5657,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 	/* Unset ELS complete queue */
 	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
-	/* Unset unsolicited receive complete queue */
-	lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
 	/* Unset FCP response complete queue */
 	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
 		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
@@ -6722,6 +6803,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
 	struct lpfc_hba *phba;
 	struct lpfc_vport *vport = NULL;
+	struct Scsi_Host *shost = NULL;
 	int error;
 	uint32_t cfg_mode, intr_mode;
 
@@ -6800,6 +6882,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_destroy_shost;
 	}
 
+	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
 	/* Now, trying to enable interrupt and bring up the device */
 	cfg_mode = phba->cfg_use_msi;
 	while (true) {
@@ -6866,6 +6949,8 @@ out_unset_pci_mem_s3:
 	lpfc_sli_pci_mem_unset(phba);
 out_disable_pci_dev:
 	lpfc_disable_pci_dev(phba);
+	if (shost)
+		scsi_host_put(shost);
 out_free_phba:
 	lpfc_hba_free(phba);
 	return error;
@@ -7036,6 +7121,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 	/* Restore device state from PCI config space */
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
+
 	if (pdev->is_busmaster)
 		pci_set_master(pdev);
 
@@ -7070,6 +7156,75 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 }
 
 /**
+ * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot recover. It
+ * aborts and stops all the on-going I/Os on the pci device.
+ **/
+static void
+lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2723 PCI channel I/O abort preparing for recovery\n");
+	/* Prepare for bringing HBA offline */
+	lpfc_offline_prep(phba);
+	/* Clear sli active flag to prevent sysfs access to HBA */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+	/* Stop and flush all I/Os and bring HBA offline */
+	lpfc_offline(phba);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot reset. It
+ * disables the device interrupt and pci device, and aborts the internal FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2710 PCI channel disable preparing for reset\n");
+	/* Disable interrupt and pci device */
+	lpfc_sli_disable_intr(phba);
+	pci_disable_device(phba->pcidev);
+	/*
+	 * There may be I/Os dropped by the firmware.
+	 * Error iocb (I/O) on txcmplq and let the SCSI layer
+	 * retry it after re-establishing link.
+	 */
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot permanently
+ * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2711 PCI channel permanent disable for failure\n");
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+	/* Clean up all driver's outstanding SCSI I/Os */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
  * @pdev: pointer to PCI device.
  * @state: the current PCI connection state.
@@ -7083,6 +7238,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
  * as desired.
  *
  * Return codes
+ * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
  * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
  * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  **/
@@ -7091,33 +7247,27 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring;
 
-	if (state == pci_channel_io_perm_failure) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0472 PCI channel I/O permanent failure\n");
-		/* Block all SCSI devices' I/Os on the host */
-		lpfc_scsi_dev_block(phba);
-		/* Clean up all driver's outstanding SCSI I/Os */
-		lpfc_sli_flush_fcp_rings(phba);
+	switch (state) {
+	case pci_channel_io_normal:
+		/* Non-fatal error, prepare for recovery */
+		lpfc_sli_prep_dev_for_recover(phba);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		/* Fatal error, prepare for slot reset */
+		lpfc_sli_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		/* Permanent failure, prepare for device down */
+		lpfc_prep_dev_for_perm_failure(phba);
 		return PCI_ERS_RESULT_DISCONNECT;
+	default:
+		/* Unknown state, prepare and request slot reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0472 Unknown PCI error state: x%x\n", state);
+		lpfc_sli_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
 	}
-
-	pci_disable_device(pdev);
-	/*
-	 * There may be I/Os dropped by the firmware.
-	 * Error iocb (I/O) on txcmplq and let the SCSI layer
-	 * retry it after re-establishing link.
-	 */
-	pring = &psli->ring[psli->fcp_ring];
-	lpfc_sli_abort_iocb_ring(phba, pring);
-
-	/* Disable interrupt */
-	lpfc_sli_disable_intr(phba);
-
-	/* Request a slot reset. */
-	return PCI_ERS_RESULT_NEED_RESET;
 }
 
 /**
@@ -7197,7 +7347,12 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
+	/* Bring the device online */
 	lpfc_online(phba);
+
+	/* Clean up Advanced Error Reporting (AER) if needed */
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		pci_cleanup_aer_uncorrect_error_status(pdev);
 }
 
 /**
@@ -7213,15 +7368,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		if (max_xri <= 100)
-			return 4;
+			return 10;
 		else if (max_xri <= 256)
-			return 8;
+			return 25;
 		else if (max_xri <= 512)
-			return 16;
+			return 50;
 		else if (max_xri <= 1024)
-			return 32;
+			return 100;
 		else
-			return 48;
+			return 150;
 	} else
 		return 0;
 }
@@ -7249,6 +7404,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
 	struct lpfc_hba *phba;
 	struct lpfc_vport *vport = NULL;
+	struct Scsi_Host *shost = NULL;
 	int error;
 	uint32_t cfg_mode, intr_mode;
 	int mcnt;
@@ -7329,6 +7485,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_destroy_shost;
 	}
 
+	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
 	/* Now, trying to enable interrupt and bring up the device */
 	cfg_mode = phba->cfg_use_msi;
 	while (true) {
@@ -7397,6 +7554,8 @@ out_unset_pci_mem_s4:
 	lpfc_sli4_pci_mem_unset(phba);
 out_disable_pci_dev:
 	lpfc_disable_pci_dev(phba);
+	if (shost)
+		scsi_host_put(shost);
 out_free_phba:
 	lpfc_hba_free(phba);
 	return error;
@@ -7971,6 +8130,10 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 }
 };
 
@@ -8053,15 +8216,15 @@ lpfc_exit(void)
 	if (lpfc_enable_npiv)
 		fc_release_transport(lpfc_vport_transport_template);
 	if (_dump_buf_data) {
-		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
-				"at 0x%p\n",
+		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
+				"_dump_buf_data at 0x%p\n",
 				(1L << _dump_buf_data_order), _dump_buf_data);
 		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
 	}
 
 	if (_dump_buf_dif) {
-		printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
-				"at 0x%p\n",
+		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
+				"_dump_buf_dif at 0x%p\n",
 				(1L << _dump_buf_dif_order), _dump_buf_dif);
 		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
 	}