author     Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /drivers/scsi/lpfc/lpfc_init.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)

Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
 drivers/scsi/lpfc/lpfc_init.c (-rw-r--r--) | 1348
 1 file changed, 1061 insertions(+), 287 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 562d8cee874b..774663e8e1fe 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -28,6 +28,8 @@
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/ctype.h>
+#include <linux/aer.h>
+#include <linux/slab.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -349,7 +351,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	mb = &pmb->u.mb;
 
 	/* Get login parameters for NID.  */
-	lpfc_read_sparam(phba, pmb, 0);
+	rc = lpfc_read_sparam(phba, pmb, 0);
+	if (rc) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+
 	pmb->vport = vport;
 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -358,7 +365,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 			mb->mbxCommand, mb->mbxStatus);
 		phba->link_state = LPFC_HBA_ERROR;
 		mp = (struct lpfc_dmabuf *) pmb->context1;
-		mempool_free( pmb, phba->mbox_mem_pool);
+		mempool_free(pmb, phba->mbox_mem_pool);
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		return -EIO;
@@ -543,7 +550,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 			mempool_free(pmb, phba->mbox_mem_pool);
 			return -EIO;
 		}
-	} else {
+	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
 		lpfc_init_link(phba, pmb, phba->cfg_topology,
 			phba->cfg_link_speed);
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -570,6 +577,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	}
 	/* MBOX buffer will be freed in mbox compl */
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
 	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
 	pmb->mbox_cmpl = lpfc_config_async_cmpl;
 	pmb->vport = phba->pport;
@@ -587,6 +599,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
 	/* Get Option rom version */
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
 	lpfc_dump_wakeup_param(phba, pmb);
 	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
 	pmb->vport = phba->pport;
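
The hunks above all add the same defensive guard: mempool_alloc() can still return NULL even with GFP_KERNEL once the pool's emergency reserve is exhausted, so each call site now fails bring-up with -ENOMEM instead of dereferencing a NULL mailbox. A minimal sketch of the pattern; lpfc_alloc_mbox_or_fail() is a hypothetical name used only for illustration, not a driver function:

	/* Hypothetical helper illustrating the guard added above. */
	static LPFC_MBOXQ_t *lpfc_alloc_mbox_or_fail(struct lpfc_hba *phba)
	{
		LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

		if (!pmb)
			phba->link_state = LPFC_HBA_ERROR;	/* mark HBA unusable */
		return pmb;	/* caller returns -ENOMEM on NULL */
	}
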
@@ -602,6 +619,102 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_hba_init_link - Initialize the FC link
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *	0 - success
+ *	Any other value - error
+ **/
+int
+lpfc_hba_init_link(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	int rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+	mb = &pmb->u.mb;
+	pmb->vport = vport;
+
+	lpfc_init_link(phba, pmb, phba->cfg_topology,
+		phba->cfg_link_speed);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	lpfc_set_loopback_flag(phba);
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0498 Adapter failed to init, mbxCmd x%x "
+			"INIT_LINK, mbxStatus x%x\n",
+			mb->mbxCommand, mb->mbxStatus);
+		/* Clear all interrupt enable conditions */
+		writel(0, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+		/* Clear all pending interrupts */
+		writel(0xffffffff, phba->HAregaddr);
+		readl(phba->HAregaddr); /* flush */
+		phba->link_state = LPFC_HBA_ERROR;
+		if (rc != MBX_BUSY)
+			mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
+
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_link - this routine downs the FC link
+ *
+ * This routine will issue the DOWN_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use to stop the link.
+ *
+ * Return code
+ *	0 - success
+ *	Any other value - error
+ **/
+int
+lpfc_hba_down_link(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	lpfc_printf_log(phba,
+		KERN_ERR, LOG_INIT,
+		"0491 Adapter Link is disabled.\n");
+	lpfc_down_link(phba, pmb);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+		lpfc_printf_log(phba,
+			KERN_ERR, LOG_INIT,
+			"2522 Adapter failed to issue DOWN_LINK"
+			" mbox command rc 0x%x\n", rc);
+
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
  * @phba: pointer to lpfc HBA data structure.
  *
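
These two routines are exported through function pointers installed later in this patch (see the lpfc_init_api_table_setup() hunk near the end), which is what lets external code drive a delayed link up or down without calling the routines directly. A hedged sketch of a caller going through the jump table; the wrapper function and the suppress-link-up check are illustrative assumptions, not quoted from the driver:

	/* Illustrative caller: bring the link up via the jump table
	 * that lpfc_init_api_table_setup() populates. */
	static int lpfc_force_link_up(struct lpfc_hba *phba)
	{
		int rc = 0;

		if (phba->cfg_suppress_link_up != LPFC_INITIALIZE_LINK)
			rc = phba->lpfc_hba_init_link(phba);	/* -> lpfc_hba_init_link() */
		return rc;	/* 0, or -ENOMEM/-EIO from the mailbox path */
	}
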
@@ -645,7 +758,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
  * down the SLI Layer.
  *
  * Return codes
- *   0 - sucess.
+ *   0 - success.
  *   Any other value - error.
  **/
 static int
@@ -700,7 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
  * down the SLI Layer.
  *
  * Return codes
- *   0 - sucess.
+ *   0 - success.
  *   Any other value - error.
  **/
 static int
@@ -710,6 +823,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	LIST_HEAD(aborts);
 	int ret;
 	unsigned long iflag = 0;
+	struct lpfc_sglq *sglq_entry = NULL;
+
 	ret = lpfc_hba_down_post_s3(phba);
 	if (ret)
 		return ret;
@@ -725,6 +840,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	 * list.
 	 */
 	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+	list_for_each_entry(sglq_entry,
+		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
+		sglq_entry->state = SGL_FREED;
+
 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
 			&phba->sli4_hba.lpfc_sgl_list);
 	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -755,7 +874,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
  * uninitialization after the HBA is reset when bring down the SLI Layer.
  *
  * Return codes
- *   0 - sucess.
+ *   0 - success.
  *   Any other value - error.
  **/
 int
@@ -852,12 +971,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 void
 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 {
+	struct lpfc_vport **vports;
 	LPFC_MBOXQ_t *pmboxq;
 	struct lpfc_dmabuf *buf_ptr;
-	int retval;
+	int retval, i;
 	struct lpfc_sli *psli = &phba->sli;
 	LIST_HEAD(completions);
 
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_rcv_seq_check_edtov(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+
 	if ((phba->link_state == LPFC_HBA_ERROR) ||
 		(phba->pport->load_flag & FC_UNLOADING) ||
 		(phba->pport->fc_flag & FC_OFFLINE_MODE))
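
The heartbeat handler now walks every active vport using the driver's work-array idiom: lpfc_create_vport_work_array() returns a NULL-terminated snapshot, bounded by phba->max_vports, that can be iterated without holding the vport list lock, and it must always be paired with lpfc_destroy_vport_work_array() to drop the references. A hedged skeleton of the idiom; do_per_vport() is a placeholder for any per-vport action:

	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);	/* snapshot, may be NULL */
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			do_per_vport(vports[i]);	/* placeholder action */
	lpfc_destroy_vport_work_array(phba, vports);	/* release references */
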
@@ -1254,7 +1380,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
  * routine from the API jump table function pointer from the lpfc_hba struct.
  *
  * Return codes
- *   0 - sucess.
+ *   0 - success.
  *   Any other value - error.
  **/
 void
@@ -1521,10 +1647,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	int GE = 0;
 	int oneConnect = 0; /* default is not a oneConnect */
 	struct {
-		char * name;
-		int    max_speed;
-		char * bus;
-	} m = {"<Unknown>", 0, ""};
+		char *name;
+		char *bus;
+		char *function;
+	} m = {"<Unknown>", "", ""};
 
 	if (mdp && mdp[0] != '\0'
 		&& descp && descp[0] != '\0')
@@ -1545,132 +1671,155 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 
 	switch (dev_id) {
 	case PCI_DEVICE_ID_FIREFLY:
-		m = (typeof(m)){"LP6000", max_speed, "PCI"};
+		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SUPERFLY:
 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
-			m = (typeof(m)){"LP7000", max_speed, "PCI"};
+			m = (typeof(m)){"LP7000", "PCI",
+					"Fibre Channel Adapter"};
 		else
-			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
+			m = (typeof(m)){"LP7000E", "PCI",
+					"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_DRAGONFLY:
-		m = (typeof(m)){"LP8000", max_speed, "PCI"};
+		m = (typeof(m)){"LP8000", "PCI",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_CENTAUR:
 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
-			m = (typeof(m)){"LP9002", max_speed, "PCI"};
+			m = (typeof(m)){"LP9002", "PCI",
+					"Fibre Channel Adapter"};
 		else
-			m = (typeof(m)){"LP9000", max_speed, "PCI"};
+			m = (typeof(m)){"LP9000", "PCI",
+					"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_RFLY:
-		m = (typeof(m)){"LP952", max_speed, "PCI"};
+		m = (typeof(m)){"LP952", "PCI",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PEGASUS:
-		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP9802", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_THOR:
-		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP10000", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_VIPER:
-		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
+		m = (typeof(m)){"LPX1000", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PFLY:
-		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP982", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_TFLY:
-		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP1050", "PCI-X",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HELIOS:
-		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP11000", "PCI-X2",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HELIOS_SCSP:
-		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP11000-SP", "PCI-X2",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HELIOS_DCSP:
-		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP11002-SP", "PCI-X2",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE:
-		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
-		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
-		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_BMID:
-		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_BSMB:
-		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
+		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR:
-		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
-		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
-		m = (typeof(m)){"LP2105", max_speed, "PCIe"};
+		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
 		GE = 1;
 		break;
 	case PCI_DEVICE_ID_ZMID:
-		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZSMB:
-		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP101:
-		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
+		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP10000S:
-		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
+		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP11000S:
-		m = (typeof(m)){"LP11000-S", max_speed,
-			"PCI-X2"};
+		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LPE11000S:
-		m = (typeof(m)){"LPe11000-S", max_speed,
-			"PCIe"};
+		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT:
-		m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_MID:
-		m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_SMB:
-		m = (typeof(m)){"LPe121", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_DCSP:
-		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_SCSP:
-		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_S:
-		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
+		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_HORNET:
-		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
+		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
 		GE = 1;
 		break;
 	case PCI_DEVICE_ID_PROTEUS_VF:
-		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PROTEUS_PF:
-		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PROTEUS_S:
-		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
+		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
+				"Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_TIGERSHARK:
 		oneConnect = 1;
-		m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
+		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_TOMCAT:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_FALCON:
+		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
+				"EmulexSecure Fibre"};
 		break;
 	default:
-		m = (typeof(m)){ NULL };
+		m = (typeof(m)){"Unknown", "", ""};
 		break;
 	}
 
@@ -1682,17 +1831,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	if (descp && descp[0] == '\0') {
 		if (oneConnect)
 			snprintf(descp, 255,
-				"Emulex OneConnect %s, FCoE Initiator, Port %s",
-				m.name,
+				"Emulex OneConnect %s, %s Initiator, Port %s",
+				m.name, m.function,
 				phba->Port);
 		else
 			snprintf(descp, 255,
 				"Emulex %s %d%s %s %s",
-				m.name, m.max_speed,
-				(GE) ? "GE" : "Gb",
-				m.bus,
-				(GE) ? "FCoE Adapter" :
-					"Fibre Channel Adapter");
+				m.name, max_speed, (GE) ? "GE" : "Gb",
+				m.bus, m.function);
 	}
 }
 
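
The model table above relies on a C99 compound literal assigned through GCC's typeof(m), so each case replaces the whole anonymous struct in one statement; with max_speed dropped from the struct, the description string is now built from the live max_speed variable plus the new per-device function string. A reduced, self-contained sketch of the same construct outside the driver (values are illustrative):

	/* Reduced illustration of the (typeof(m)){...} idiom used above. */
	#include <stdio.h>

	int main(void)
	{
		struct { char *name; char *bus; char *function; } m =
			{"<Unknown>", "", ""};
		int max_speed = 8;	/* derived from vp->rev in the driver */

		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		printf("Emulex %s %dGb %s %s\n",
		       m.name, max_speed, m.bus, m.function);
		return 0;
	}
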
@@ -2045,6 +2191,46 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
 }
 
 /**
+ * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
+ * caller of this routine should already hold the host lock.
+ **/
+void
+__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	/* Clear pending FCF rediscovery wait and failover in progress flags */
+	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
+				FCF_DEAD_DISC |
+				FCF_ACVL_DISC);
+	/* Now, try to stop the timer */
+	del_timer(&phba->fcf.redisc_wait);
+}
+
+/**
+ * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
+ * checks whether the FCF rediscovery wait timer is pending with the host
+ * lock held before proceeding with disabling the timer and clearing the
+ * wait timer pendig flag.
+ **/
+void
+lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+		/* FCF rediscovery timer already fired or stopped */
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
  * @phba: pointer to lpfc hba data structure.
  *
@@ -2068,6 +2254,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
 		break;
 	case LPFC_PCI_DEV_OC:
 		/* Stop any OneConnect device sepcific driver timers */
+		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
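
The pair of stop routines added above follows the kernel's double-underscore convention: __lpfc_sli4_stop_fcf_redisc_wait_timer() assumes the caller already holds hbalock, while the plain-named wrapper takes the lock and re-checks FCF_REDISC_PEND so that a timer which has already fired is not "stopped" a second time. A generic, hedged sketch of that locked/unlocked split:

	/* Generic sketch of the locking split used above. */
	static void __stop_redisc_timer_locked(struct lpfc_hba *phba)
	{
		phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;	/* clear pending state */
		del_timer(&phba->fcf.redisc_wait);	/* may already have fired */
	}

	static void stop_redisc_timer(struct lpfc_hba *phba)
	{
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_REDISC_PEND)	/* re-check under lock */
			__stop_redisc_timer_locked(phba);
		spin_unlock_irq(&phba->hbalock);
	}
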
@@ -2200,6 +2387,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
 	struct lpfc_vport *vport = phba->pport;
 	struct lpfc_nodelist  *ndlp, *next_ndlp;
 	struct lpfc_vport **vports;
+	struct Scsi_Host *shost;
 	int i;
 
 	if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -2213,11 +2401,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL) {
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
-			struct Scsi_Host *shost;
-
 			if (vports[i]->load_flag & FC_UNLOADING)
 				continue;
-			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
+			spin_unlock_irq(shost->host_lock);
+
 			shost = lpfc_shost_from_vport(vports[i]);
 			list_for_each_entry_safe(ndlp, next_ndlp,
 						&vports[i]->fc_nodes,
@@ -2308,6 +2500,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 
 	spin_lock_irq(&phba->hbalock);
 	/* Release all the lpfc_scsi_bufs maintained by this host. */
+	spin_lock(&phba->scsi_buf_list_lock);
 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
 		list_del(&sb->list);
 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
@@ -2315,6 +2508,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 		kfree(sb);
 		phba->total_scsi_bufs--;
 	}
+	spin_unlock(&phba->scsi_buf_list_lock);
 
 	/* Release all the lpfc_iocbq entries maintained by this host. */
 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2322,9 +2516,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 		kfree(io);
 		phba->total_iocbq_bufs--;
 	}
-
 	spin_unlock_irq(&phba->hbalock);
-
 	return 0;
 }
 
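
With these three hunks, lpfc_scsi_free() nests the new scsi_buf_list_lock inside hbalock: the outer spin_lock_irq() disables interrupts once, so the inner lock can be a plain spin_lock(), and every path touching the SCSI buffer list must honor the same acquisition order to stay deadlock-free. A hedged sketch of the nesting:

	/* Lock nesting used above: hbalock (irq-disabling, outer) then
	 * scsi_buf_list_lock (plain, inner). */
	spin_lock_irq(&phba->hbalock);		/* outer: also disables irqs */
	spin_lock(&phba->scsi_buf_list_lock);	/* inner: irqs already off */
	/* ... walk and free phba->lpfc_scsi_buf_list ... */
	spin_unlock(&phba->scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);
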
@@ -2373,7 +2565,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->this_id = -1;
 	shost->max_cmd_len = 16;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
+		shost->dma_boundary =
+			phba->sli4_hba.pc_sli4_params.sge_supp_len;
 		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
 	}
 
@@ -2407,8 +2600,16 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	init_timer(&vport->els_tmofunc);
 	vport->els_tmofunc.function = lpfc_els_timeout;
 	vport->els_tmofunc.data = (unsigned long)vport;
+	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+		phba->menlo_flag |= HBA_MENLO_SUPPORT;
+		/* check for menlo minimum sg count */
+		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
+			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+			shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+		}
+	}
 
-	error = scsi_add_host(shost, dev);
+	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
 		goto out_put_shost;
 
@@ -2622,8 +2823,6 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
 	lpfc_stop_hba_timers(phba);
 	phba->pport->work_port_events = 0;
 	phba->sli4_hba.intr_enable = 0;
-	/* Hard clear it for now, shall have more graceful way to wait later */
-	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 }
 
 /**
@@ -2675,7 +2874,7 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
 	del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
 	bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
 	bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
-	       phba->fcf.fcf_indx);
+	       phba->fcf.current_rec.fcf_indx);
 
 	if (!phba->sli4_hba.intr_enable)
 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -2699,6 +2898,117 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
+ * @phba: Pointer to hba for which this call is being executed.
+ *
+ * This routine starts the timer waiting for the FCF rediscovery to complete.
+ **/
+void
+lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
+{
+	unsigned long fcf_redisc_wait_tmo =
+		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
+	/* Start fcf rediscovery wait period timer */
+	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
+	spin_lock_irq(&phba->hbalock);
+	/* Allow action to new fcf asynchronous event */
+	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+	/* Mark the FCF rediscovery pending state */
+	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
+ * @ptr: Map to lpfc_hba data structure pointer.
+ *
+ * This routine is invoked when waiting for FCF table rediscover has been
+ * timed out. If new FCF record(s) has (have) been discovered during the
+ * wait period, a new FCF event shall be added to the FCOE async event
+ * list, and then worker thread shall be waked up for processing from the
+ * worker thread context.
+ **/
+void
+lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+
+	/* Don't send FCF rediscovery event if timer cancelled */
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	/* Clear FCF rediscovery timer pending flag */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+	/* FCF rediscovery event to worker thread */
+	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2776 FCF rediscover wait timer expired, post "
+			"a worker thread event for FCF table scan\n");
+	/* wake up worker thread */
+	lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function uses the QUERY_FW_CFG mailbox command to determine if the
+ * firmware loaded supports FCoE. A return of zero indicates that the mailbox
+ * was successful and the firmware supports FCoE. Any other return indicates
+ * a error. It is assumed that this function will be called before interrupts
+ * are enabled.
+ **/
+static int
+lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
+{
+	int rc = 0;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
+	uint32_t length;
+	uint32_t shdr_status, shdr_add_status;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2621 Failed to allocate mbox for "
+				"query firmware config cmd\n");
+		return -ENOMEM;
+	}
+	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
+	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+			 length, LPFC_SLI4_MBX_EMBED);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status,
+			     &query_fw_cfg->header.cfg_shdr.response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+				 &query_fw_cfg->header.cfg_shdr.response);
+	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2622 Query Firmware Config failed "
+				"mbx status x%x, status x%x add_status x%x\n",
+				rc, shdr_status, shdr_add_status);
+		return -EINVAL;
+	}
+	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2623 FCoE Function not supported by firmware. "
+				"Function mode = %08x\n",
+				query_fw_cfg->function_mode);
+		return -EINVAL;
+	}
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	return 0;
+}
+
+/**
  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async link completion queue entry.
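
Together with the stop helpers added earlier in this patch, these routines form a small state machine around the FCF rediscovery timer: the start path sets FCF_REDISC_PEND and arms the one-shot timer with mod_timer(); the expiry handler trades PEND for REDISC_EVT and wakes the worker thread; the stop path clears PEND under hbalock before del_timer(). A hedged sketch of arming such a one-shot timer in the pre-2.6.38 init_timer() style this driver uses:

	/* Sketch: one-shot rediscovery timer, old init_timer() API as above. */
	init_timer(&phba->fcf.redisc_wait);
	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
	phba->fcf.redisc_wait.data = (unsigned long)phba;

	/* arm: fires once, LPFC_FCF_REDISCOVER_WAIT_TMO ms from now */
	mod_timer(&phba->fcf.redisc_wait,
		  jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
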
@@ -2893,6 +3203,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
 				bf_get(lpfc_acqe_link_physical, acqe_link);
 	phba->sli4_hba.link_state.fault =
 				bf_get(lpfc_acqe_link_fault, acqe_link);
+	phba->sli4_hba.link_state.logical_speed =
+				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
 
 	/* Invoke the lpfc_handle_latt mailbox command callback function */
 	lpfc_mbx_cmpl_read_la(phba, pmb);
@@ -2906,6 +3218,68 @@ out_free_pmb:
 }
 
 /**
+ * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
+ * @vport: pointer to vport data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on a vport in
+ * response to a CVL event.
+ *
+ * Return the pointer to the ndlp with the vport if successful, otherwise
+ * return NULL.
+ **/
+static struct lpfc_nodelist *
+lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	struct lpfc_hba *phba;
+
+	if (!vport)
+		return NULL;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp)
+		return NULL;
+	phba = vport->phba;
+	if (!phba)
+		return NULL;
+	if (phba->pport->port_state <= LPFC_FLOGI)
+		return NULL;
+	/* If virtual link is not yet instantiated ignore CVL */
+	if (vport->port_state <= LPFC_FDISC)
+		return NULL;
+	shost = lpfc_shost_from_vport(vport);
+	if (!shost)
+		return NULL;
+	lpfc_linkdown_port(vport);
+	lpfc_cleanup_pending_mbox(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_VPORT_CVL_RCVD;
+	spin_unlock_irq(shost->host_lock);
+
+	return ndlp;
+}
+
+/**
+ * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
+ * @vport: pointer to lpfc hba data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on all vports in
+ * response to a FCF dead event.
+ **/
+static void
+lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_sli4_perform_vport_cvl(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
  * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -2918,33 +3292,71 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 {
 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
 	int rc;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	int active_vlink_present;
+	struct lpfc_vport **vports;
+	int i;
 
+	phba->fc_eventTag = acqe_fcoe->event_tag;
 	phba->fcoe_eventtag = acqe_fcoe->event_tag;
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2546 New FCF found index 0x%x tag 0x%x\n",
-			acqe_fcoe->fcf_index,
-			acqe_fcoe->event_tag);
-		/*
-		 * If the current FCF is in discovered state, or
-		 * FCF discovery is in progress do nothing.
-		 */
+	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2546 New FCF found/FCF parameter modified event: "
+			"evt_tag:x%x, fcf_index:x%x\n",
+			acqe_fcoe->event_tag, acqe_fcoe->index);
+
 		spin_lock_irq(&phba->hbalock);
-		if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
+		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
 		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
+			/*
+			 * If the current FCF is in discovered state or
+			 * FCF discovery is in progress, do nothing.
+			 */
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+
+		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+			/*
+			 * If fast FCF failover rescan event is pending,
+			 * do nothing.
+			 */
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
 		spin_unlock_irq(&phba->hbalock);
 
-		/* Read the FCF table and re-discover SAN. */
-		rc = lpfc_sli4_read_fcf_record(phba,
-			LPFC_FCOE_FCF_GET_FIRST);
+		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+			/*
+			 * During period of FCF discovery, read the FCF
+			 * table record indexed by the event to update
+			 * FCF round robin failover eligible FCF bmask.
+			 */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2779 Read new FCF record with "
+					"fcf_index:x%x for updating FCF "
+					"round robin failover bmask\n",
+					acqe_fcoe->index);
+			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
+		}
+
+		/* Otherwise, scan the entire FCF table and re-discover SAN */
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2770 Start FCF table scan due to new FCF "
+				"event: evt_tag:x%x, fcf_index:x%x\n",
+				acqe_fcoe->event_tag, acqe_fcoe->index);
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
-			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-				"2547 Read FCF record failed 0x%x\n",
-				rc);
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2547 Issue FCF scan read FCF mailbox "
+				"command failed 0x%x\n", rc);
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -2955,22 +3367,130 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2549 FCF disconnected fron network index 0x%x"
-			" tag 0x%x\n", acqe_fcoe->fcf_index,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2549 FCF disconnected from network index 0x%x"
+			" tag 0x%x\n", acqe_fcoe->index,
 			acqe_fcoe->event_tag);
 		/* If the event is not for currently used fcf do nothing */
-		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
+		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
 			break;
-		/*
-		 * Currently, driver support only one FCF - so treat this as
-		 * a link down.
+		/* We request port to rediscover the entire FCF table for
+		 * a fast recovery from case that the current FCF record
+		 * is no longer valid if we are not in the middle of FCF
+		 * failover process already.
 		 */
-		lpfc_linkdown(phba);
-		/* Unregister FCF if no devices connected to it */
-		lpfc_unregister_unused_fcf(phba);
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			spin_unlock_irq(&phba->hbalock);
+			/* Update FLOGI FCF failover eligible FCF bmask */
+			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
+			break;
+		}
+		/* Mark the fast failover process in progress */
+		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2771 Start FCF fast failover process due to "
+				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
+		rc = lpfc_sli4_redisc_fcf_table(phba);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2772 Issue FCF rediscover mabilbox "
+					"command failed, fail through to FCF "
+					"dead event\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * Last resort will fail over by treating this
+			 * as a link down to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		} else
+			/* Handling fast FCF failover to a DEAD FCF event
+			 * is considered equalivant to receiving CVL to all
+			 * vports.
+			 */
+			lpfc_sli4_perform_all_vport_cvl(phba);
 		break;
+	case LPFC_FCOE_EVENT_TYPE_CVL:
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2718 Clear Virtual Link Received for VPI 0x%x"
+			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
+		vport = lpfc_find_vport_by_vpid(phba,
+				acqe_fcoe->index - phba->vpi_base);
+		ndlp = lpfc_sli4_perform_vport_cvl(vport);
+		if (!ndlp)
+			break;
+		active_vlink_present = 0;
+
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports) {
+			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+					i++) {
+				if ((!(vports[i]->fc_flag &
+					FC_VPORT_CVL_RCVD)) &&
+					(vports[i]->port_state > LPFC_FDISC)) {
+					active_vlink_present = 1;
+					break;
+				}
+			}
+			lpfc_destroy_vport_work_array(phba, vports);
+		}
 
+		if (active_vlink_present) {
+			/*
+			 * If there are other active VLinks present,
+			 * re-instantiate the Vlink using FDISC.
+			 */
+			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+			shost = lpfc_shost_from_vport(vport);
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_DELAY_TMO;
+			spin_unlock_irq(shost->host_lock);
+			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+			vport->port_state = LPFC_FDISC;
+		} else {
+			/*
+			 * Otherwise, we request port to rediscover
+			 * the entire FCF table for a fast recovery
+			 * from possible case that the current FCF
+			 * is no longer valid if we are not already
+			 * in the FCF failover process.
+			 */
+			spin_lock_irq(&phba->hbalock);
+			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+				spin_unlock_irq(&phba->hbalock);
+				break;
+			}
+			/* Mark the fast failover process in progress */
+			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2773 Start FCF fast failover due "
+					"to CVL event: evt_tag:x%x\n",
+					acqe_fcoe->event_tag);
+			rc = lpfc_sli4_redisc_fcf_table(phba);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+						LOG_DISCOVERY,
+						"2774 Issue FCF rediscover "
+						"mabilbox command failed, "
+						"through to CVL event\n");
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+				spin_unlock_irq(&phba->hbalock);
+				/*
+				 * Last resort will be re-try on the
+				 * the current registered FCF entry.
+				 */
+				lpfc_retry_pport_discovery(phba);
+			}
+		}
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0288 Unknown FCoE event type 0x%x event tag "
@@ -2990,6 +3510,7 @@ static void
 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
 			 struct lpfc_acqe_dcbx *acqe_dcbx)
 {
+	phba->fc_eventTag = acqe_dcbx->event_tag;
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0290 The SLI4 DCBX asynchronous event is not "
 			"handled yet\n");
@@ -3044,6 +3565,37 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process FCF table
+ * rediscovery pending completion event.
+ **/
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
+{
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Clear FCF rediscovery timeout event */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
+	/* Clear driver fast failover FCF record flag */
+	phba->fcf.failover_rec.flag = 0;
+	/* Set state for FCF fast failover */
+	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Scan FCF table from the first entry to re-discover SAN */
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2777 Start FCF table scan after FCF "
+			"rediscovery quiescent period over\n");
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+	if (rc)
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2747 Issue FCF scan read FCF mailbox "
+				"command failed 0x%x\n", rc);
+}
+
+/**
  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
  * @phba: pointer to lpfc hba data structure.
  * @dev_grp: The HBA PCI-Device group number.
@@ -3124,7 +3676,7 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
  * PCI devices.
  *
  * Return codes
- * 	0 - sucessful
+ * 	0 - successful
  * 	other values - error
  **/
 static int
@@ -3220,7 +3772,7 @@ lpfc_reset_hba(struct lpfc_hba *phba)
  * support the SLI-3 HBA device it attached to.
  *
  * Return codes
- * 	0 - sucessful
+ * 	0 - successful
  * 	other values - error
  **/
 static int
@@ -3321,15 +3873,18 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
  * support the SLI-4 HBA device it attached to.
  *
  * Return codes
- * 	0 - sucessful
+ * 	0 - successful
  * 	other values - error
  **/
 static int
 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
 	struct lpfc_sli *psli;
-	int rc;
-	int i, hbq_count;
+	LPFC_MBOXQ_t *mboxq;
+	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
+	struct lpfc_mqe *mqe;
+	int longs;
 
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
@@ -3358,6 +3913,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	init_timer(&phba->eratt_poll);
 	phba->eratt_poll.function = lpfc_poll_eratt;
 	phba->eratt_poll.data = (unsigned long) phba;
+	/* FCF rediscover timer */
+	init_timer(&phba->fcf.redisc_wait);
+	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
+	phba->fcf.redisc_wait.data = (unsigned long)phba;
+
 	/*
 	 * We need to do a READ_CONFIG mailbox command here before
 	 * calling lpfc_get_cfgparam. For VFs this will report the
@@ -3382,31 +3942,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
 	 * 2 segments are added since the IOCB needs a command and response bde.
 	 * To insure that the scsi sgl does not cross a 4k page boundary only
-	 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
-	 * Table of sgl sizes and seg_cnt:
-	 * sgl size, 	sg_seg_cnt	total seg
-	 * 1k		50		52
-	 * 2k		114		116
-	 * 4k		242		244
-	 * 8k		498		500
-	 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
-	 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
-	 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
-	 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
+	 * sgl sizes of must be a power of 2.
 	 */
-	if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
-		phba->cfg_sg_seg_cnt = 50;
-	else if (phba->cfg_sg_seg_cnt <= 114)
-		phba->cfg_sg_seg_cnt = 114;
-	else if (phba->cfg_sg_seg_cnt <= 242)
-		phba->cfg_sg_seg_cnt = 242;
-	else
-		phba->cfg_sg_seg_cnt = 498;
-
-	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
-					+ sizeof(struct fcp_rsp);
-	phba->cfg_sg_dma_buf_size +=
-		((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
+		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
+	/* Feature Level 1 hardware is limited to 2 pages */
+	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
+	     LPFC_SLI_INTF_FEATURELEVEL1_1))
+		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
+	else
+		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
+	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
+	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
+	     dma_buf_size = dma_buf_size << 1)
+		;
+	if (dma_buf_size == max_buf_size)
+		phba->cfg_sg_seg_cnt = (dma_buf_size -
+			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
+			(2 * sizeof(struct sli4_sge))) /
+				sizeof(struct sli4_sge);
+	phba->cfg_sg_dma_buf_size = dma_buf_size;
 
 	/* Initialize buffer queue management fields */
 	hbq_count = lpfc_sli_hbq_count();
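
The replacement sizing code rounds the required buffer length up to the next power of two by doubling dma_buf_size until it covers buf_size, and only if it hits the hardware cap does it shrink cfg_sg_seg_cnt back down to what still fits. Worked example under assumed sizes (illustrative, not the real macro values): with a 32-byte fcp_cmnd, 160-byte fcp_rsp and 16-byte sli4_sge, 64 requested segments need 32 + 160 + 66*16 = 1248 bytes, which the loop rounds up to a 2048-byte DMA buffer. A standalone sketch of the computation:

	/* Standalone sketch of the power-of-two sizing above; constants are
	 * illustrative stand-ins for the LPFC_SLI4_*_BUF_SIZE macros. */
	unsigned int buf_size = 32 + 160 + (64 + 2) * 16;	/* = 1248 */
	unsigned int dma_buf_size = 1024;			/* assumed minimum */
	unsigned int max_buf_size = 8192;			/* assumed cap */

	while (dma_buf_size < max_buf_size && buf_size > dma_buf_size)
		dma_buf_size <<= 1;				/* 1024 -> 2048 */
	/* if dma_buf_size == max_buf_size, recompute the segment count
	 * so the sgl still fits inside the capped buffer */
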
@@ -3432,7 +3987,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Driver internel slow-path CQ Event pool */
 	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
 	/* Response IOCB work queue list */
-	INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
 	/* Asynchronous event CQ Event work queue list */
 	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
 	/* Fast-path XRI aborted CQ Event work queue list */
@@ -3461,6 +4016,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	if (unlikely(rc))
 		goto out_free_bsmbx;
 
+	rc = lpfc_sli4_fw_cfg_check(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
 	/* Set up the hba's configuration parameters. */
 	rc = lpfc_sli4_read_config(phba);
 	if (unlikely(rc))
@@ -3502,13 +4061,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_active_sgl;
 	}
 
+	/* Allocate eligible FCF bmask memory for FCF round robin failover */
+	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+					 GFP_KERNEL);
+	if (!phba->fcf.fcf_rr_bmask) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2759 Failed allocate memory for FCF round "
+				"robin failover bmask\n");
+		goto out_remove_rpi_hdrs;
+	}
+
 	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
 				    phba->cfg_fcp_eq_count), GFP_KERNEL);
 	if (!phba->sli4_hba.fcp_eq_hdl) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2572 Failed allocate memory for fast-path "
 				"per-EQ handle array\n");
-		goto out_remove_rpi_hdrs;
+		goto out_free_fcf_rr_bmask;
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
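
The bmask allocation uses the standard kernel bitmap sizing: round the bit count up to whole unsigned longs so the bitops helpers can operate on the array; this is the same arithmetic the BITS_TO_LONGS() macro performs. A hedged sketch of the sizing plus typical set/test usage (the example index is illustrative):

	/* Bitmap sizing as above; equivalent to BITS_TO_LONGS(nbits). */
	int longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1) /
		    BITS_PER_LONG;
	unsigned long *bmask = kzalloc(longs * sizeof(unsigned long),
				       GFP_KERNEL);
	int fcf_index = 0;			/* example index */

	if (bmask) {
		set_bit(fcf_index, bmask);	/* mark FCF eligible */
		if (test_bit(fcf_index, bmask))
			clear_bit(fcf_index, bmask);	/* retire it again */
	}
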
@@ -3520,10 +4090,49 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3520 goto out_free_fcp_eq_hdl; 4090 goto out_free_fcp_eq_hdl;
3521 } 4091 }
3522 4092
4093 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4094 GFP_KERNEL);
4095 if (!mboxq) {
4096 rc = -ENOMEM;
4097 goto out_free_fcp_eq_hdl;
4098 }
4099
4100 /* Get the Supported Pages. It is always available. */
4101 lpfc_supported_pages(mboxq);
4102 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4103 if (unlikely(rc)) {
4104 rc = -EIO;
4105 mempool_free(mboxq, phba->mbox_mem_pool);
4106 goto out_free_fcp_eq_hdl;
4107 }
4108
4109 mqe = &mboxq->u.mqe;
4110 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4111 LPFC_MAX_SUPPORTED_PAGES);
4112 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4113 switch (pn_page[i]) {
4114 case LPFC_SLI4_PARAMETERS:
4115 phba->sli4_hba.pc_sli4_params.supported = 1;
4116 break;
4117 default:
4118 break;
4119 }
4120 }
4121
4122 /* Read the port's SLI4 Parameters capabilities if supported. */
4123 if (phba->sli4_hba.pc_sli4_params.supported)
4124 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4125 mempool_free(mboxq, phba->mbox_mem_pool);
4126 if (rc) {
4127 rc = -EIO;
4128 goto out_free_fcp_eq_hdl;
4129 }
3523 return rc; 4130 return rc;
3524 4131
3525out_free_fcp_eq_hdl: 4132out_free_fcp_eq_hdl:
3526 kfree(phba->sli4_hba.fcp_eq_hdl); 4133 kfree(phba->sli4_hba.fcp_eq_hdl);
4134out_free_fcf_rr_bmask:
4135 kfree(phba->fcf.fcf_rr_bmask);
3527out_remove_rpi_hdrs: 4136out_remove_rpi_hdrs:
3528 lpfc_sli4_remove_rpi_hdrs(phba); 4137 lpfc_sli4_remove_rpi_hdrs(phba);
3529out_free_active_sgl: 4138out_free_active_sgl:
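The new out_free_fcf_rr_bmask label slots into the existing unwind ladder, so each failure point releases exactly the resources acquired before it, in reverse order. A compilable sketch of the idiom with illustrative resource names:

#include <stdlib.h>

struct res { void *sgl, *bmask, *eq_hdl; };

/* Acquire in order; on failure, fall through the labels to undo prior steps. */
static int res_setup(struct res *r)
{
	r->sgl = malloc(64);
	if (!r->sgl)
		goto out;
	r->bmask = malloc(64);
	if (!r->bmask)
		goto out_free_sgl;
	r->eq_hdl = malloc(64);
	if (!r->eq_hdl)
		goto out_free_bmask;
	return 0;

out_free_bmask:
	free(r->bmask);
out_free_sgl:
	free(r->sgl);
out:
	return -1; /* the driver returns -ENOMEM here */
}

int main(void)
{
	struct res r;

	if (res_setup(&r))
		return 1;
	free(r.eq_hdl);
	free(r.bmask);
	free(r.sgl);
	return 0;
}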
@@ -3569,6 +4178,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3569 lpfc_sli4_remove_rpi_hdrs(phba); 4178 lpfc_sli4_remove_rpi_hdrs(phba);
3570 lpfc_sli4_remove_rpis(phba); 4179 lpfc_sli4_remove_rpis(phba);
3571 4180
4181 /* Free eligible FCF index bmask */
4182 kfree(phba->fcf.fcf_rr_bmask);
4183
3572 /* Free the ELS sgl list */ 4184 /* Free the ELS sgl list */
3573 lpfc_free_active_sgl(phba); 4185 lpfc_free_active_sgl(phba);
3574 lpfc_free_sgl_list(phba); 4186 lpfc_free_sgl_list(phba);
@@ -3594,8 +4206,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3594 4206
3595 /* Free the current connect table */ 4207 /* Free the current connect table */
3596 list_for_each_entry_safe(conn_entry, next_conn_entry, 4208 list_for_each_entry_safe(conn_entry, next_conn_entry,
3597 &phba->fcf_conn_rec_list, list) 4209 &phba->fcf_conn_rec_list, list) {
4210 list_del_init(&conn_entry->list);
3598 kfree(conn_entry); 4211 kfree(conn_entry);
4212 }
3599 4213
3600 return; 4214 return;
3601} 4215}
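The connect-table hunk fixes a subtle teardown bug: records were freed while still linked into fcf_conn_rec_list. list_for_each_entry_safe() caches the next node, so each record can be unlinked and freed in a single pass. A kernel-style sketch; the record type is illustrative:

#include <linux/list.h>
#include <linux/slab.h>

struct conn_rec {
	struct list_head list;
	/* record payload would live here */
};

/* Empty the list; the _safe variant tolerates deleting the cursor node. */
static void conn_list_destroy(struct list_head *head)
{
	struct conn_rec *rec, *next;

	list_for_each_entry_safe(rec, next, head, list) {
		list_del_init(&rec->list); /* unlink before the memory goes away */
		kfree(rec);
	}
}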
@@ -3613,6 +4227,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3613int 4227int
3614lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4228lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3615{ 4229{
4230 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4231 phba->lpfc_hba_down_link = lpfc_hba_down_link;
3616 switch (dev_grp) { 4232 switch (dev_grp) {
3617 case LPFC_PCI_DEV_LP: 4233 case LPFC_PCI_DEV_LP:
3618 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4234 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
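lpfc_init_api_table_setup() now installs the shared link-control handlers unconditionally before the switch picks the per-generation routines. A small user-space sketch of the function-pointer dispatch pattern; the names are illustrative:

#include <stdio.h>

struct hba_ops {
	void (*init_link)(void);
	void (*down_post)(void);
};

static void init_link_common(void) { puts("common link init"); }
static void down_post_s3(void) { puts("SLI-3 down post"); }
static void down_post_s4(void) { puts("SLI-4 down post"); }

/* Shared ops first, then the generation-specific override. */
static int ops_setup(struct hba_ops *ops, int dev_grp)
{
	ops->init_link = init_link_common;
	switch (dev_grp) {
	case 3:
		ops->down_post = down_post_s3;
		break;
	case 4:
		ops->down_post = down_post_s4;
		break;
	default:
		return -1; /* unknown device group */
	}
	return 0;
}

int main(void)
{
	struct hba_ops ops;

	if (!ops_setup(&ops, 4)) {
		ops.init_link();
		ops.down_post();
	}
	return 0;
}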
@@ -3642,7 +4258,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3642 * device specific resource setup to support the HBA device it attached to. 4258 * device specific resource setup to support the HBA device it attached to.
3643 * 4259 *
3644 * Return codes 4260 * Return codes
3645 * 0 - sucessful 4261 * 0 - successful
3646 * other values - error 4262 * other values - error
3647 **/ 4263 **/
3648static int 4264static int
@@ -3688,7 +4304,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3688 * device specific resource setup to support the HBA device it attached to. 4304 * device specific resource setup to support the HBA device it attached to.
3689 * 4305 *
3690 * Return codes 4306 * Return codes
3691 * 0 - sucessful 4307 * 0 - successful
3692 * other values - error 4308 * other values - error
3693 **/ 4309 **/
3694static int 4310static int
@@ -3753,7 +4369,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
3753 * list and set up the IOCB tag array accordingly. 4369 * list and set up the IOCB tag array accordingly.
3754 * 4370 *
3755 * Return codes 4371 * Return codes
3756 * 0 - sucessful 4372 * 0 - successful
3757 * other values - error 4373 * other values - error
3758 **/ 4374 **/
3759static int 4375static int
@@ -3824,7 +4440,7 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
3824 rc = lpfc_sli4_remove_all_sgl_pages(phba); 4440 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3825 if (rc) { 4441 if (rc) {
3826 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4442 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3827 "2005 Unable to deregister pages from HBA: %x", rc); 4443 "2005 Unable to deregister pages from HBA: %x\n", rc);
3828 } 4444 }
3829 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4445 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3830} 4446}
@@ -3872,7 +4488,7 @@ lpfc_free_active_sgl(struct lpfc_hba *phba)
3872 * list and set up the sgl xritag tag array accordingly. 4488 * list and set up the sgl xritag tag array accordingly.
3873 * 4489 *
3874 * Return codes 4490 * Return codes
3875 * 0 - sucessful 4491 * 0 - successful
3876 * other values - error 4492 * other values - error
3877 **/ 4493 **/
3878static int 4494static int
@@ -3960,6 +4576,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
3960 4576
3961 /* The list order is used by later block SGL registration */ 4577
3962 spin_lock_irq(&phba->hbalock); 4578 spin_lock_irq(&phba->hbalock);
4579 sglq_entry->state = SGL_FREED;
3963 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4580 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3964 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4581 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3965 phba->sli4_hba.total_sglq_bufs++; 4582 phba->sli4_hba.total_sglq_bufs++;
@@ -3986,7 +4603,7 @@ out_free_mem:
3986 * enabled and the driver is reinitializing the device. 4603 * enabled and the driver is reinitializing the device.
3987 * 4604 *
3988 * Return codes 4605 * Return codes
3989 * 0 - sucessful 4606 * 0 - successful
3990 * ENOMEM - No available memory 4607
3991 * EIO - The mailbox failed to complete successfully. 4608 * EIO - The mailbox failed to complete successfully.
3992 **/ 4609 **/
@@ -4146,7 +4763,7 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4146 * PCI device data structure is set. 4763 * PCI device data structure is set.
4147 * 4764 *
4148 * Return codes 4765 * Return codes
4149 * pointer to @phba - sucessful 4766 * pointer to @phba - successful
4150 * NULL - error 4767 * NULL - error
4151 **/ 4768 **/
4152static struct lpfc_hba * 4769static struct lpfc_hba *
@@ -4171,7 +4788,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
4171 return NULL; 4788 return NULL;
4172 } 4789 }
4173 4790
4174 mutex_init(&phba->ct_event_mutex); 4791 spin_lock_init(&phba->ct_ev_lock);
4175 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4792 INIT_LIST_HEAD(&phba->ct_ev_waiters);
4176 4793
4177 return phba; 4794 return phba;
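Swapping ct_event_mutex for ct_ev_lock matters because a spinlock may be taken where sleeping is forbidden, such as interrupt-driven completion paths, while a mutex may not. A kernel-style sketch of the initialization and an atomic-safe list update, assuming waiters can be queued from such contexts:

#include <linux/spinlock.h>
#include <linux/list.h>

struct ev_ctx {
	spinlock_t ct_ev_lock;          /* protects ct_ev_waiters */
	struct list_head ct_ev_waiters;
};

static void ev_ctx_init(struct ev_ctx *ctx)
{
	spin_lock_init(&ctx->ct_ev_lock);
	INIT_LIST_HEAD(&ctx->ct_ev_waiters);
}

/* Usable from atomic context; mutex_lock() could sleep here. */
static void ev_add_waiter(struct ev_ctx *ctx, struct list_head *waiter)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->ct_ev_lock, flags);
	list_add_tail(waiter, &ctx->ct_ev_waiters);
	spin_unlock_irqrestore(&ctx->ct_ev_lock, flags);
}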
@@ -4202,7 +4819,7 @@ lpfc_hba_free(struct lpfc_hba *phba)
4202 * host with it. 4819 * host with it.
4203 * 4820 *
4204 * Return codes 4821 * Return codes
4205 * 0 - sucessful 4822 * 0 - successful
4206 * other values - error 4823 * other values - error
4207 **/ 4824 **/
4208static int 4825static int
@@ -4273,7 +4890,8 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4273 _dump_buf_data = 4890 _dump_buf_data =
4274 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4891 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4275 if (_dump_buf_data) { 4892 if (_dump_buf_data) {
4276 printk(KERN_ERR "BLKGRD allocated %d pages for " 4893 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4894 "9043 BLKGRD: allocated %d pages for "
4277 "_dump_buf_data at 0x%p\n", 4895 "_dump_buf_data at 0x%p\n",
4278 (1 << pagecnt), _dump_buf_data); 4896 (1 << pagecnt), _dump_buf_data);
4279 _dump_buf_data_order = pagecnt; 4897 _dump_buf_data_order = pagecnt;
@@ -4284,17 +4902,20 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4284 --pagecnt; 4902 --pagecnt;
4285 } 4903 }
4286 if (!_dump_buf_data_order) 4904 if (!_dump_buf_data_order)
4287 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4905 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4906 "9044 BLKGRD: ERROR unable to allocate "
4288 "memory for hexdump\n"); 4907 "memory for hexdump\n");
4289 } else 4908 } else
4290 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" 4909 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4910 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4291 "\n", _dump_buf_data); 4911 "\n", _dump_buf_data);
4292 if (!_dump_buf_dif) { 4912 if (!_dump_buf_dif) {
4293 while (pagecnt) { 4913 while (pagecnt) {
4294 _dump_buf_dif = 4914 _dump_buf_dif =
4295 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4915 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4296 if (_dump_buf_dif) { 4916 if (_dump_buf_dif) {
4297 printk(KERN_ERR "BLKGRD allocated %d pages for " 4917 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4918 "9046 BLKGRD: allocated %d pages for "
4298 "_dump_buf_dif at 0x%p\n", 4919 "_dump_buf_dif at 0x%p\n",
4299 (1 << pagecnt), _dump_buf_dif); 4920 (1 << pagecnt), _dump_buf_dif);
4300 _dump_buf_dif_order = pagecnt; 4921 _dump_buf_dif_order = pagecnt;
@@ -4305,10 +4926,12 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4305 --pagecnt; 4926 --pagecnt;
4306 } 4927 }
4307 if (!_dump_buf_dif_order) 4928 if (!_dump_buf_dif_order)
4308 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4929 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4930 "9047 BLKGRD: ERROR unable to allocate "
4309 "memory for hexdump\n"); 4931 "memory for hexdump\n");
4310 } else 4932 } else
4311 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", 4933 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4934 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
4312 _dump_buf_dif); 4935 _dump_buf_dif);
4313} 4936}
4314 4937
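Both dump buffers follow the same allocation strategy: request the largest power-of-two block first and halve the order on each failure until something fits. A runnable user-space sketch of the back-off, with malloc() standing in for __get_free_pages():

#include <stdio.h>
#include <stdlib.h>

/* Try 4 KiB << order, halving the order on failure; 0 is the last resort. */
static void *alloc_backoff(unsigned int max_order, unsigned int *got_order)
{
	unsigned int order = max_order;

	for (;;) {
		void *buf = malloc((size_t)4096 << order);

		if (buf) {
			*got_order = order;
			return buf;
		}
		if (order == 0)
			return NULL;
		--order;
	}
}

int main(void)
{
	unsigned int order;
	void *buf = alloc_backoff(4, &order);

	if (!buf)
		return 1;
	printf("got %u pages\n", 1u << order);
	free(buf);
	return 0;
}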
@@ -4365,7 +4988,7 @@ lpfc_post_init_setup(struct lpfc_hba *phba)
4365 * with SLI-3 interface spec. 4988 * with SLI-3 interface spec.
4366 * 4989 *
4367 * Return codes 4990 * Return codes
4368 * 0 - sucessful 4991 * 0 - successful
4369 * other values - error 4992 * other values - error
4370 **/ 4993 **/
4371static int 4994static int
@@ -4384,9 +5007,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4384 pdev = phba->pcidev; 5007 pdev = phba->pcidev;
4385 5008
4386 /* Set the device DMA mask size */ 5009 /* Set the device DMA mask size */
4387 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 5010 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
4388 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5011 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
5012 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5013 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
4389 return error; 5014 return error;
5015 }
5016 }
4390 5017
4391 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5018 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4392 * required by each mapping. 5019 * required by each mapping.
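The reworked mask setup treats the streaming and coherent (consistent) DMA masks as a pair: try 64-bit for both, otherwise drop both to 32-bit. A kernel-style sketch of the pattern using the same era's PCI DMA API:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Prefer 64-bit DMA for both mask kinds; fall back to 32-bit as a pair. */
static int set_dma_masks(struct pci_dev *pdev)
{
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
		return 0;
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
		return 0;
	return -EIO; /* no usable addressing mode */
}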
@@ -4511,8 +5138,7 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4511int 5138int
4512lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5139lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4513{ 5140{
4514 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; 5141 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
4515 uint32_t onlnreg0, onlnreg1;
4516 int i, port_error = -ENODEV; 5142 int i, port_error = -ENODEV;
4517 5143
4518 if (!phba->sli4_hba.STAregaddr) 5144 if (!phba->sli4_hba.STAregaddr)
@@ -4548,29 +5174,35 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4548 bf_get(lpfc_hst_state_port_status, &sta_reg)); 5174 bf_get(lpfc_hst_state_port_status, &sta_reg));
4549 5175
4550 /* Log device information */ 5176 /* Log device information */
4551 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); 5177 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
4552 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5178 if (bf_get(lpfc_sli_intf_valid,
4553 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " 5179 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
4554 "FeatureL1=0x%x, FeatureL2=0x%x\n", 5180 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4555 bf_get(lpfc_scratchpad_chiptype, &scratchpad), 5181 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4556 bf_get(lpfc_scratchpad_slirev, &scratchpad), 5182 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4557 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), 5183 bf_get(lpfc_sli_intf_sli_family,
4558 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); 5184 &phba->sli4_hba.sli_intf),
4559 5185 bf_get(lpfc_sli_intf_slirev,
5186 &phba->sli4_hba.sli_intf),
5187 bf_get(lpfc_sli_intf_featurelevel1,
5188 &phba->sli4_hba.sli_intf),
5189 bf_get(lpfc_sli_intf_featurelevel2,
5190 &phba->sli4_hba.sli_intf));
5191 }
5192 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5193 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
4560 /* With unrecoverable error, log the error message and return error */ 5194 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4561 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); 5195 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4562 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); 5196 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4563 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { 5197 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
4564 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); 5198 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
4565 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); 5199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4566 if (uerrlo_reg.word0 || uerrhi_reg.word0) { 5200 "1422 HBA Unrecoverable error: "
4567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5201 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4568 "1422 HBA Unrecoverable error: " 5202 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
4569 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 5203 uerrlo_reg.word0, uerrhi_reg.word0,
4570 "online0_reg=0x%x, online1_reg=0x%x\n", 5204 phba->sli4_hba.ue_mask_lo,
4571 uerrlo_reg.word0, uerrhi_reg.word0, 5205 phba->sli4_hba.ue_mask_hi);
4572 onlnreg0, onlnreg1);
4573 }
4574 return -ENODEV; 5206 return -ENODEV;
4575 } 5207 }
4576 5208
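The rewritten check replaces the ONLINE register test with a mask-and-status comparison: a bit signals an unrecoverable error only if it is raised in the UERR status register and not filtered by the corresponding UE mask register. A runnable sketch of the bit logic with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Fatal only where a status bit is set AND its mask bit is clear. */
static int hba_unrecoverable(uint32_t uerr_lo, uint32_t uerr_hi,
			     uint32_t mask_lo, uint32_t mask_hi)
{
	return (~mask_lo & uerr_lo) || (~mask_hi & uerr_hi);
}

int main(void)
{
	printf("%d\n", hba_unrecoverable(0x08, 0, 0x08, 0)); /* 0: masked */
	printf("%d\n", hba_unrecoverable(0x80, 0, 0x08, 0)); /* 1: unmasked */
	return 0;
}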
@@ -4591,12 +5223,12 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4591 LPFC_UERR_STATUS_LO; 5223 LPFC_UERR_STATUS_LO;
4592 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5224 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4593 LPFC_UERR_STATUS_HI; 5225 LPFC_UERR_STATUS_HI;
4594 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + 5226 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4595 LPFC_ONLINE0; 5227 LPFC_UE_MASK_LO;
4596 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + 5228 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4597 LPFC_ONLINE1; 5229 LPFC_UE_MASK_HI;
4598 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + 5230 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
4599 LPFC_SCRATCHPAD; 5231 LPFC_SLI_INTF;
4600} 5232}
4601 5233
4602/** 5234/**
@@ -4662,7 +5294,7 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4662 * this routine. 5294 * this routine.
4663 * 5295 *
4664 * Return codes 5296 * Return codes
4665 * 0 - sucessful 5297 * 0 - successful
4666 * ENOMEM - could not allocated memory. 5298 * ENOMEM - could not allocated memory.
4667 **/ 5299 **/
4668static int 5300static int
@@ -4761,7 +5393,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4761 * allocation for the port. 5393 * allocation for the port.
4762 * 5394 *
4763 * Return codes 5395 * Return codes
4764 * 0 - sucessful 5396 * 0 - successful
4765 * ENOMEM - No available memory 5397
4766 * EIO - The mailbox failed to complete successfully. 5398 * EIO - The mailbox failed to complete successfully.
4767 **/ 5399 **/
@@ -4825,7 +5457,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
4825 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 5457 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4826 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 5458 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4827 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5459 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4828 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; 5460 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5461 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
4829 phba->max_vports = phba->max_vpi; 5462 phba->max_vports = phba->max_vpi;
4830 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5463 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4831 "2003 cfg params XRI(B:%d M:%d), " 5464 "2003 cfg params XRI(B:%d M:%d), "
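The max_vpi adjustment guards an unsigned computation: when the firmware reports zero VPIs the old assignment would leave an unusable count, so the new expression floors the result at zero and otherwise reports one less than the configured total, apparently because one VPI is consumed by the physical port. A one-function sketch of the clamp:

#include <stdio.h>

/* Reported VPI count -> usable vport count, floored at zero so an
 * empty report cannot wrap an unsigned subtraction. */
static unsigned int usable_vports(unsigned int max_cfg_vpi)
{
	return max_cfg_vpi > 0 ? max_cfg_vpi - 1 : 0;
}

int main(void)
{
	printf("%u %u\n", usable_vports(0), usable_vports(64)); /* 0 63 */
	return 0;
}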
@@ -4861,7 +5494,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
4861 * HBA consistent with the SLI-4 interface spec. 5494 * HBA consistent with the SLI-4 interface spec.
4862 * 5495 *
4863 * Return codes 5496 * Return codes
4864 * 0 - sucessful 5497 * 0 - successful
4865 * ENOMEM - No available memory 5498
4866 * EIO - The mailbox failed to complete successfully. 5499 * EIO - The mailbox failed to complete successfully.
4867 **/ 5500 **/
@@ -4910,7 +5543,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
4910 * we just use some constant number as a placeholder. 5543
4911 * 5544 *
4912 * Return codes 5545 * Return codes
4913 * 0 - sucessful 5546 * 0 - successful
4914 * ENOMEM - No available memory 5547
4915 * EIO - The mailbox failed to complete successfully. 5548 * EIO - The mailbox failed to complete successfully.
4916 **/ 5549 **/
@@ -4979,10 +5612,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
4979 /* It does not make sense to have more EQs than WQs */ 5612 /* It does not make sense to have more EQs than WQs */
4980 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5613 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4981 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5614 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4982 "2593 The number of FCP EQs (%d) is more " 5615 "2593 The FCP EQ count(%d) cannot be greater "
4983 "than the number of FCP WQs (%d), take " 5616 "than the FCP WQ count(%d), limiting the "
4984 "the number of FCP EQs same as than of " 5617 "FCP EQ count to %d\n", cfg_fcp_eq_count,
4985 "WQs (%d)\n", cfg_fcp_eq_count,
4986 phba->cfg_fcp_wq_count, 5618 phba->cfg_fcp_wq_count,
4987 phba->cfg_fcp_wq_count); 5619 phba->cfg_fcp_wq_count);
4988 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5620 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
@@ -5058,15 +5690,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
5058 } 5690 }
5059 phba->sli4_hba.els_cq = qdesc; 5691 phba->sli4_hba.els_cq = qdesc;
5060 5692
5061 /* Create slow-path Unsolicited Receive Complete Queue */
5062 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5063 phba->sli4_hba.cq_ecount);
5064 if (!qdesc) {
5065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5066 "0502 Failed allocate slow-path USOL RX CQ\n");
5067 goto out_free_els_cq;
5068 }
5069 phba->sli4_hba.rxq_cq = qdesc;
5070 5693
5071 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5694 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5072 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5695 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
@@ -5075,7 +5698,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
5075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5076 "2577 Failed allocate memory for fast-path " 5699 "2577 Failed allocate memory for fast-path "
5077 "CQ record array\n"); 5700 "CQ record array\n");
5078 goto out_free_rxq_cq; 5701 goto out_free_els_cq;
5079 } 5702 }
5080 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5703 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5081 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5704 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
@@ -5188,9 +5811,6 @@ out_free_fcp_cq:
5188 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 5811 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5189 } 5812 }
5190 kfree(phba->sli4_hba.fcp_cq); 5813 kfree(phba->sli4_hba.fcp_cq);
5191out_free_rxq_cq:
5192 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5193 phba->sli4_hba.rxq_cq = NULL;
5194out_free_els_cq: 5814out_free_els_cq:
5195 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5815 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5196 phba->sli4_hba.els_cq = NULL; 5816 phba->sli4_hba.els_cq = NULL;
@@ -5218,7 +5838,7 @@ out_error:
5218 * operation. 5838 * operation.
5219 * 5839 *
5220 * Return codes 5840 * Return codes
5221 * 0 - sucessful 5841 * 0 - successful
5222 * ENOMEM - No available memory 5842
5223 * EIO - The mailbox failed to complete successfully. 5843 * EIO - The mailbox failed to complete successfully.
5224 **/ 5844 **/
@@ -5247,10 +5867,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5247 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 5867 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5248 phba->sli4_hba.dat_rq = NULL; 5868 phba->sli4_hba.dat_rq = NULL;
5249 5869
5250 /* Release unsolicited receive complete queue */
5251 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5252 phba->sli4_hba.rxq_cq = NULL;
5253
5254 /* Release ELS complete queue */ 5870 /* Release ELS complete queue */
5255 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5871 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5256 phba->sli4_hba.els_cq = NULL; 5872 phba->sli4_hba.els_cq = NULL;
@@ -5286,7 +5902,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5286 * operation. 5902 * operation.
5287 * 5903 *
5288 * Return codes 5904 * Return codes
5289 * 0 - sucessful 5905 * 0 - successful
5290 * ENOMEM - No available memory 5906
5291 * EIO - The mailbox failed to complete successfully. 5907 * EIO - The mailbox failed to complete successfully.
5292 **/ 5908 **/
@@ -5383,25 +5999,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5383 phba->sli4_hba.els_cq->queue_id, 5999 phba->sli4_hba.els_cq->queue_id,
5384 phba->sli4_hba.sp_eq->queue_id); 6000 phba->sli4_hba.sp_eq->queue_id);
5385 6001
5386 /* Set up slow-path Unsolicited Receive Complete Queue */
5387 if (!phba->sli4_hba.rxq_cq) {
5388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5389 "0532 USOL RX CQ not allocated\n");
5390 goto out_destroy_els_cq;
5391 }
5392 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5393 LPFC_RCQ, LPFC_USOL);
5394 if (rc) {
5395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5396 "0533 Failed setup of slow-path USOL RX CQ: "
5397 "rc = 0x%x\n", rc);
5398 goto out_destroy_els_cq;
5399 }
5400 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5401 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5402 phba->sli4_hba.rxq_cq->queue_id,
5403 phba->sli4_hba.sp_eq->queue_id);
5404
5405 /* Set up fast-path FCP Response Complete Queue */ 6002 /* Set up fast-path FCP Response Complete Queue */
5406 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6003 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5407 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6004 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -5507,7 +6104,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5507 goto out_destroy_fcp_wq; 6104 goto out_destroy_fcp_wq;
5508 } 6105 }
5509 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6106 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5510 phba->sli4_hba.rxq_cq, LPFC_USOL); 6107 phba->sli4_hba.els_cq, LPFC_USOL);
5511 if (rc) { 6108 if (rc) {
5512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5513 "0541 Failed setup of Receive Queue: " 6110 "0541 Failed setup of Receive Queue: "
@@ -5519,7 +6116,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5519 "parent cq-id=%d\n", 6116 "parent cq-id=%d\n",
5520 phba->sli4_hba.hdr_rq->queue_id, 6117 phba->sli4_hba.hdr_rq->queue_id,
5521 phba->sli4_hba.dat_rq->queue_id, 6118 phba->sli4_hba.dat_rq->queue_id,
5522 phba->sli4_hba.rxq_cq->queue_id); 6119 phba->sli4_hba.els_cq->queue_id);
5523 return 0; 6120 return 0;
5524 6121
5525out_destroy_fcp_wq: 6122out_destroy_fcp_wq:
@@ -5531,8 +6128,6 @@ out_destroy_mbx_wq:
5531out_destroy_fcp_cq: 6128out_destroy_fcp_cq:
5532 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6129 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5533 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6130 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5534 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5535out_destroy_els_cq:
5536 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6131 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5537out_destroy_mbx_cq: 6132out_destroy_mbx_cq:
5538 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6133 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -5552,7 +6147,7 @@ out_error:
5552 * operation. 6147 * operation.
5553 * 6148 *
5554 * Return codes 6149 * Return codes
5555 * 0 - sucessful 6150 * 0 - successful
5556 * ENOMEM - No available memory 6151
5557 * EIO - The mailbox failed to complete successfully. 6152 * EIO - The mailbox failed to complete successfully.
5558 **/ 6153 **/
@@ -5574,8 +6169,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5574 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6169 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5575 /* Unset ELS complete queue */ 6170 /* Unset ELS complete queue */
5576 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6171 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5577 /* Unset unsolicited receive complete queue */
5578 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5579 /* Unset FCP response complete queue */ 6172 /* Unset FCP response complete queue */
5580 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6173 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5581 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6174 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
@@ -5599,7 +6192,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5599 * Later, this can be used for all the slow-path events. 6192 * Later, this can be used for all the slow-path events.
5600 * 6193 *
5601 * Return codes 6194 * Return codes
5602 * 0 - sucessful 6195 * 0 - successful
5603 * -ENOMEM - No available memory 6196
5604 **/ 6197 **/
5605static int 6198static int
@@ -5760,7 +6353,7 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5760 * all resources assigned to the PCI function which originates this request. 6353 * all resources assigned to the PCI function which originates this request.
5761 * 6354 *
5762 * Return codes 6355 * Return codes
5763 * 0 - sucessful 6356 * 0 - successful
5764 * ENOMEM - No availble memory 6357 * ENOMEM - No availble memory
5765 * EIO - The mailbox failed to complete successfully. 6358 * EIO - The mailbox failed to complete successfully.
5766 **/ 6359 **/
@@ -5910,7 +6503,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5910 spin_lock_irqsave(&phba->hbalock, flags); 6503 spin_lock_irqsave(&phba->hbalock, flags);
5911 /* Mark the FCFI is no longer registered */ 6504 /* Mark the FCFI is no longer registered */
5912 phba->fcf.fcf_flag &= 6505 phba->fcf.fcf_flag &=
5913 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); 6506 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
5914 spin_unlock_irqrestore(&phba->hbalock, flags); 6507 spin_unlock_irqrestore(&phba->hbalock, flags);
5915 } 6508 }
5916} 6509}
@@ -5923,7 +6516,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5923 * with SLI-4 interface spec. 6516 * with SLI-4 interface spec.
5924 * 6517 *
5925 * Return codes 6518 * Return codes
5926 * 0 - sucessful 6519 * 0 - successful
5927 * other values - error 6520 * other values - error
5928 **/ 6521 **/
5929static int 6522static int
@@ -5940,22 +6533,30 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5940 pdev = phba->pcidev; 6533 pdev = phba->pcidev;
5941 6534
5942 /* Set the device DMA mask size */ 6535 /* Set the device DMA mask size */
5943 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 6536 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5944 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 6537 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
6538 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6539 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
5945 return error; 6540 return error;
6541 }
6542 }
5946 6543
5947 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6544 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5948 * number of bytes required by each mapping. They are actually 6545 * number of bytes required by each mapping. They are actually
5949 * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. 6546 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
5950 */ 6547 */
5951 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); 6548 if (pci_resource_start(pdev, 0)) {
5952 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); 6549 phba->pci_bar0_map = pci_resource_start(pdev, 0);
5953 6550 bar0map_len = pci_resource_len(pdev, 0);
5954 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); 6551 } else {
5955 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); 6552 phba->pci_bar0_map = pci_resource_start(pdev, 1);
6553 bar0map_len = pci_resource_len(pdev, 1);
6554 }
6555 phba->pci_bar1_map = pci_resource_start(pdev, 2);
6556 bar1map_len = pci_resource_len(pdev, 2);
5956 6557
5957 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); 6558 phba->pci_bar2_map = pci_resource_start(pdev, 4);
5958 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); 6559 bar2map_len = pci_resource_len(pdev, 4);
5959 6560
5960 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6561 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5961 phba->sli4_hba.conf_regs_memmap_p = 6562 phba->sli4_hba.conf_regs_memmap_p =
@@ -6052,7 +6653,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6052 * will be left with MSI-X enabled and leaks its vectors. 6653 * will be left with MSI-X enabled and leaks its vectors.
6053 * 6654 *
6054 * Return codes 6655 * Return codes
6055 * 0 - sucessful 6656 * 0 - successful
6056 * other values - error 6657 * other values - error
6057 **/ 6658 **/
6058static int 6659static int
@@ -6184,7 +6785,7 @@ lpfc_sli_disable_msix(struct lpfc_hba *phba)
6184 * is done in this function. 6785 * is done in this function.
6185 * 6786 *
6186 * Return codes 6787 * Return codes
6187 * 0 - sucessful 6788 * 0 - successful
6188 * other values - error 6789 * other values - error
6189 */ 6790 */
6190static int 6791static int
@@ -6243,7 +6844,7 @@ lpfc_sli_disable_msi(struct lpfc_hba *phba)
6243 * MSI-X -> MSI -> IRQ. 6844 * MSI-X -> MSI -> IRQ.
6244 * 6845 *
6245 * Return codes 6846 * Return codes
6246 * 0 - sucessful 6847 * 0 - successful
6247 * other values - error 6848 * other values - error
6248 **/ 6849 **/
6249static uint32_t 6850static uint32_t
@@ -6333,7 +6934,7 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
6333 * enabled and leaks its vectors. 6934 * enabled and leaks its vectors.
6334 * 6935 *
6335 * Return codes 6936 * Return codes
6336 * 0 - sucessful 6937 * 0 - successful
6337 * other values - error 6938 * other values - error
6338 **/ 6939 **/
6339static int 6940static int
@@ -6443,7 +7044,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
6443 * which is done in this function. 7044 * which is done in this function.
6444 * 7045 *
6445 * Return codes 7046 * Return codes
6446 * 0 - sucessful 7047 * 0 - successful
6447 * other values - error 7048 * other values - error
6448 **/ 7049 **/
6449static int 7050static int
@@ -6508,7 +7109,7 @@ lpfc_sli4_disable_msi(struct lpfc_hba *phba)
6508 * MSI-X -> MSI -> IRQ. 7109 * MSI-X -> MSI -> IRQ.
6509 * 7110 *
6510 * Return codes 7111 * Return codes
6511 * 0 - sucessful 7112 * 0 - successful
6512 * other values - error 7113 * other values - error
6513 **/ 7114 **/
6514static uint32_t 7115static uint32_t
@@ -6700,6 +7301,73 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6700 phba->pport->work_port_events = 0; 7301 phba->pport->work_port_events = 0;
6701} 7302}
6702 7303
7304 /**
7305 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7306 * @phba: Pointer to HBA context object.
7307 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7308 *
7309 * This function is called in the SLI4 code path to read the port's
7310 * sli4 capabilities.
7311 *
 7312 * This function may be called from any context that can block-wait
7313 * for the completion. The expectation is that this routine is called
7314 * typically from probe_one or from the online routine.
7315 **/
7316int
7317lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7318{
7319 int rc;
7320 struct lpfc_mqe *mqe;
7321 struct lpfc_pc_sli4_params *sli4_params;
7322 uint32_t mbox_tmo;
7323
7324 rc = 0;
7325 mqe = &mboxq->u.mqe;
7326
7327 /* Read the port's SLI4 Parameters port capabilities */
7328 lpfc_sli4_params(mboxq);
7329 if (!phba->sli4_hba.intr_enable)
7330 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7331 else {
7332 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7333 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7334 }
7335
7336 if (unlikely(rc))
7337 return 1;
7338
7339 sli4_params = &phba->sli4_hba.pc_sli4_params;
7340 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7341 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7342 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7343 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7344 &mqe->un.sli4_params);
7345 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7346 &mqe->un.sli4_params);
7347 sli4_params->proto_types = mqe->un.sli4_params.word3;
7348 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7349 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7350 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7351 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7352 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7353 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7354 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7355 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7356 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7357 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7358 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7359 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7360 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7361 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7362 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7363 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7364 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7365 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7366 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7367 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7368 return rc;
7369}
7370
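lpfc_pc_sli4_params_get() above is essentially a long run of bf_get() field extractions from the mailbox queue entry. The driver's macro derives each field's shift and mask from per-field definitions in its headers; a hand-rolled user-space equivalent of the shift-then-mask operation, with an illustrative field position:

#include <stdint.h>
#include <stdio.h>

/* bf_get()-style extraction: shift the word down, then mask the width. */
#define FIELD_GET32(word, shift, mask) (((word) >> (shift)) & (mask))

int main(void)
{
	uint32_t word = 0x00c30010;
	/* pretend an 8-bit field sits at bits 16..23 (illustrative layout) */
	uint32_t field = FIELD_GET32(word, 16, 0xffu);

	printf("field=0x%x\n", field); /* prints field=0xc3 */
	return 0;
}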
6703/** 7371/**
6704 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 7372 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
6705 * @pdev: pointer to PCI device 7373 * @pdev: pointer to PCI device
@@ -6722,6 +7390,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6722{ 7390{
6723 struct lpfc_hba *phba; 7391 struct lpfc_hba *phba;
6724 struct lpfc_vport *vport = NULL; 7392 struct lpfc_vport *vport = NULL;
7393 struct Scsi_Host *shost = NULL;
6725 int error; 7394 int error;
6726 uint32_t cfg_mode, intr_mode; 7395 uint32_t cfg_mode, intr_mode;
6727 7396
@@ -6800,6 +7469,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6800 goto out_destroy_shost; 7469 goto out_destroy_shost;
6801 } 7470 }
6802 7471
7472 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
6803 /* Now, trying to enable interrupt and bring up the device */ 7473 /* Now, trying to enable interrupt and bring up the device */
6804 cfg_mode = phba->cfg_use_msi; 7474 cfg_mode = phba->cfg_use_msi;
6805 while (true) { 7475 while (true) {
@@ -6866,6 +7536,8 @@ out_unset_pci_mem_s3:
6866 lpfc_sli_pci_mem_unset(phba); 7536 lpfc_sli_pci_mem_unset(phba);
6867out_disable_pci_dev: 7537out_disable_pci_dev:
6868 lpfc_disable_pci_dev(phba); 7538 lpfc_disable_pci_dev(phba);
7539 if (shost)
7540 scsi_host_put(shost);
6869out_free_phba: 7541out_free_phba:
6870 lpfc_hba_free(phba); 7542 lpfc_hba_free(phba);
6871 return error; 7543 return error;
@@ -7036,6 +7708,13 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7036 /* Restore device state from PCI config space */ 7708 /* Restore device state from PCI config space */
7037 pci_set_power_state(pdev, PCI_D0); 7709 pci_set_power_state(pdev, PCI_D0);
7038 pci_restore_state(pdev); 7710 pci_restore_state(pdev);
7711
7712 /*
 7713 * Newer kernels clear the device's saved_state flag inside
 7714 * pci_restore_state(), so save the restored state again here.
7715 */
7716 pci_save_state(pdev);
7717
7039 if (pdev->is_busmaster) 7718 if (pdev->is_busmaster)
7040 pci_set_master(pdev); 7719 pci_set_master(pdev);
7041 7720
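All three resume paths in this patch gain the same two-step sequence shown above: because pci_restore_state() now consumes the saved snapshot, an immediate pci_save_state() re-arms it for the next suspend or slot reset. A kernel-style fragment of the resulting resume boilerplate:

#include <linux/pci.h>

/* Resume-path fragment: power up, restore config space, then re-arm the
 * saved_state snapshot that pci_restore_state() just consumed. */
static void demo_resume_pci(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);
}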
@@ -7070,6 +7749,73 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7070} 7749}
7071 7750
7072/** 7751/**
7752 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7753 * @phba: pointer to lpfc hba data structure.
7754 *
7755 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7756 * aborts and stops all the on-going I/Os on the pci device.
7757 **/
7758static void
7759lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7760{
7761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7762 "2723 PCI channel I/O abort preparing for recovery\n");
7763 /* Prepare for bringing HBA offline */
7764 lpfc_offline_prep(phba);
7765 /* Clear sli active flag to prevent sysfs access to HBA */
7766 spin_lock_irq(&phba->hbalock);
7767 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
7768 spin_unlock_irq(&phba->hbalock);
7769 /* Stop and flush all I/Os and bring HBA offline */
7770 lpfc_offline(phba);
7771}
7772
7773/**
7774 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7775 * @phba: pointer to lpfc hba data structure.
7776 *
7777 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7778 * disables the device interrupt and pci device, and aborts the internal FCP
7779 * pending I/Os.
7780 **/
7781static void
7782lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7783{
7784 struct lpfc_sli *psli = &phba->sli;
7785 struct lpfc_sli_ring *pring;
7786
7787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7788 "2710 PCI channel disable preparing for reset\n");
7789 /* Disable interrupt and pci device */
7790 lpfc_sli_disable_intr(phba);
7791 pci_disable_device(phba->pcidev);
7792 /*
 7793 * The firmware may have dropped some I/Os. Error out the iocbs
 7794 * still on the txcmplq so the SCSI layer can retry them once
 7795 * the link is re-established.
7796 */
7797 pring = &psli->ring[psli->fcp_ring];
7798 lpfc_sli_abort_iocb_ring(phba, pring);
7799}
7800
7801/**
 7802 * lpfc_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7803 * @phba: pointer to lpfc hba data structure.
7804 *
7805 * This routine is called to prepare the SLI3 device for PCI slot permanently
7806 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7807 * pending I/Os.
7808 **/
7809static void
7810lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7811{
7812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7813 "2711 PCI channel permanent disable for failure\n");
 7814 /* Clean up all of the driver's outstanding SCSI I/Os */
7815 lpfc_sli_flush_fcp_rings(phba);
7816}
7817
7818/**
7073 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7819 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7074 * @pdev: pointer to PCI device. 7820 * @pdev: pointer to PCI device.
7075 * @state: the current PCI connection state. 7821 * @state: the current PCI connection state.
@@ -7083,6 +7829,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7083 * as desired. 7829 * as desired.
7084 * 7830 *
7085 * Return codes 7831 * Return codes
7832 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7086 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7833 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7087 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7834 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7088 **/ 7835 **/
@@ -7091,33 +7838,30 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7091{ 7838{
7092 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7839 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7093 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7840 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7094 struct lpfc_sli *psli = &phba->sli;
7095 struct lpfc_sli_ring *pring;
7096 7841
7097 if (state == pci_channel_io_perm_failure) { 7842 /* Block all SCSI devices' I/Os on the host */
7098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7843 lpfc_scsi_dev_block(phba);
7099 "0472 PCI channel I/O permanent failure\n"); 7844
7100 /* Block all SCSI devices' I/Os on the host */ 7845 switch (state) {
7101 lpfc_scsi_dev_block(phba); 7846 case pci_channel_io_normal:
7102 /* Clean up all driver's outstanding SCSI I/Os */ 7847 /* Non-fatal error, prepare for recovery */
7103 lpfc_sli_flush_fcp_rings(phba); 7848 lpfc_sli_prep_dev_for_recover(phba);
7849 return PCI_ERS_RESULT_CAN_RECOVER;
7850 case pci_channel_io_frozen:
7851 /* Fatal error, prepare for slot reset */
7852 lpfc_sli_prep_dev_for_reset(phba);
7853 return PCI_ERS_RESULT_NEED_RESET;
7854 case pci_channel_io_perm_failure:
7855 /* Permanent failure, prepare for device down */
7856 lpfc_prep_dev_for_perm_failure(phba);
7104 return PCI_ERS_RESULT_DISCONNECT; 7857 return PCI_ERS_RESULT_DISCONNECT;
7858 default:
7859 /* Unknown state, prepare and request slot reset */
7860 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7861 "0472 Unknown PCI error state: x%x\n", state);
7862 lpfc_sli_prep_dev_for_reset(phba);
7863 return PCI_ERS_RESULT_NEED_RESET;
7105 } 7864 }
7106
7107 pci_disable_device(pdev);
7108 /*
7109 * There may be I/Os dropped by the firmware.
7110 * Error iocb (I/O) on txcmplq and let the SCSI layer
7111 * retry it after re-establishing link.
7112 */
7113 pring = &psli->ring[psli->fcp_ring];
7114 lpfc_sli_abort_iocb_ring(phba, pring);
7115
7116 /* Disable interrupt */
7117 lpfc_sli_disable_intr(phba);
7118
7119 /* Request a slot reset. */
7120 return PCI_ERS_RESULT_NEED_RESET;
7121} 7865}
7122 7866
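The rewritten handler is now a clean three-way dispatch on the channel state, and the AER core acts on the return value: CAN_RECOVER retries without a reset, NEED_RESET requests a slot reset, and DISCONNECT gives the device up. A skeleton .error_detected callback following the same shape; the demo_ prefix marks it as illustrative:

#include <linux/pci.h>

/* Skeleton .error_detected callback mirroring the three-state dispatch. */
static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER; /* non-fatal: try recovery */
	case pci_channel_io_frozen:
		return PCI_ERS_RESULT_NEED_RESET;  /* fatal: slot reset */
	case pci_channel_io_perm_failure:
		return PCI_ERS_RESULT_DISCONNECT;  /* device is lost */
	default:
		return PCI_ERS_RESULT_NEED_RESET;  /* unknown: reset is safest */
	}
}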
7123/** 7867/**
@@ -7154,6 +7898,13 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7154 } 7898 }
7155 7899
7156 pci_restore_state(pdev); 7900 pci_restore_state(pdev);
7901
7902 /*
 7903 * Newer kernels clear the device's saved_state flag inside
 7904 * pci_restore_state(), so save the restored state again here.
7905 */
7906 pci_save_state(pdev);
7907
7157 if (pdev->is_busmaster) 7908 if (pdev->is_busmaster)
7158 pci_set_master(pdev); 7909 pci_set_master(pdev);
7159 7910
@@ -7197,7 +7948,12 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
7197 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7948 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7198 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7949 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7199 7950
7951 /* Bring the device online */
7200 lpfc_online(phba); 7952 lpfc_online(phba);
7953
7954 /* Clean up Advanced Error Reporting (AER) if needed */
7955 if (phba->hba_flag & HBA_AER_ENABLED)
7956 pci_cleanup_aer_uncorrect_error_status(pdev);
7201} 7957}
7202 7958
7203/** 7959/**
@@ -7213,15 +7969,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7213 7969
7214 if (phba->sli_rev == LPFC_SLI_REV4) { 7970 if (phba->sli_rev == LPFC_SLI_REV4) {
7215 if (max_xri <= 100) 7971 if (max_xri <= 100)
7216 return 4; 7972 return 10;
7217 else if (max_xri <= 256) 7973 else if (max_xri <= 256)
7218 return 8; 7974 return 25;
7219 else if (max_xri <= 512) 7975 else if (max_xri <= 512)
7220 return 16; 7976 return 50;
7221 else if (max_xri <= 1024) 7977 else if (max_xri <= 1024)
7222 return 32; 7978 return 100;
7223 else 7979 else
7224 return 48; 7980 return 150;
7225 } else 7981 } else
7226 return 0; 7982 return 0;
7227} 7983}
@@ -7249,6 +8005,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7249{ 8005{
7250 struct lpfc_hba *phba; 8006 struct lpfc_hba *phba;
7251 struct lpfc_vport *vport = NULL; 8007 struct lpfc_vport *vport = NULL;
8008 struct Scsi_Host *shost = NULL;
7252 int error; 8009 int error;
7253 uint32_t cfg_mode, intr_mode; 8010 uint32_t cfg_mode, intr_mode;
7254 int mcnt; 8011 int mcnt;
@@ -7329,6 +8086,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7329 goto out_destroy_shost; 8086 goto out_destroy_shost;
7330 } 8087 }
7331 8088
8089 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7332 /* Now, trying to enable interrupt and bring up the device */ 8090 /* Now, trying to enable interrupt and bring up the device */
7333 cfg_mode = phba->cfg_use_msi; 8091 cfg_mode = phba->cfg_use_msi;
7334 while (true) { 8092 while (true) {
@@ -7342,6 +8100,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7342 error = -ENODEV; 8100 error = -ENODEV;
7343 goto out_free_sysfs_attr; 8101 goto out_free_sysfs_attr;
7344 } 8102 }
8103 /* Default to single FCP EQ for non-MSI-X */
8104 if (phba->intr_type != MSIX)
8105 phba->cfg_fcp_eq_count = 1;
7345 /* Set up SLI-4 HBA */ 8106 /* Set up SLI-4 HBA */
7346 if (lpfc_sli4_hba_setup(phba)) { 8107 if (lpfc_sli4_hba_setup(phba)) {
7347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7397,6 +8158,8 @@ out_unset_pci_mem_s4:
7397 lpfc_sli4_pci_mem_unset(phba); 8158 lpfc_sli4_pci_mem_unset(phba);
7398out_disable_pci_dev: 8159out_disable_pci_dev:
7399 lpfc_disable_pci_dev(phba); 8160 lpfc_disable_pci_dev(phba);
8161 if (shost)
8162 scsi_host_put(shost);
7400out_free_phba: 8163out_free_phba:
7401 lpfc_hba_free(phba); 8164 lpfc_hba_free(phba);
7402 return error; 8165 return error;
@@ -7551,6 +8314,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7551 /* Restore device state from PCI config space */ 8314 /* Restore device state from PCI config space */
7552 pci_set_power_state(pdev, PCI_D0); 8315 pci_set_power_state(pdev, PCI_D0);
7553 pci_restore_state(pdev); 8316 pci_restore_state(pdev);
8317
8318 /*
 8319 * Newer kernels clear the device's saved_state flag inside
 8320 * pci_restore_state(), so save the restored state again here.
8321 */
8322 pci_save_state(pdev);
8323
7554 if (pdev->is_busmaster) 8324 if (pdev->is_busmaster)
7555 pci_set_master(pdev); 8325 pci_set_master(pdev);
7556 8326
@@ -7670,11 +8440,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7670 int rc; 8440 int rc;
7671 struct lpfc_sli_intf intf; 8441 struct lpfc_sli_intf intf;
7672 8442
7673 if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0)) 8443 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
7674 return -ENODEV; 8444 return -ENODEV;
7675 8445
7676 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 8446 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
7677 (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4)) 8447 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
7678 rc = lpfc_pci_probe_one_s4(pdev, pid); 8448 rc = lpfc_pci_probe_one_s4(pdev, pid);
7679 else 8449 else
7680 rc = lpfc_pci_probe_one_s3(pdev, pid); 8450 rc = lpfc_pci_probe_one_s3(pdev, pid);
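The top-level probe now decides the path from hardware, not from a split device table: it reads the SLI_INTF word out of PCI config space and routes to the SLI-4 probe only when the valid bit and revision field agree. A hedged sketch of the dispatch; the offset and field layout below are placeholders, the real definitions live in the driver's headers:

#include <linux/pci.h>
#include <linux/errno.h>

#define DEMO_SLI_INTF_OFF	0x58        /* placeholder config offset */
#define DEMO_SLI_INTF_VALID	(1u << 31)  /* placeholder valid bit */
#define DEMO_SLI_INTF_REV4	0x4         /* placeholder revision code */

/* Decide between the SLI-3 and SLI-4 probe paths from one config read. */
static int demo_probe_dispatch(struct pci_dev *pdev)
{
	u32 word;

	if (pci_read_config_dword(pdev, DEMO_SLI_INTF_OFF, &word))
		return -ENODEV;
	if ((word & DEMO_SLI_INTF_VALID) &&
	    ((word & 0xf) == DEMO_SLI_INTF_REV4))
		return 4;	/* take the SLI-4 probe */
	return 3;		/* otherwise legacy SLI-3 */
}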
@@ -7971,6 +8741,10 @@ static struct pci_device_id lpfc_id_table[] = {
7971 PCI_ANY_ID, PCI_ANY_ID, }, 8741 PCI_ANY_ID, PCI_ANY_ID, },
7972 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 8742 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7973 PCI_ANY_ID, PCI_ANY_ID, }, 8743 PCI_ANY_ID, PCI_ANY_ID, },
8744 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
8745 PCI_ANY_ID, PCI_ANY_ID, },
8746 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
8747 PCI_ANY_ID, PCI_ANY_ID, },
7974 { 0 } 8748 { 0 }
7975}; 8749};
7976 8750
@@ -8053,15 +8827,15 @@ lpfc_exit(void)
8053 if (lpfc_enable_npiv) 8827 if (lpfc_enable_npiv)
8054 fc_release_transport(lpfc_vport_transport_template); 8828 fc_release_transport(lpfc_vport_transport_template);
8055 if (_dump_buf_data) { 8829 if (_dump_buf_data) {
8056 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " 8830 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
8057 "at 0x%p\n", 8831 "_dump_buf_data at 0x%p\n",
8058 (1L << _dump_buf_data_order), _dump_buf_data); 8832 (1L << _dump_buf_data_order), _dump_buf_data);
8059 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 8833 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8060 } 8834 }
8061 8835
8062 if (_dump_buf_dif) { 8836 if (_dump_buf_dif) {
8063 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " 8837 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
8064 "at 0x%p\n", 8838 "_dump_buf_dif at 0x%p\n",
8065 (1L << _dump_buf_dif_order), _dump_buf_dif); 8839 (1L << _dump_buf_dif_order), _dump_buf_dif);
8066 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 8840 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8067 } 8841 }