aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/qlge
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--drivers/net/qlge/qlge.h434
-rw-r--r--drivers/net/qlge/qlge_dbg.c1171
-rw-r--r--drivers/net/qlge/qlge_main.c353
-rw-r--r--drivers/net/qlge/qlge_mpi.c165
4 files changed, 2103 insertions, 20 deletions
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 862c1aaf3860..9169c4cf413a 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -54,12 +54,8 @@
54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ 54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ 55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) 56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
57#define SMALL_BUFFER_SIZE 512
58#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
59#define LARGE_BUFFER_MAX_SIZE 8192 57#define LARGE_BUFFER_MAX_SIZE 8192
60#define LARGE_BUFFER_MIN_SIZE 2048 58#define LARGE_BUFFER_MIN_SIZE 2048
61#define MAX_SPLIT_SIZE 1023
62#define QLGE_SB_PAD 32
63 59
64#define MAX_CQ 128 60#define MAX_CQ 128
65#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ 61#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
@@ -79,15 +75,43 @@
79#define TX_DESC_PER_OAL 0 75#define TX_DESC_PER_OAL 0
80#endif 76#endif
81 77
78/* Word shifting for converting 64-bit
79 * address to a series of 16-bit words.
80 * This is used for some MPI firmware
81 * mailbox commands.
82 */
83#define LSW(x) ((u16)(x))
84#define MSW(x) ((u16)((u32)(x) >> 16))
85#define LSD(x) ((u32)((u64)(x)))
86#define MSD(x) ((u32)((((u64)(x)) >> 32)))
87
82/* MPI test register definitions. This register 88/* MPI test register definitions. This register
83 * is used for determining alternate NIC function's 89 * is used for determining alternate NIC function's
84 * PCI->func number. 90 * PCI->func number.
85 */ 91 */
86enum { 92enum {
87 MPI_TEST_FUNC_PORT_CFG = 0x1002, 93 MPI_TEST_FUNC_PORT_CFG = 0x1002,
94 MPI_TEST_FUNC_PRB_CTL = 0x100e,
95 MPI_TEST_FUNC_PRB_EN = 0x18a20000,
96 MPI_TEST_FUNC_RST_STS = 0x100a,
97 MPI_TEST_FUNC_RST_FRC = 0x00000003,
98 MPI_TEST_NIC_FUNC_MASK = 0x00000007,
99 MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
100 MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
88 MPI_TEST_NIC1_FUNC_SHIFT = 1, 101 MPI_TEST_NIC1_FUNC_SHIFT = 1,
102 MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
103 MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
89 MPI_TEST_NIC2_FUNC_SHIFT = 5, 104 MPI_TEST_NIC2_FUNC_SHIFT = 5,
90 MPI_TEST_NIC_FUNC_MASK = 0x00000007, 105 MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
106 MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
107 MPI_TEST_FC1_FUNCTION_SHIFT = 9,
108 MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
109 MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
110 MPI_TEST_FC2_FUNCTION_SHIFT = 13,
111
112 MPI_NIC_READ = 0x00000000,
113 MPI_NIC_REG_BLOCK = 0x00020000,
114 MPI_NIC_FUNCTION_SHIFT = 6,
91}; 115};
92 116
93/* 117/*
@@ -468,7 +492,7 @@ enum {
468 MDIO_PORT = 0x00000440, 492 MDIO_PORT = 0x00000440,
469 MDIO_STATUS = 0x00000450, 493 MDIO_STATUS = 0x00000450,
470 494
471 /* XGMAC AUX statistics registers */ 495 XGMAC_REGISTER_END = 0x00000740,
472}; 496};
473 497
474/* 498/*
@@ -509,6 +533,7 @@ enum {
509enum { 533enum {
510 MAC_ADDR_IDX_SHIFT = 4, 534 MAC_ADDR_IDX_SHIFT = 4,
511 MAC_ADDR_TYPE_SHIFT = 16, 535 MAC_ADDR_TYPE_SHIFT = 16,
536 MAC_ADDR_TYPE_COUNT = 10,
512 MAC_ADDR_TYPE_MASK = 0x000f0000, 537 MAC_ADDR_TYPE_MASK = 0x000f0000,
513 MAC_ADDR_TYPE_CAM_MAC = 0x00000000, 538 MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
514 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, 539 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
@@ -526,6 +551,30 @@ enum {
526 MAC_ADDR_MR = (1 << 30), 551 MAC_ADDR_MR = (1 << 30),
527 MAC_ADDR_MW = (1 << 31), 552 MAC_ADDR_MW = (1 << 31),
528 MAX_MULTICAST_ENTRIES = 32, 553 MAX_MULTICAST_ENTRIES = 32,
554
555 /* Entry count and words per entry
556 * for each address type in the filter.
557 */
558 MAC_ADDR_MAX_CAM_ENTRIES = 512,
559 MAC_ADDR_MAX_CAM_WCOUNT = 3,
560 MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
561 MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
562 MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
563 MAC_ADDR_MAX_VLAN_WCOUNT = 1,
564 MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
565 MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
566 MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
567 MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
568 MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
569 MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
570 MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
571 MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
572 MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
573 MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
574 MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
575 MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
576 MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
577 MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
529}; 578};
530 579
531/* 580/*
@@ -596,6 +645,7 @@ enum {
596enum { 645enum {
597 RT_IDX_IDX_SHIFT = 8, 646 RT_IDX_IDX_SHIFT = 8,
598 RT_IDX_TYPE_MASK = 0x000f0000, 647 RT_IDX_TYPE_MASK = 0x000f0000,
648 RT_IDX_TYPE_SHIFT = 16,
599 RT_IDX_TYPE_RT = 0x00000000, 649 RT_IDX_TYPE_RT = 0x00000000,
600 RT_IDX_TYPE_RT_INV = 0x00010000, 650 RT_IDX_TYPE_RT_INV = 0x00010000,
601 RT_IDX_TYPE_NICQ = 0x00020000, 651 RT_IDX_TYPE_NICQ = 0x00020000,
@@ -664,7 +714,89 @@ enum {
664 RT_IDX_UNUSED013 = 13, 714 RT_IDX_UNUSED013 = 13,
665 RT_IDX_UNUSED014 = 14, 715 RT_IDX_UNUSED014 = 14,
666 RT_IDX_PROMISCUOUS_SLOT = 15, 716 RT_IDX_PROMISCUOUS_SLOT = 15,
667 RT_IDX_MAX_SLOTS = 16, 717 RT_IDX_MAX_RT_SLOTS = 8,
718 RT_IDX_MAX_NIC_SLOTS = 16,
719};
720
721/*
722 * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
723 */
724enum {
725 XG_SERDES_ADDR_RDY = (1 << 31),
726 XG_SERDES_ADDR_R = (1 << 30),
727
728 XG_SERDES_ADDR_STS = 0x00001E06,
729 XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
730 XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
731 XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
732
733 /* Serdes coredump definitions. */
734 XG_SERDES_XAUI_AN_START = 0x00000000,
735 XG_SERDES_XAUI_AN_END = 0x00000034,
736 XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
737 XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
738 XG_SERDES_XFI_AN_START = 0x00001000,
739 XG_SERDES_XFI_AN_END = 0x00001034,
740 XG_SERDES_XFI_TRAIN_START = 0x10001050,
741 XG_SERDES_XFI_TRAIN_END = 0x1000107C,
742 XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
743 XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
744 XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
745 XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
746 XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
747 XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
748 XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
749 XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
750};
751
752/*
753 * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
754 */
755enum {
756 PRB_MX_ADDR_ARE = (1 << 16),
757 PRB_MX_ADDR_UP = (1 << 15),
758 PRB_MX_ADDR_SWP = (1 << 14),
759
760 /* Module select values. */
761 PRB_MX_ADDR_MAX_MODS = 21,
762 PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
763 PRB_MX_ADDR_MOD_SEL_TBD = 0,
764 PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
765 PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
766 PRB_MX_ADDR_MOD_SEL_FRB = 3,
767 PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
768 PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
769 PRB_MX_ADDR_MOD_SEL_DA1 = 6,
770 PRB_MX_ADDR_MOD_SEL_DA2 = 7,
771 PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
772 PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
773 PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
774 PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
775 PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
776 PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
777 PRB_MX_ADDR_MOD_SEL_REG = 14,
778 PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
779 PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
780 PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
781 PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
782 PRB_MX_ADDR_MOD_SEL_MOP = 20,
783 /* Bit fields indicating which modules
784 * are valid for each clock domain.
785 */
786 PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
787 PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
788 PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
789 PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
790 PRB_MX_ADDR_VALID_TOTAL = 34,
791
792 /* Clock domain values. */
793 PRB_MX_ADDR_CLOCK_SHIFT = 6,
794 PRB_MX_ADDR_SYS_CLOCK = 0,
795 PRB_MX_ADDR_PCI_CLOCK = 2,
796 PRB_MX_ADDR_FC_CLOCK = 5,
797 PRB_MX_ADDR_XGM_CLOCK = 6,
798
799 PRB_MX_ADDR_MAX_MUX = 64,
668}; 800};
669 801
670/* 802/*
@@ -737,6 +869,21 @@ enum {
737 PRB_MX_DATA = 0xfc, /* Use semaphore */ 869 PRB_MX_DATA = 0xfc, /* Use semaphore */
738}; 870};
739 871
872#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
873#define SMALL_BUFFER_SIZE 256
874#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
875#define SPLT_SETTING FSC_DBRST_1024
876#define SPLT_LEN 0
877#define QLGE_SB_PAD 0
878#else
879#define SMALL_BUFFER_SIZE 512
880#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
881#define SPLT_SETTING FSC_SH
882#define SPLT_LEN (SPLT_HDR_EP | \
883 min(SMALL_BUF_MAP_SIZE, 1023))
884#define QLGE_SB_PAD 32
885#endif
886
740/* 887/*
741 * CAM output format. 888 * CAM output format.
742 */ 889 */
@@ -1421,7 +1568,7 @@ struct nic_stats {
1421 u64 rx_nic_fifo_drop; 1568 u64 rx_nic_fifo_drop;
1422}; 1569};
1423 1570
1424/* Address/Length pairs for the coredump. */ 1571/* Firmware coredump internal register address/length pairs. */
1425enum { 1572enum {
1426 MPI_CORE_REGS_ADDR = 0x00030000, 1573 MPI_CORE_REGS_ADDR = 0x00030000,
1427 MPI_CORE_REGS_CNT = 127, 1574 MPI_CORE_REGS_CNT = 127,
@@ -1476,7 +1623,7 @@ struct mpi_coredump_segment_header {
1476 u8 description[16]; 1623 u8 description[16];
1477}; 1624};
1478 1625
1479/* Reg dump segment numbers. */ 1626/* Firmware coredump header segment numbers. */
1480enum { 1627enum {
1481 CORE_SEG_NUM = 1, 1628 CORE_SEG_NUM = 1,
1482 TEST_LOGIC_SEG_NUM = 2, 1629 TEST_LOGIC_SEG_NUM = 2,
@@ -1527,6 +1674,67 @@ enum {
1527 1674
1528}; 1675};
1529 1676
1677/* There are 64 generic NIC registers. */
1678#define NIC_REGS_DUMP_WORD_COUNT 64
1679/* XGMAC word count. */
1680#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
1681/* Word counts for the SERDES blocks. */
1682#define XG_SERDES_XAUI_AN_COUNT 14
1683#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
1684#define XG_SERDES_XFI_AN_COUNT 14
1685#define XG_SERDES_XFI_TRAIN_COUNT 12
1686#define XG_SERDES_XFI_HSS_PCS_COUNT 15
1687#define XG_SERDES_XFI_HSS_TX_COUNT 32
1688#define XG_SERDES_XFI_HSS_RX_COUNT 32
1689#define XG_SERDES_XFI_HSS_PLL_COUNT 32
1690
1691/* There are 2 CNA ETS and 8 NIC ETS registers. */
1692#define ETS_REGS_DUMP_WORD_COUNT 10
1693
1694/* Each probe mux entry stores the probe type plus 64 entries
1695 * that are each each 64-bits in length. There are a total of
1696 * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
1697 */
1698#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
1699#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
1700 PRB_MX_ADDR_VALID_TOTAL)
1701/* Each routing entry consists of 4 32-bit words.
1702 * They are route type, index, index word, and result.
1703 * There are 2 route blocks with 8 entries each and
1704 * 2 NIC blocks with 16 entries each.
1705 * The totol entries is 48 with 4 words each.
1706 */
1707#define RT_IDX_DUMP_ENTRIES 48
1708#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
1709#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
1710 RT_IDX_DUMP_WORDS_PER_ENTRY)
1711/* There are 10 address blocks in filter, each with
1712 * different entry counts and different word-count-per-entry.
1713 */
1714#define MAC_ADDR_DUMP_ENTRIES \
1715 ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
1716 (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
1717 (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
1718 (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
1719 (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
1720 (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
1721 (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
1722 (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
1723 (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
1724 (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
1725#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
1726#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
1727 MAC_ADDR_DUMP_WORDS_PER_ENTRY)
1728/* Maximum of 4 functions whose semaphore registeres are
1729 * in the coredump.
1730 */
1731#define MAX_SEMAPHORE_FUNCTIONS 4
1732/* Defines for access the MPI shadow registers. */
1733#define RISC_124 0x0003007c
1734#define RISC_127 0x0003007f
1735#define SHADOW_OFFSET 0xb0000000
1736#define SHADOW_REG_SHIFT 20
1737
1530struct ql_nic_misc { 1738struct ql_nic_misc {
1531 u32 rx_ring_count; 1739 u32 rx_ring_count;
1532 u32 tx_ring_count; 1740 u32 tx_ring_count;
@@ -1568,6 +1776,199 @@ struct ql_reg_dump {
1568 u32 ets[8+2]; 1776 u32 ets[8+2];
1569}; 1777};
1570 1778
1779struct ql_mpi_coredump {
1780 /* segment 0 */
1781 struct mpi_coredump_global_header mpi_global_header;
1782
1783 /* segment 1 */
1784 struct mpi_coredump_segment_header core_regs_seg_hdr;
1785 u32 mpi_core_regs[MPI_CORE_REGS_CNT];
1786 u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
1787
1788 /* segment 2 */
1789 struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
1790 u32 test_logic_regs[TEST_REGS_CNT];
1791
1792 /* segment 3 */
1793 struct mpi_coredump_segment_header rmii_regs_seg_hdr;
1794 u32 rmii_regs[RMII_REGS_CNT];
1795
1796 /* segment 4 */
1797 struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
1798 u32 fcmac1_regs[FCMAC_REGS_CNT];
1799
1800 /* segment 5 */
1801 struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
1802 u32 fcmac2_regs[FCMAC_REGS_CNT];
1803
1804 /* segment 6 */
1805 struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
1806 u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
1807
1808 /* segment 7 */
1809 struct mpi_coredump_segment_header ide_regs_seg_hdr;
1810 u32 ide_regs[IDE_REGS_CNT];
1811
1812 /* segment 8 */
1813 struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
1814 u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
1815
1816 /* segment 9 */
1817 struct mpi_coredump_segment_header smbus_regs_seg_hdr;
1818 u32 smbus_regs[SMBUS_REGS_CNT];
1819
1820 /* segment 10 */
1821 struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
1822 u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
1823
1824 /* segment 11 */
1825 struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
1826 u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
1827
1828 /* segment 12 */
1829 struct mpi_coredump_segment_header i2c_regs_seg_hdr;
1830 u32 i2c_regs[I2C_REGS_CNT];
1831 /* segment 13 */
1832 struct mpi_coredump_segment_header memc_regs_seg_hdr;
1833 u32 memc_regs[MEMC_REGS_CNT];
1834
1835 /* segment 14 */
1836 struct mpi_coredump_segment_header pbus_regs_seg_hdr;
1837 u32 pbus_regs[PBUS_REGS_CNT];
1838
1839 /* segment 15 */
1840 struct mpi_coredump_segment_header mde_regs_seg_hdr;
1841 u32 mde_regs[MDE_REGS_CNT];
1842
1843 /* segment 16 */
1844 struct mpi_coredump_segment_header nic_regs_seg_hdr;
1845 u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
1846
1847 /* segment 17 */
1848 struct mpi_coredump_segment_header nic2_regs_seg_hdr;
1849 u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
1850
1851 /* segment 18 */
1852 struct mpi_coredump_segment_header xgmac1_seg_hdr;
1853 u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
1854
1855 /* segment 19 */
1856 struct mpi_coredump_segment_header xgmac2_seg_hdr;
1857 u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
1858
1859 /* segment 20 */
1860 struct mpi_coredump_segment_header code_ram_seg_hdr;
1861 u32 code_ram[CODE_RAM_CNT];
1862
1863 /* segment 21 */
1864 struct mpi_coredump_segment_header memc_ram_seg_hdr;
1865 u32 memc_ram[MEMC_RAM_CNT];
1866
1867 /* segment 22 */
1868 struct mpi_coredump_segment_header xaui_an_hdr;
1869 u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1870
1871 /* segment 23 */
1872 struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
1873 u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1874
1875 /* segment 24 */
1876 struct mpi_coredump_segment_header xfi_an_hdr;
1877 u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
1878
1879 /* segment 25 */
1880 struct mpi_coredump_segment_header xfi_train_hdr;
1881 u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1882
1883 /* segment 26 */
1884 struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
1885 u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1886
1887 /* segment 27 */
1888 struct mpi_coredump_segment_header xfi_hss_tx_hdr;
1889 u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1890
1891 /* segment 28 */
1892 struct mpi_coredump_segment_header xfi_hss_rx_hdr;
1893 u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1894
1895 /* segment 29 */
1896 struct mpi_coredump_segment_header xfi_hss_pll_hdr;
1897 u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1898
1899 /* segment 30 */
1900 struct mpi_coredump_segment_header misc_nic_seg_hdr;
1901 struct ql_nic_misc misc_nic_info;
1902
1903 /* segment 31 */
1904 /* one interrupt state for each CQ */
1905 struct mpi_coredump_segment_header intr_states_seg_hdr;
1906 u32 intr_states[MAX_RX_RINGS];
1907
1908 /* segment 32 */
1909 /* 3 cam words each for 16 unicast,
1910 * 2 cam words for each of 32 multicast.
1911 */
1912 struct mpi_coredump_segment_header cam_entries_seg_hdr;
1913 u32 cam_entries[(16 * 3) + (32 * 3)];
1914
1915 /* segment 33 */
1916 struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
1917 u32 nic_routing_words[16];
1918 /* segment 34 */
1919 struct mpi_coredump_segment_header ets_seg_hdr;
1920 u32 ets[ETS_REGS_DUMP_WORD_COUNT];
1921
1922 /* segment 35 */
1923 struct mpi_coredump_segment_header probe_dump_seg_hdr;
1924 u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
1925
1926 /* segment 36 */
1927 struct mpi_coredump_segment_header routing_reg_seg_hdr;
1928 u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
1929
1930 /* segment 37 */
1931 struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
1932 u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
1933
1934 /* segment 38 */
1935 struct mpi_coredump_segment_header xaui2_an_hdr;
1936 u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
1937
1938 /* segment 39 */
1939 struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
1940 u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
1941
1942 /* segment 40 */
1943 struct mpi_coredump_segment_header xfi2_an_hdr;
1944 u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
1945
1946 /* segment 41 */
1947 struct mpi_coredump_segment_header xfi2_train_hdr;
1948 u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
1949
1950 /* segment 42 */
1951 struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
1952 u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
1953
1954 /* segment 43 */
1955 struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
1956 u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
1957
1958 /* segment 44 */
1959 struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
1960 u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
1961
1962 /* segment 45 */
1963 struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
1964 u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
1965
1966 /* segment 50 */
1967 /* semaphore register for all 5 functions */
1968 struct mpi_coredump_segment_header sem_regs_seg_hdr;
1969 u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
1970};
1971
1571/* 1972/*
1572 * intr_context structure is used during initialization 1973 * intr_context structure is used during initialization
1573 * to hook the interrupts. It is also used in a single 1974 * to hook the interrupts. It is also used in a single
@@ -1603,6 +2004,7 @@ enum {
1603 QL_CAM_RT_SET = 8, 2004 QL_CAM_RT_SET = 8,
1604 QL_SELFTEST = 9, 2005 QL_SELFTEST = 9,
1605 QL_LB_LINK_UP = 10, 2006 QL_LB_LINK_UP = 10,
2007 QL_FRC_COREDUMP = 11,
1606}; 2008};
1607 2009
1608/* link_status bit definitions */ 2010/* link_status bit definitions */
@@ -1724,6 +2126,8 @@ struct ql_adapter {
1724 u32 port_link_up; 2126 u32 port_link_up;
1725 u32 port_init; 2127 u32 port_init;
1726 u32 link_status; 2128 u32 link_status;
2129 struct ql_mpi_coredump *mpi_coredump;
2130 u32 core_is_dumped;
1727 u32 link_config; 2131 u32 link_config;
1728 u32 led_config; 2132 u32 led_config;
1729 u32 max_frame_size; 2133 u32 max_frame_size;
@@ -1736,6 +2140,7 @@ struct ql_adapter {
1736 struct delayed_work mpi_work; 2140 struct delayed_work mpi_work;
1737 struct delayed_work mpi_port_cfg_work; 2141 struct delayed_work mpi_port_cfg_work;
1738 struct delayed_work mpi_idc_work; 2142 struct delayed_work mpi_idc_work;
2143 struct delayed_work mpi_core_to_log;
1739 struct completion ide_completion; 2144 struct completion ide_completion;
1740 struct nic_operations *nic_ops; 2145 struct nic_operations *nic_ops;
1741 u16 device_id; 2146 u16 device_id;
@@ -1807,6 +2212,7 @@ extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
1807void ql_queue_fw_error(struct ql_adapter *qdev); 2212void ql_queue_fw_error(struct ql_adapter *qdev);
1808void ql_mpi_work(struct work_struct *work); 2213void ql_mpi_work(struct work_struct *work);
1809void ql_mpi_reset_work(struct work_struct *work); 2214void ql_mpi_reset_work(struct work_struct *work);
2215void ql_mpi_core_to_log(struct work_struct *work);
1810int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); 2216int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
1811void ql_queue_asic_error(struct ql_adapter *qdev); 2217void ql_queue_asic_error(struct ql_adapter *qdev);
1812u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); 2218u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
@@ -1817,6 +2223,15 @@ void ql_mpi_port_cfg_work(struct work_struct *work);
1817int ql_mb_get_fw_state(struct ql_adapter *qdev); 2223int ql_mb_get_fw_state(struct ql_adapter *qdev);
1818int ql_cam_route_initialize(struct ql_adapter *qdev); 2224int ql_cam_route_initialize(struct ql_adapter *qdev);
1819int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 2225int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
2226int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
2227int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2228int ql_pause_mpi_risc(struct ql_adapter *qdev);
2229int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2230int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2231 u32 ram_addr, int word_count);
2232int ql_core_dump(struct ql_adapter *qdev,
2233 struct ql_mpi_coredump *mpi_coredump);
2234int ql_mb_sys_err(struct ql_adapter *qdev);
1820int ql_mb_about_fw(struct ql_adapter *qdev); 2235int ql_mb_about_fw(struct ql_adapter *qdev);
1821int ql_wol(struct ql_adapter *qdev); 2236int ql_wol(struct ql_adapter *qdev);
1822int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); 2237int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -1833,6 +2248,7 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
1833 struct ql_reg_dump *mpi_coredump); 2248 struct ql_reg_dump *mpi_coredump);
1834netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2249netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
1835void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); 2250void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
2251int ql_own_firmware(struct ql_adapter *qdev);
1836int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); 2252int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
1837 2253
1838#if 1 2254#if 1
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 9f58c4710761..57df835147eb 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,405 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3/* Read a NIC register from the alternate function. */
4static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
5 u32 reg)
6{
7 u32 register_to_read;
8 u32 reg_val;
9 unsigned int status = 0;
10
11 register_to_read = MPI_NIC_REG_BLOCK
12 | MPI_NIC_READ
13 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
14 | reg;
15 status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
16 if (status != 0)
17 return 0xffffffff;
18
19 return reg_val;
20}
21
22/* Write a NIC register from the alternate function. */
23static int ql_write_other_func_reg(struct ql_adapter *qdev,
24 u32 reg, u32 reg_val)
25{
26 u32 register_to_read;
27 int status = 0;
28
29 register_to_read = MPI_NIC_REG_BLOCK
30 | MPI_NIC_READ
31 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
32 | reg;
33 status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
34
35 return status;
36}
37
38static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
39 u32 bit, u32 err_bit)
40{
41 u32 temp;
42 int count = 10;
43
44 while (count) {
45 temp = ql_read_other_func_reg(qdev, reg);
46
47 /* check for errors */
48 if (temp & err_bit)
49 return -1;
50 else if (temp & bit)
51 return 0;
52 mdelay(10);
53 count--;
54 }
55 return -1;
56}
57
58static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
59 u32 *data)
60{
61 int status;
62
63 /* wait for reg to come ready */
64 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
65 XG_SERDES_ADDR_RDY, 0);
66 if (status)
67 goto exit;
68
69 /* set up for reg read */
70 ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
71
72 /* wait for reg to come ready */
73 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
74 XG_SERDES_ADDR_RDY, 0);
75 if (status)
76 goto exit;
77
78 /* get the data */
79 *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
80exit:
81 return status;
82}
83
84/* Read out the SERDES registers */
85static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data)
86{
87 int status;
88
89 /* wait for reg to come ready */
90 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
91 if (status)
92 goto exit;
93
94 /* set up for reg read */
95 ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
96
97 /* wait for reg to come ready */
98 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
99 if (status)
100 goto exit;
101
102 /* get the data */
103 *data = ql_read32(qdev, XG_SERDES_DATA);
104exit:
105 return status;
106}
107
108static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
109 u32 *direct_ptr, u32 *indirect_ptr,
110 unsigned int direct_valid, unsigned int indirect_valid)
111{
112 unsigned int status;
113
114 status = 1;
115 if (direct_valid)
116 status = ql_read_serdes_reg(qdev, addr, direct_ptr);
117 /* Dead fill any failures or invalids. */
118 if (status)
119 *direct_ptr = 0xDEADBEEF;
120
121 status = 1;
122 if (indirect_valid)
123 status = ql_read_other_func_serdes_reg(
124 qdev, addr, indirect_ptr);
125 /* Dead fill any failures or invalids. */
126 if (status)
127 *indirect_ptr = 0xDEADBEEF;
128}
129
130static int ql_get_serdes_regs(struct ql_adapter *qdev,
131 struct ql_mpi_coredump *mpi_coredump)
132{
133 int status;
134 unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
135 unsigned int xaui_indirect_valid, i;
136 u32 *direct_ptr, temp;
137 u32 *indirect_ptr;
138
139 xfi_direct_valid = xfi_indirect_valid = 0;
140 xaui_direct_valid = xaui_indirect_valid = 1;
141
142 /* The XAUI needs to be read out per port */
143 if (qdev->func & 1) {
144 /* We are NIC 2 */
145 status = ql_read_other_func_serdes_reg(qdev,
146 XG_SERDES_XAUI_HSS_PCS_START, &temp);
147 if (status)
148 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
149 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
150 XG_SERDES_ADDR_XAUI_PWR_DOWN)
151 xaui_indirect_valid = 0;
152
153 status = ql_read_serdes_reg(qdev,
154 XG_SERDES_XAUI_HSS_PCS_START, &temp);
155 if (status)
156 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
157
158 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
159 XG_SERDES_ADDR_XAUI_PWR_DOWN)
160 xaui_direct_valid = 0;
161 } else {
162 /* We are NIC 1 */
163 status = ql_read_other_func_serdes_reg(qdev,
164 XG_SERDES_XAUI_HSS_PCS_START, &temp);
165 if (status)
166 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
167 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
168 XG_SERDES_ADDR_XAUI_PWR_DOWN)
169 xaui_indirect_valid = 0;
170
171 status = ql_read_serdes_reg(qdev,
172 XG_SERDES_XAUI_HSS_PCS_START, &temp);
173 if (status)
174 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
175 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
176 XG_SERDES_ADDR_XAUI_PWR_DOWN)
177 xaui_direct_valid = 0;
178 }
179
180 /*
181 * XFI register is shared so only need to read one
182 * functions and then check the bits.
183 */
184 status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
185 if (status)
186 temp = 0;
187
188 if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
189 XG_SERDES_ADDR_XFI1_PWR_UP) {
190 /* now see if i'm NIC 1 or NIC 2 */
191 if (qdev->func & 1)
192 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
193 xfi_indirect_valid = 1;
194 else
195 xfi_direct_valid = 1;
196 }
197 if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
198 XG_SERDES_ADDR_XFI2_PWR_UP) {
199 /* now see if i'm NIC 1 or NIC 2 */
200 if (qdev->func & 1)
201 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
202 xfi_direct_valid = 1;
203 else
204 xfi_indirect_valid = 1;
205 }
206
207 /* Get XAUI_AN register block. */
208 if (qdev->func & 1) {
209 /* Function 2 is direct */
210 direct_ptr = mpi_coredump->serdes2_xaui_an;
211 indirect_ptr = mpi_coredump->serdes_xaui_an;
212 } else {
213 /* Function 1 is direct */
214 direct_ptr = mpi_coredump->serdes_xaui_an;
215 indirect_ptr = mpi_coredump->serdes2_xaui_an;
216 }
217
218 for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
219 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
220 xaui_direct_valid, xaui_indirect_valid);
221
222 /* Get XAUI_HSS_PCS register block. */
223 if (qdev->func & 1) {
224 direct_ptr =
225 mpi_coredump->serdes2_xaui_hss_pcs;
226 indirect_ptr =
227 mpi_coredump->serdes_xaui_hss_pcs;
228 } else {
229 direct_ptr =
230 mpi_coredump->serdes_xaui_hss_pcs;
231 indirect_ptr =
232 mpi_coredump->serdes2_xaui_hss_pcs;
233 }
234
235 for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
236 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
237 xaui_direct_valid, xaui_indirect_valid);
238
239 /* Get XAUI_XFI_AN register block. */
240 if (qdev->func & 1) {
241 direct_ptr = mpi_coredump->serdes2_xfi_an;
242 indirect_ptr = mpi_coredump->serdes_xfi_an;
243 } else {
244 direct_ptr = mpi_coredump->serdes_xfi_an;
245 indirect_ptr = mpi_coredump->serdes2_xfi_an;
246 }
247
248 for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
249 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
250 xfi_direct_valid, xfi_indirect_valid);
251
252 /* Get XAUI_XFI_TRAIN register block. */
253 if (qdev->func & 1) {
254 direct_ptr = mpi_coredump->serdes2_xfi_train;
255 indirect_ptr =
256 mpi_coredump->serdes_xfi_train;
257 } else {
258 direct_ptr = mpi_coredump->serdes_xfi_train;
259 indirect_ptr =
260 mpi_coredump->serdes2_xfi_train;
261 }
262
263 for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
264 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
265 xfi_direct_valid, xfi_indirect_valid);
266
267 /* Get XAUI_XFI_HSS_PCS register block. */
268 if (qdev->func & 1) {
269 direct_ptr =
270 mpi_coredump->serdes2_xfi_hss_pcs;
271 indirect_ptr =
272 mpi_coredump->serdes_xfi_hss_pcs;
273 } else {
274 direct_ptr =
275 mpi_coredump->serdes_xfi_hss_pcs;
276 indirect_ptr =
277 mpi_coredump->serdes2_xfi_hss_pcs;
278 }
279
280 for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
281 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
282 xfi_direct_valid, xfi_indirect_valid);
283
284 /* Get XAUI_XFI_HSS_TX register block. */
285 if (qdev->func & 1) {
286 direct_ptr =
287 mpi_coredump->serdes2_xfi_hss_tx;
288 indirect_ptr =
289 mpi_coredump->serdes_xfi_hss_tx;
290 } else {
291 direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
292 indirect_ptr =
293 mpi_coredump->serdes2_xfi_hss_tx;
294 }
295 for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
296 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
297 xfi_direct_valid, xfi_indirect_valid);
298
299 /* Get XAUI_XFI_HSS_RX register block. */
300 if (qdev->func & 1) {
301 direct_ptr =
302 mpi_coredump->serdes2_xfi_hss_rx;
303 indirect_ptr =
304 mpi_coredump->serdes_xfi_hss_rx;
305 } else {
306 direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
307 indirect_ptr =
308 mpi_coredump->serdes2_xfi_hss_rx;
309 }
310
311 for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
312 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
313 xfi_direct_valid, xfi_indirect_valid);
314
315
316 /* Get XAUI_XFI_HSS_PLL register block. */
317 if (qdev->func & 1) {
318 direct_ptr =
319 mpi_coredump->serdes2_xfi_hss_pll;
320 indirect_ptr =
321 mpi_coredump->serdes_xfi_hss_pll;
322 } else {
323 direct_ptr =
324 mpi_coredump->serdes_xfi_hss_pll;
325 indirect_ptr =
326 mpi_coredump->serdes2_xfi_hss_pll;
327 }
328 for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
329 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
330 xfi_direct_valid, xfi_indirect_valid);
331 return 0;
332}
333
334static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
335 u32 *data)
336{
337 int status = 0;
338
339 /* wait for reg to come ready */
340 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
341 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
342 if (status)
343 goto exit;
344
345 /* set up for reg read */
346 ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
347
348 /* wait for reg to come ready */
349 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
350 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
351 if (status)
352 goto exit;
353
354 /* get the data */
355 *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
356exit:
357 return status;
358}
359
360/* Read the 400 xgmac control/statistics registers
361 * skipping unused locations.
362 */
363static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
364 unsigned int other_function)
365{
366 int status = 0;
367 int i;
368
369 for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
370 /* We're reading 400 xgmac registers, but we filter out
371 * serveral locations that are non-responsive to reads.
372 */
373 if ((i == 0x00000114) ||
374 (i == 0x00000118) ||
375 (i == 0x0000013c) ||
376 (i == 0x00000140) ||
377 (i > 0x00000150 && i < 0x000001fc) ||
378 (i > 0x00000278 && i < 0x000002a0) ||
379 (i > 0x000002c0 && i < 0x000002cf) ||
380 (i > 0x000002dc && i < 0x000002f0) ||
381 (i > 0x000003c8 && i < 0x00000400) ||
382 (i > 0x00000400 && i < 0x00000410) ||
383 (i > 0x00000410 && i < 0x00000420) ||
384 (i > 0x00000420 && i < 0x00000430) ||
385 (i > 0x00000430 && i < 0x00000440) ||
386 (i > 0x00000440 && i < 0x00000450) ||
387 (i > 0x00000450 && i < 0x00000500) ||
388 (i > 0x0000054c && i < 0x00000568) ||
389 (i > 0x000005c8 && i < 0x00000600)) {
390 if (other_function)
391 status =
392 ql_read_other_func_xgmac_reg(qdev, i, buf);
393 else
394 status = ql_read_xgmac_reg(qdev, i, buf);
395
396 if (status)
397 *buf = 0xdeadbeef;
398 break;
399 }
400 }
401 return status;
402}
3 403
4static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) 404static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
5{ 405{
@@ -91,6 +491,226 @@ err:
91 return status; 491 return status;
92} 492}
93 493
494/* Read the MPI Processor shadow registers */
495static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf)
496{
497 u32 i;
498 int status;
499
500 for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
501 status = ql_write_mpi_reg(qdev, RISC_124,
502 (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
503 if (status)
504 goto end;
505 status = ql_read_mpi_reg(qdev, RISC_127, buf);
506 if (status)
507 goto end;
508 }
509end:
510 return status;
511}
512
513/* Read the MPI Processor core registers */
514static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf,
515 u32 offset, u32 count)
516{
517 int i, status = 0;
518 for (i = 0; i < count; i++, buf++) {
519 status = ql_read_mpi_reg(qdev, offset + i, buf);
520 if (status)
521 return status;
522 }
523 return status;
524}
525
526/* Read the ASIC probe dump */
527static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
528 u32 valid, u32 *buf)
529{
530 u32 module, mux_sel, probe, lo_val, hi_val;
531
532 for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
533 if (!((valid >> module) & 1))
534 continue;
535 for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
536 probe = clock
537 | PRB_MX_ADDR_ARE
538 | mux_sel
539 | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
540 ql_write32(qdev, PRB_MX_ADDR, probe);
541 lo_val = ql_read32(qdev, PRB_MX_DATA);
542 if (mux_sel == 0) {
543 *buf = probe;
544 buf++;
545 }
546 probe |= PRB_MX_ADDR_UP;
547 ql_write32(qdev, PRB_MX_ADDR, probe);
548 hi_val = ql_read32(qdev, PRB_MX_DATA);
549 *buf = lo_val;
550 buf++;
551 *buf = hi_val;
552 buf++;
553 }
554 }
555 return buf;
556}
557
558static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
559{
560 /* First we have to enable the probe mux */
561 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
562 buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
563 PRB_MX_ADDR_VALID_SYS_MOD, buf);
564 buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
565 PRB_MX_ADDR_VALID_PCI_MOD, buf);
566 buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
567 PRB_MX_ADDR_VALID_XGM_MOD, buf);
568 buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
569 PRB_MX_ADDR_VALID_FC_MOD, buf);
570 return 0;
571
572}
573
/* Read out the routing index registers.
 * For each routing type/index pair, stores four words into *buf:
 * the type, the index, the RT_IDX register value read back, and the
 * RT_DATA value.  Serialized against other users via the RT_IDX
 * hardware semaphore.  Returns 0 on success or the semaphore-acquire
 * status on failure.
 */
static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
{
	int status;
	u32 type, index, index_max;
	u32 result_index;
	u32 result_data;
	u32 val;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	/* Types 0-1 have 8 entries each; types 2-3 have 16. */
	for (type = 0; type < 4; type++) {
		if (type < 2)
			index_max = 8;
		else
			index_max = 16;
		for (index = 0; index < index_max; index++) {
			/* Request a read (RT_IDX_RS) of this type/index. */
			val = RT_IDX_RS
				| (type << RT_IDX_TYPE_SHIFT)
				| (index << RT_IDX_IDX_SHIFT);
			ql_write32(qdev, RT_IDX, val);
			result_index = 0;
			/* Poll until the hardware sets the MR (ready)
			 * bit.  NOTE(review): unbounded busy-wait — if
			 * the device is wedged this spins forever;
			 * consider a bounded retry.  TODO confirm.
			 */
			while ((result_index & RT_IDX_MR) == 0)
				result_index = ql_read32(qdev, RT_IDX);
			result_data = ql_read32(qdev, RT_DATA);
			*buf = type;
			buf++;
			*buf = index;
			buf++;
			*buf = result_index;
			buf++;
			*buf = result_data;
			buf++;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
614
/* Read out the MAC protocol registers.
 * Walks every MAC-address table type (CAM, multicast, VLAN/MC filter
 * masks, FC/Mgmt addresses, etc.), and for each index/offset stores two
 * words into *buf: the MAC_ADDR_IDX value read back and MAC_ADDR_DATA.
 */
static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
{
	u32 result_index, result_data;
	u32 type;
	u32 index;
	u32 offset;
	u32 val;
	u32 initial_val = MAC_ADDR_RS;
	u32 max_index;
	u32 max_offset;

	for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
		/* Each type has its own entry count and words-per-entry. */
		switch (type) {

		case 0: /* CAM */
			/* NOTE(review): this OR into initial_val persists
			 * for every later type as well, since initial_val
			 * is never reset inside the loop.  Looks
			 * intentional for type 0 only — confirm whether
			 * MAC_ADDR_ADR should remain set for types 1-9.
			 */
			initial_val |= MAC_ADDR_ADR;
			max_index = MAC_ADDR_MAX_CAM_ENTRIES;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 1: /* Multicast MAC Address */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 2: /* VLAN filter mask */
		case 3: /* MC filter mask */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 4: /* FC MAC addresses */
			max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
			break;
		case 5: /* Mgmt MAC addresses */
			max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
			break;
		case 6: /* Mgmt VLAN addresses */
			max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
			break;
		case 7: /* Mgmt IPv4 address */
			max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
			break;
		case 8: /* Mgmt IPv6 address */
			max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
			break;
		case 9: /* Mgmt TCP/UDP Dest port */
			max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
			break;
		default:
			printk(KERN_ERR"Bad type!!! 0x%08x\n", type);
			/* Zero bounds: skip the read loops entirely. */
			max_index = 0;
			max_offset = 0;
			break;
		}
		for (index = 0; index < max_index; index++) {
			for (offset = 0; offset < max_offset; offset++) {
				/* Issue an indexed read request. */
				val = initial_val
					| (type << MAC_ADDR_TYPE_SHIFT)
					| (index << MAC_ADDR_IDX_SHIFT)
					| (offset);
				ql_write32(qdev, MAC_ADDR_IDX, val);
				result_index = 0;
				/* Poll for the MR (ready) bit.
				 * NOTE(review): unbounded busy-wait —
				 * spins forever if the device hangs.
				 */
				while ((result_index & MAC_ADDR_MR) == 0) {
					result_index = ql_read32(qdev,
								MAC_ADDR_IDX);
				}
				result_data = ql_read32(qdev, MAC_ADDR_DATA);
				*buf = result_index;
				buf++;
				*buf = result_data;
				buf++;
			}
		}
	}
}
695
696static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
697{
698 u32 func_num, reg, reg_val;
699 int status;
700
701 for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
702 reg = MPI_NIC_REG_BLOCK
703 | (func_num << MPI_NIC_FUNCTION_SHIFT)
704 | (SEM / 4);
705 status = ql_read_mpi_reg(qdev, reg, &reg_val);
706 *buf = reg_val;
707 /* if the read failed then dead fill the element. */
708 if (!status)
709 *buf = 0xdeadbeef;
710 buf++;
711 }
712}
713
94/* Create a coredump segment header */ 714/* Create a coredump segment header */
95static void ql_build_coredump_seg_header( 715static void ql_build_coredump_seg_header(
96 struct mpi_coredump_segment_header *seg_hdr, 716 struct mpi_coredump_segment_header *seg_hdr,
@@ -103,6 +723,527 @@ static void ql_build_coredump_seg_header(
103 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); 723 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
104} 724}
105 725
/*
 * This function should be called when a coredump / probedump
 * is to be extracted from the HBA. It is assumed there is a
 * qdev structure that contains the base address of the register
 * space for this function as well as a coredump structure that
 * will contain the dump.
 *
 * Sequence: pause the MPI RISC, snapshot every register segment
 * (NIC, XGMAC, serdes, MPI core, mailboxes, buses, routing/MAC
 * tables, semaphores), then hold the RISC in reset while dumping
 * its code and MEMC RAM.  The processor-register semaphore is
 * released on every exit path.  Returns 0 on success or the first
 * failing sub-step's status.
 */
int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
{
	int status;
	int i;

	if (!mpi_coredump) {
		QPRINTK(qdev, DRV, ERR,
			"No memory available.\n");
		return -ENOMEM;
	}

	/* Try to get the spinlock, but dont worry if
	 * it isn't available. If the firmware died it
	 * might be holding the sem.
	 */
	ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);

	status = ql_pause_mpi_risc(qdev);
	if (status) {
		QPRINTK(qdev, DRV, ERR,
			"Failed RISC pause. Status = 0x%.08x\n", status);
		goto err;
	}

	/* Insert the global header */
	memset(&(mpi_coredump->mpi_global_header), 0,
		sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.headerSize =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.imageSize =
		sizeof(struct ql_mpi_coredump);
	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.idString));

	/* Get generic NIC reg dump */
	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
			NIC1_CONTROL_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->nic_regs), "NIC1 Registers");

	ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
			NIC2_CONTROL_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");

	/* Get XGMac registers. (Segment 18, Rev C. step 21) */
	ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
			NIC1_XGMAC_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
			NIC2_XGMAC_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");

	/* NIC1/NIC2 buffers are assigned by PCI function parity:
	 * "our" function's registers are read directly, the peer's
	 * through the other-function register window.
	 */
	if (qdev->func & 1) {
		/* Odd means our function is NIC 2 */
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic2_regs[i] =
				ql_read32(qdev, i * sizeof(u32));

		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic_regs[i] =
				ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);

		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
	} else {
		/* Even means our function is NIC 1 */
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic_regs[i] =
				ql_read32(qdev, i * sizeof(u32));
		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
			mpi_coredump->nic2_regs[i] =
				ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);

		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
	}

	/* Rev C. Step 20a */
	ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
			XAUI_AN_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes_xaui_an),
			"XAUI AN Registers");

	/* Rev C. Step 20b */
	ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
			XAUI_HSS_PCS_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes_xaui_hss_pcs),
			"XAUI HSS PCS Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes_xfi_an),
			"XFI AN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
			XFI_TRAIN_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes_xfi_train),
			"XFI TRAIN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
			XFI_HSS_PCS_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes_xfi_hss_pcs),
			"XFI HSS PCS Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
			XFI_HSS_TX_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes_xfi_hss_tx),
			"XFI HSS TX Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
			XFI_HSS_RX_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes_xfi_hss_rx),
			"XFI HSS RX Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
			XFI_HSS_PLL_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes_xfi_hss_pll),
			"XFI HSS PLL Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
			XAUI2_AN_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes2_xaui_an),
			"XAUI2 AN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
			XAUI2_HSS_PCS_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
			"XAUI2 HSS PCS Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
			XFI2_AN_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes2_xfi_an),
			"XFI2 AN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
			XFI2_TRAIN_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes2_xfi_train),
			"XFI2 TRAIN Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
			XFI2_HSS_PCS_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
			"XFI2 HSS PCS Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
			XFI2_HSS_TX_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes2_xfi_hss_tx),
			"XFI2 HSS TX Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
			XFI2_HSS_RX_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes2_xfi_hss_rx),
			"XFI2 HSS RX Registers");

	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
			XFI2_HSS_PLL_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->serdes2_xfi_hss_pll),
			"XFI2 HSS PLL Registers");

	status = ql_get_serdes_regs(qdev, mpi_coredump);
	if (status) {
		QPRINTK(qdev, DRV, ERR,
			"Failed Dump of Serdes Registers. Status = 0x%.08x\n",
			status);
		goto err;
	}

	ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
				CORE_SEG_NUM,
				sizeof(mpi_coredump->core_regs_seg_hdr) +
				sizeof(mpi_coredump->mpi_core_regs) +
				sizeof(mpi_coredump->mpi_core_sh_regs),
				"Core Registers");

	/* Get the MPI Core Registers */
	status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
				MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
	if (status)
		goto err;
	/* Get the 16 MPI shadow registers */
	status = ql_get_mpi_shadow_regs(qdev,
				&mpi_coredump->mpi_core_sh_regs[0]);
	if (status)
		goto err;

	/* Get the Test Logic Registers */
	ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
				TEST_LOGIC_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->test_logic_regs),
				"Test Logic Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
				TEST_REGS_ADDR, TEST_REGS_CNT);
	if (status)
		goto err;

	/* Get the RMII Registers */
	ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
				RMII_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->rmii_regs),
				"RMII Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
				RMII_REGS_ADDR, RMII_REGS_CNT);
	if (status)
		goto err;

	/* Get the FCMAC1 Registers */
	ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
				FCMAC1_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->fcmac1_regs),
				"FCMAC1 Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
				FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
	if (status)
		goto err;

	/* Get the FCMAC2 Registers */

	ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
				FCMAC2_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->fcmac2_regs),
				"FCMAC2 Registers");

	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
				FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
	if (status)
		goto err;

	/* Get the FC1 MBX Registers */
	ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
				FC1_MBOX_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->fc1_mbx_regs),
				"FC1 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
				FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the IDE Registers */
	ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
				IDE_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->ide_regs),
				"IDE Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
				IDE_REGS_ADDR, IDE_REGS_CNT);
	if (status)
		goto err;

	/* Get the NIC1 MBX Registers */
	ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
				NIC1_MBOX_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->nic1_mbx_regs),
				"NIC1 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
				NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the SMBus Registers */
	ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
				SMBUS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->smbus_regs),
				"SMBus Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
				SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
	if (status)
		goto err;

	/* Get the FC2 MBX Registers */
	ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
				FC2_MBOX_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->fc2_mbx_regs),
				"FC2 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
				FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the NIC2 MBX Registers */
	ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
				NIC2_MBOX_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->nic2_mbx_regs),
				"NIC2 MBox Regs");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
				NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
	if (status)
		goto err;

	/* Get the I2C Registers */
	ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
				I2C_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->i2c_regs),
				"I2C Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
				I2C_REGS_ADDR, I2C_REGS_CNT);
	if (status)
		goto err;

	/* Get the MEMC Registers */
	ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
				MEMC_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->memc_regs),
				"MEMC Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
				MEMC_REGS_ADDR, MEMC_REGS_CNT);
	if (status)
		goto err;

	/* Get the PBus Registers */
	ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
				PBUS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->pbus_regs),
				"PBUS Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
				PBUS_REGS_ADDR, PBUS_REGS_CNT);
	if (status)
		goto err;

	/* Get the MDE Registers */
	ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
				MDE_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->mde_regs),
				"MDE Registers");
	status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
				MDE_REGS_ADDR, MDE_REGS_CNT);
	if (status)
		goto err;

	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
				MISC_NIC_INFO_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->misc_nic_info),
				"MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* Segment 31 */
	/* Get indexed register values. */
	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
				INTR_STATES_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->intr_states),
				"INTR States");
	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
				CAM_ENTRIES_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->cam_entries),
				"CAM Entries");
	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		goto err;

	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
				ROUTING_WORDS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->nic_routing_words),
				"Routing Words");
	status = ql_get_routing_entries(qdev,
			 &mpi_coredump->nic_routing_words[0]);
	if (status)
		goto err;

	/* Segment 34 (Rev C. step 23) */
	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
				ETS_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->ets),
				"ETS Registers");
	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		goto err;

	ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
				PROBE_DUMP_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->probe_dump),
				"Probe Dump");
	ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);

	ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
				ROUTING_INDEX_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->routing_regs),
				"Routing Regs");
	status = ql_get_routing_index_registers(qdev,
					&mpi_coredump->routing_regs[0]);
	if (status)
		goto err;

	ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
				MAC_PROTOCOL_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->mac_prot_regs),
				"MAC Prot Regs");
	ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);

	/* Get the semaphore registers for all 5 functions */
	ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
			SEM_REGS_SEG_NUM,
			sizeof(struct mpi_coredump_segment_header) +
			sizeof(mpi_coredump->sem_regs), "Sem Registers");

	ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);

	/* Prevent the mpi restarting while we dump the memory.*/
	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);

	/* clear the pause */
	status = ql_unpause_mpi_risc(qdev);
	if (status) {
		QPRINTK(qdev, DRV, ERR,
			"Failed RISC unpause. Status = 0x%.08x\n", status);
		goto err;
	}

	/* Reset the RISC so we can dump RAM */
	status = ql_hard_reset_mpi_risc(qdev);
	if (status) {
		QPRINTK(qdev, DRV, ERR,
			"Failed RISC reset. Status = 0x%.08x\n", status);
		goto err;
	}

	ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
				WCS_RAM_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->code_ram),
				"WCS RAM");
	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
					CODE_RAM_ADDR, CODE_RAM_CNT);
	if (status) {
		QPRINTK(qdev, DRV, ERR,
			"Failed Dump of CODE RAM. Status = 0x%.08x\n", status);
		goto err;
	}

	/* Insert the segment header */
	ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
				MEMC_RAM_SEG_NUM,
				sizeof(struct mpi_coredump_segment_header)
				+ sizeof(mpi_coredump->memc_ram),
				"MEMC RAM");
	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
					MEMC_RAM_ADDR, MEMC_RAM_CNT);
	if (status) {
		QPRINTK(qdev, DRV, ERR,
			"Failed Dump of MEMC RAM. Status = 0x%.08x\n", status);
		goto err;
	}
/* Success path falls through to err on purpose: the semaphore must be
 * released either way.
 */
err:
	ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
	return status;

}
1224
/* Ask the firmware for a forced coredump.
 * Guard sequence: we must own the firmware and the interface must be
 * up before asking the MPI firmware (via ql_mb_sys_err()) to generate
 * a system-error event, which in turn triggers the coredump path.
 */
static void ql_get_core_dump(struct ql_adapter *qdev)
{
	if (!ql_own_firmware(qdev)) {
		QPRINTK(qdev, DRV, ERR, "%s: Don't own firmware!\n",
			qdev->ndev->name);
		return;
	}

	if (!netif_running(qdev->ndev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Force Coredump can only be done from interface "
			"that is up.\n");
		return;
	}

	/* Non-zero return means the mailbox command failed. */
	if (ql_mb_sys_err(qdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Fail force coredump with ql_mb_sys_err().\n");
		return;
	}
}
1246
106void ql_gen_reg_dump(struct ql_adapter *qdev, 1247void ql_gen_reg_dump(struct ql_adapter *qdev,
107 struct ql_reg_dump *mpi_coredump) 1248 struct ql_reg_dump *mpi_coredump)
108{ 1249{
@@ -178,6 +1319,36 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
178 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1319 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
179 if (status) 1320 if (status)
180 return; 1321 return;
1322
1323 if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
1324 ql_get_core_dump(qdev);
1325}
1326
1327/* Coredump to messages log file using separate worker thread */
1328void ql_mpi_core_to_log(struct work_struct *work)
1329{
1330 struct ql_adapter *qdev =
1331 container_of(work, struct ql_adapter, mpi_core_to_log.work);
1332 u32 *tmp, count;
1333 int i;
1334
1335 count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
1336 tmp = (u32 *)qdev->mpi_coredump;
1337 QPRINTK(qdev, DRV, DEBUG, "Core is dumping to log file!\n");
1338
1339 for (i = 0; i < count; i += 8) {
1340 printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
1341 "%.08x %.08x %.08x \n", i,
1342 tmp[i + 0],
1343 tmp[i + 1],
1344 tmp[i + 2],
1345 tmp[i + 3],
1346 tmp[i + 4],
1347 tmp[i + 5],
1348 tmp[i + 6],
1349 tmp[i + 7]);
1350 msleep(5);
1351 }
181} 1352}
182 1353
183#ifdef QL_REG_DUMP 1354#ifdef QL_REG_DUMP
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 894a7c84faef..4adca94a521f 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -73,7 +73,19 @@ static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 76static int qlge_mpi_coredump;
77module_param(qlge_mpi_coredump, int, 0);
78MODULE_PARM_DESC(qlge_mpi_coredump,
79 "Option to enable MPI firmware dump. "
80 "Default is OFF - Do Not allocate memory. ");
81
82static int qlge_force_coredump;
83module_param(qlge_force_coredump, int, 0);
84MODULE_PARM_DESC(qlge_force_coredump,
85 "Option to allow force of firmware core dump. "
86 "Default is OFF - Do not allow.");
87
88static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, 89 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, 90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79 /* required last entry */ 91 /* required last entry */
@@ -452,9 +464,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 if (set) { 464 if (set) {
453 addr = &qdev->ndev->dev_addr[0]; 465 addr = &qdev->ndev->dev_addr[0];
454 QPRINTK(qdev, IFUP, DEBUG, 466 QPRINTK(qdev, IFUP, DEBUG,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n", 467 "Set Mac addr %pM\n", addr);
456 addr[0], addr[1], addr[2], addr[3],
457 addr[4], addr[5]);
458 } else { 468 } else {
459 memset(zero_mac_addr, 0, ETH_ALEN); 469 memset(zero_mac_addr, 0, ETH_ALEN);
460 addr = &zero_mac_addr[0]; 470 addr = &zero_mac_addr[0];
@@ -1433,6 +1443,254 @@ map_error:
1433 return NETDEV_TX_BUSY; 1443 return NETDEV_TX_BUSY;
1434} 1444}
1435 1445
/* Process an inbound completion from an rx ring.
 * GRO path for a frame that arrived entirely in one large-buffer page
 * chunk: the page is attached to a napi-supplied skb as a fragment
 * (zero-copy) and handed to the GRO engine.
 *
 * NOTE(review): unlike ql_process_mac_rx_page()/ql_process_mac_rx_skb(),
 * this path performs no IB_MAC_IOCB_RSP_ERR_MASK check — confirm the
 * caller only routes error-free completions here.
 */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		/* Drop our page reference; the frame is lost. */
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	/* Append the page chunk as the next fragment of the GRO skb. */
	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;
	rx_frag += nr_frags;
	rx_frag->page = lbq_desc->p.pg_chunk.page;
	rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
	rx_frag->size = length;

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	/* GRO requires a verified checksum; presumably the caller has
	 * already established it — TODO confirm.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (qdev->vlgrp && (vlan_id != 0xffff))
		vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
	else
		napi_gro_frags(napi);
}
1490
1491/* Process an inbound completion from an rx ring. */
1492static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1495 u32 length,
1496 u16 vlan_id)
1497{
1498 struct net_device *ndev = qdev->ndev;
1499 struct sk_buff *skb = NULL;
1500 void *addr;
1501 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1502 struct napi_struct *napi = &rx_ring->napi;
1503
1504 skb = netdev_alloc_skb(ndev, length);
1505 if (!skb) {
1506 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
1507 "need to unwind!.\n");
1508 rx_ring->rx_dropped++;
1509 put_page(lbq_desc->p.pg_chunk.page);
1510 return;
1511 }
1512
1513 addr = lbq_desc->p.pg_chunk.va;
1514 prefetch(addr);
1515
1516
1517 /* Frame error, so drop the packet. */
1518 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1519 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1520 ib_mac_rsp->flags2);
1521 rx_ring->rx_errors++;
1522 goto err_out;
1523 }
1524
1525 /* The max framesize filter on this chip is set higher than
1526 * MTU since FCoE uses 2k frames.
1527 */
1528 if (skb->len > ndev->mtu + ETH_HLEN) {
1529 QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
1530 rx_ring->rx_dropped++;
1531 goto err_out;
1532 }
1533 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1534 QPRINTK(qdev, RX_STATUS, DEBUG,
1535 "%d bytes of headers and data in large. Chain "
1536 "page to new skb and pull tail.\n", length);
1537 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1538 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1539 length-ETH_HLEN);
1540 skb->len += length-ETH_HLEN;
1541 skb->data_len += length-ETH_HLEN;
1542 skb->truesize += length-ETH_HLEN;
1543
1544 rx_ring->rx_packets++;
1545 rx_ring->rx_bytes += skb->len;
1546 skb->protocol = eth_type_trans(skb, ndev);
1547 skb->ip_summed = CHECKSUM_NONE;
1548
1549 if (qdev->rx_csum &&
1550 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1551 /* TCP frame. */
1552 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1553 QPRINTK(qdev, RX_STATUS, DEBUG,
1554 "TCP checksum done!\n");
1555 skb->ip_summed = CHECKSUM_UNNECESSARY;
1556 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1557 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1558 /* Unfragmented ipv4 UDP frame. */
1559 struct iphdr *iph = (struct iphdr *) skb->data;
1560 if (!(iph->frag_off &
1561 cpu_to_be16(IP_MF|IP_OFFSET))) {
1562 skb->ip_summed = CHECKSUM_UNNECESSARY;
1563 QPRINTK(qdev, RX_STATUS, DEBUG,
1564 "TCP checksum done!\n");
1565 }
1566 }
1567 }
1568
1569 skb_record_rx_queue(skb, rx_ring->cq_id);
1570 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1571 if (qdev->vlgrp && (vlan_id != 0xffff))
1572 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1573 else
1574 napi_gro_receive(napi, skb);
1575 } else {
1576 if (qdev->vlgrp && (vlan_id != 0xffff))
1577 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1578 else
1579 netif_receive_skb(skb);
1580 }
1581 return;
1582err_out:
1583 dev_kfree_skb_any(skb);
1584 put_page(lbq_desc->p.pg_chunk.page);
1585}
1586
/* Process an inbound completion from an rx ring.
 * Path for a frame that fit in a single small buffer: the data is
 * copied into a freshly allocated skb so the ring's small-buffer skb
 * can be reused, then the copy is pushed up the stack.
 */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		QPRINTK(qdev, PROBE, ERR,
			"No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	/* From here on, 'skb' refers to the copy; the ring buffer skb
	 * stays with the ring for reuse.
	 */
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
					ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
					"TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				QPRINTK(qdev, RX_STATUS, DEBUG,
						"TCP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	/* GRO only when the checksum was verified. */
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
						vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}
1693
1436static void ql_realign_skb(struct sk_buff *skb, int len) 1694static void ql_realign_skb(struct sk_buff *skb, int len)
1437{ 1695{
1438 void *temp_addr = skb->data; 1696 void *temp_addr = skb->data;
@@ -1646,14 +1904,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1646} 1904}
1647 1905
1648/* Process an inbound completion from an rx ring. */ 1906/* Process an inbound completion from an rx ring. */
1649static void ql_process_mac_rx_intr(struct ql_adapter *qdev, 1907static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1650 struct rx_ring *rx_ring, 1908 struct rx_ring *rx_ring,
1651 struct ib_mac_iocb_rsp *ib_mac_rsp) 1909 struct ib_mac_iocb_rsp *ib_mac_rsp,
1910 u16 vlan_id)
1652{ 1911{
1653 struct net_device *ndev = qdev->ndev; 1912 struct net_device *ndev = qdev->ndev;
1654 struct sk_buff *skb = NULL; 1913 struct sk_buff *skb = NULL;
1655 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1656 IB_MAC_IOCB_RSP_VLAN_MASK)
1657 1914
1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp); 1915 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1659 1916
@@ -1753,6 +2010,65 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1753 } 2010 }
1754} 2011}
1755 2012
/* Process an inbound completion from an rx ring.
 *
 * Top-level RX dispatch: inspects the IOCB flag bits and hands the
 * frame to the appropriate handler.  The branch ORDER matters — it
 * mirrors the hardware's buffer-placement priority (header/data
 * split, then single small buffer, then large page chunks).
 *
 * Returns the frame's data length (0 after the drop path has
 * consumed it) as an unsigned long for the caller's accounting.
 */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	/* Only trust the VLAN tag when the IOCB says one is present;
	 * 0xffff is the "no VLAN" sentinel tested by the handlers.
	 */
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
						vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else {
		struct bq_desc *lbq_desc;

		/* Free small buffer that holds the IAL */
		/* NOTE(review): the return value of this first call is
		 * deliberately discarded — the call itself advances the
		 * small-buffer ring past the IAL descriptor; presumably
		 * that side effect is the point.  Confirm against
		 * ql_get_curr_sbuf().
		 */
		lbq_desc = ql_get_curr_sbuf(rx_ring);
		QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
			length, qdev->ndev->mtu);

		/* Unwind the large buffers for this frame. */
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			/* Decrement by a full chunk, or whatever remains
			 * for the final partial chunk.
			 */
			length -= (length < rx_ring->lbq_buf_size) ?
				length : rx_ring->lbq_buf_size;
			put_page(lbq_desc->p.pg_chunk.page);
		}
	}

	return (unsigned long)length;
}
2071
1756/* Process an outbound completion from an rx ring. */ 2072/* Process an outbound completion from an rx ring. */
1757static void ql_process_mac_tx_intr(struct ql_adapter *qdev, 2073static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1758 struct ob_mac_iocb_rsp *mac_rsp) 2074 struct ob_mac_iocb_rsp *mac_rsp)
@@ -3332,15 +3648,15 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3332 3648
3333 /* Enable the function, set pagesize, enable error checking. */ 3649 /* Enable the function, set pagesize, enable error checking. */
3334 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | 3650 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3335 FSC_EC | FSC_VM_PAGE_4K | FSC_SH; 3651 FSC_EC | FSC_VM_PAGE_4K;
3652 value |= SPLT_SETTING;
3336 3653
3337 /* Set/clear header splitting. */ 3654 /* Set/clear header splitting. */
3338 mask = FSC_VM_PAGESIZE_MASK | 3655 mask = FSC_VM_PAGESIZE_MASK |
3339 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); 3656 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3340 ql_write32(qdev, FSC, mask | value); 3657 ql_write32(qdev, FSC, mask | value);
3341 3658
3342 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | 3659 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3343 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
3344 3660
3345 /* Set RX packet routing to use port/pci function on which the 3661 /* Set RX packet routing to use port/pci function on which the
3346 * packet arrived on in addition to usual frame routing. 3662 * packet arrived on in addition to usual frame routing.
@@ -3538,6 +3854,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3538 cancel_delayed_work_sync(&qdev->mpi_reset_work); 3854 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3539 cancel_delayed_work_sync(&qdev->mpi_work); 3855 cancel_delayed_work_sync(&qdev->mpi_work);
3540 cancel_delayed_work_sync(&qdev->mpi_idc_work); 3856 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3857 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3541 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 3858 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3542 3859
3543 for (i = 0; i < qdev->rss_ring_count; i++) 3860 for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4094,6 +4411,7 @@ static void ql_release_all(struct pci_dev *pdev)
4094 iounmap(qdev->reg_base); 4411 iounmap(qdev->reg_base);
4095 if (qdev->doorbell_area) 4412 if (qdev->doorbell_area)
4096 iounmap(qdev->doorbell_area); 4413 iounmap(qdev->doorbell_area);
4414 vfree(qdev->mpi_coredump);
4097 pci_release_regions(pdev); 4415 pci_release_regions(pdev);
4098 pci_set_drvdata(pdev, NULL); 4416 pci_set_drvdata(pdev, NULL);
4099} 4417}
@@ -4175,6 +4493,17 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4175 spin_lock_init(&qdev->hw_lock); 4493 spin_lock_init(&qdev->hw_lock);
4176 spin_lock_init(&qdev->stats_lock); 4494 spin_lock_init(&qdev->stats_lock);
4177 4495
4496 if (qlge_mpi_coredump) {
4497 qdev->mpi_coredump =
4498 vmalloc(sizeof(struct ql_mpi_coredump));
4499 if (qdev->mpi_coredump == NULL) {
4500 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4501 err = -ENOMEM;
4502 goto err_out;
4503 }
4504 if (qlge_force_coredump)
4505 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4506 }
4178 /* make sure the EEPROM is good */ 4507 /* make sure the EEPROM is good */
4179 err = qdev->nic_ops->get_flash(qdev); 4508 err = qdev->nic_ops->get_flash(qdev);
4180 if (err) { 4509 if (err) {
@@ -4204,6 +4533,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4204 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 4533 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4205 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); 4534 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4206 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 4535 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4536 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4207 init_completion(&qdev->ide_completion); 4537 init_completion(&qdev->ide_completion);
4208 4538
4209 if (!cards_found) { 4539 if (!cards_found) {
@@ -4327,6 +4657,7 @@ static void ql_eeh_close(struct net_device *ndev)
4327 cancel_delayed_work_sync(&qdev->mpi_reset_work); 4657 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4328 cancel_delayed_work_sync(&qdev->mpi_work); 4658 cancel_delayed_work_sync(&qdev->mpi_work);
4329 cancel_delayed_work_sync(&qdev->mpi_idc_work); 4659 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4660 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4330 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 4661 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4331 4662
4332 for (i = 0; i < qdev->rss_ring_count; i++) 4663 for (i = 0; i < qdev->rss_ring_count; i++)
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e2b2286102d4..e2c846f17fc7 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,5 +1,54 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3int ql_unpause_mpi_risc(struct ql_adapter *qdev)
4{
5 u32 tmp;
6
7 /* Un-pause the RISC */
8 tmp = ql_read32(qdev, CSR);
9 if (!(tmp & CSR_RP))
10 return -EIO;
11
12 ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
13 return 0;
14}
15
16int ql_pause_mpi_risc(struct ql_adapter *qdev)
17{
18 u32 tmp;
19 int count = UDELAY_COUNT;
20
21 /* Pause the RISC */
22 ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
23 do {
24 tmp = ql_read32(qdev, CSR);
25 if (tmp & CSR_RP)
26 break;
27 mdelay(UDELAY_DELAY);
28 count--;
29 } while (count);
30 return (count == 0) ? -ETIMEDOUT : 0;
31}
32
33int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
34{
35 u32 tmp;
36 int count = UDELAY_COUNT;
37
38 /* Reset the RISC */
39 ql_write32(qdev, CSR, CSR_CMD_SET_RST);
40 do {
41 tmp = ql_read32(qdev, CSR);
42 if (tmp & CSR_RR) {
43 ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
44 break;
45 }
46 mdelay(UDELAY_DELAY);
47 count--;
48 } while (count);
49 return (count == 0) ? -ETIMEDOUT : 0;
50}
51
3int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 52int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
4{ 53{
5 int status; 54 int status;
@@ -45,6 +94,35 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
45 return status; 94 return status;
46} 95}
47 96
97/* Determine if we are in charge of the firwmare. If
98 * we are the lower of the 2 NIC pcie functions, or if
99 * we are the higher function and the lower function
100 * is not enabled.
101 */
102int ql_own_firmware(struct ql_adapter *qdev)
103{
104 u32 temp;
105
106 /* If we are the lower of the 2 NIC functions
107 * on the chip the we are responsible for
108 * core dump and firmware reset after an error.
109 */
110 if (qdev->func < qdev->alt_func)
111 return 1;
112
113 /* If we are the higher of the 2 NIC functions
114 * on the chip and the lower function is not
115 * enabled, then we are responsible for
116 * core dump and firmware reset after an error.
117 */
118 temp = ql_read32(qdev, STS);
119 if (!(temp & (1 << (8 + qdev->alt_func))))
120 return 1;
121
122 return 0;
123
124}
125
48static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) 126static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
49{ 127{
50 int i, status; 128 int i, status;
@@ -529,6 +607,22 @@ end:
529 return status; 607 return status;
530} 608}
531 609
610int ql_mb_sys_err(struct ql_adapter *qdev)
611{
612 struct mbox_params mbc;
613 struct mbox_params *mbcp = &mbc;
614 int status;
615
616 memset(mbcp, 0, sizeof(struct mbox_params));
617
618 mbcp->in_count = 1;
619 mbcp->out_count = 0;
620
621 mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
622
623 status = ql_mailbox_command(qdev, mbcp);
624 return status;
625}
532 626
533/* Get MPI firmware version. This will be used for 627/* Get MPI firmware version. This will be used for
534 * driver banner and for ethtool info. 628 * driver banner and for ethtool info.
@@ -669,6 +763,63 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
669 return status; 763 return status;
670} 764}
671 765
766int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
767 u32 size)
768{
769 int status = 0;
770 struct mbox_params mbc;
771 struct mbox_params *mbcp = &mbc;
772
773 memset(mbcp, 0, sizeof(struct mbox_params));
774
775 mbcp->in_count = 9;
776 mbcp->out_count = 1;
777
778 mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
779 mbcp->mbox_in[1] = LSW(addr);
780 mbcp->mbox_in[2] = MSW(req_dma);
781 mbcp->mbox_in[3] = LSW(req_dma);
782 mbcp->mbox_in[4] = MSW(size);
783 mbcp->mbox_in[5] = LSW(size);
784 mbcp->mbox_in[6] = MSW(MSD(req_dma));
785 mbcp->mbox_in[7] = LSW(MSD(req_dma));
786 mbcp->mbox_in[8] = MSW(addr);
787
788
789 status = ql_mailbox_command(qdev, mbcp);
790 if (status)
791 return status;
792
793 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
794 QPRINTK(qdev, DRV, ERR,
795 "Failed to dump risc RAM.\n");
796 status = -EIO;
797 }
798 return status;
799}
800
801/* Issue a mailbox command to dump RISC RAM. */
802int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
803 u32 ram_addr, int word_count)
804{
805 int status;
806 char *my_buf;
807 dma_addr_t buf_dma;
808
809 my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
810 &buf_dma);
811 if (!my_buf)
812 return -EIO;
813
814 status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
815 if (!status)
816 memcpy(buf, my_buf, word_count * sizeof(u32));
817
818 pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
819 buf_dma);
820 return status;
821}
822
672/* Get link settings and maximum frame size settings 823/* Get link settings and maximum frame size settings
673 * for the current port. 824 * for the current port.
674 * Most likely will block. 825 * Most likely will block.
@@ -1143,5 +1294,19 @@ void ql_mpi_reset_work(struct work_struct *work)
1143 cancel_delayed_work_sync(&qdev->mpi_work); 1294 cancel_delayed_work_sync(&qdev->mpi_work);
1144 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 1295 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
1145 cancel_delayed_work_sync(&qdev->mpi_idc_work); 1296 cancel_delayed_work_sync(&qdev->mpi_idc_work);
1297 /* If we're not the dominant NIC function,
1298 * then there is nothing to do.
1299 */
1300 if (!ql_own_firmware(qdev)) {
1301 QPRINTK(qdev, DRV, ERR, "Don't own firmware!\n");
1302 return;
1303 }
1304
1305 if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
1306 QPRINTK(qdev, DRV, ERR, "Core is dumped!\n");
1307 qdev->core_is_dumped = 1;
1308 queue_delayed_work(qdev->workqueue,
1309 &qdev->mpi_core_to_log, 5 * HZ);
1310 }
1146 ql_soft_reset_mpi_risc(qdev); 1311 ql_soft_reset_mpi_risc(qdev);
1147} 1312}