author		David S. Miller <davem@davemloft.net>	2011-09-16 15:18:02 -0400
committer	David S. Miller <davem@davemloft.net>	2011-09-16 15:18:02 -0400
commit		e3b37a1bdac434c2ceeb2fa859a3bbc790530ce5 (patch)
tree		b27da242213cc4ec784389a61ee9a0fde0ceb038
parent		9c223f9bbad78aabfe9c4dfd75ca2660f78f20f9 (diff)
parent		2c4af694fe1723501e19426d0d891bdae9194c71 (diff)
Merge git://github.com/Jkirsher/net-next
 drivers/net/ethernet/intel/ixgbe/ixgbe.h         |   2
 drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c   |  13
 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c   |  40
 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c |  18
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    | 775
 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h    |   1
 drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c    |  72
 7 files changed, 341 insertions(+), 580 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 58482fc3024b..bfdd42b7f985 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -53,6 +53,7 @@
 
 /* TX/RX descriptor defines */
 #define IXGBE_DEFAULT_TXD		    512
+#define IXGBE_DEFAULT_TX_WORK		    256
 #define IXGBE_MAX_TXD			   4096
 #define IXGBE_MIN_TXD			     64
 
@@ -490,7 +491,6 @@ struct ixgbe_adapter {
 	int node;
 	u32 led_reg;
 	u32 interrupt_event;
-	char lsc_int_name[IFNAMSIZ + 9];
 
 	/* SR-IOV */
 	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
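
The new IXGBE_DEFAULT_TX_WORK define is the per-pass Tx cleanup budget that ixgbe_clean_tx_irq consumes in the ixgbe_main.c hunks further down (budget = q_vector->tx.work_limit, returning !!budget). A minimal standalone sketch of that pattern, with illustrative names rather than the driver's real types:

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_TX_WORK 256

/* Returns true when the ring was fully cleaned within the budget. */
static bool clean_tx_ring(unsigned int *pending, unsigned int work_limit)
{
	unsigned int budget = work_limit;

	while (*pending && budget) {
		(*pending)--;	/* reclaim one completed descriptor */
		budget--;
	}
	/* budget left over means we ran out of work, not out of budget */
	return !!budget;
}

int main(void)
{
	unsigned int pending = 300;

	while (!clean_tx_ring(&pending, DEFAULT_TX_WORK))
		printf("budget exhausted, %u descriptors still pending\n", pending);
	printf("ring clean\n");
	return 0;
}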
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 0d4e38264492..22504f2db25e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -820,8 +820,8 @@ mac_reset_top:
 	 * Issue global reset to the MAC. This needs to be a SW reset.
 	 * If link reset is used, it might reset the MAC when mng is using it
 	 */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Poll for reset bit to self-clear indicating reset is complete */
@@ -836,21 +836,18 @@ mac_reset_top:
 		hw_dbg(hw, "Reset polling failed to complete.\n");
 	}
 
+	msleep(50);
+
 	/*
 	 * Double resets are required for recovery from certain error
 	 * conditions. Between resets, it is necessary to stall to allow time
-	 * for any pending HW events to complete. We use 1usec since that is
-	 * what is needed for ixgbe_disable_pcie_master(). The second reset
-	 * then clears out any effects of those events.
+	 * for any pending HW events to complete.
 	 */
 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
-		udelay(1);
 		goto mac_reset_top;
 	}
 
-	msleep(50);
-
 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
 	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index f193fc2f28fb..a5ff4358357c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -904,11 +904,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
 **/
 static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 {
-	s32 status = 0;
-	u32 ctrl;
-	u32 i;
-	u32 autoc;
-	u32 autoc2;
+	ixgbe_link_speed link_speed;
+	s32 status;
+	u32 ctrl, i, autoc, autoc2;
+	bool link_up = false;
 
 	/* Call adapter stop to disable tx/rx and clear interrupts */
 	hw->mac.ops.stop_adapter(hw);
@@ -942,40 +941,47 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 
 mac_reset_top:
 	/*
-	 * Issue global reset to the MAC. This needs to be a SW reset.
-	 * If link reset is used, it might reset the MAC when mng is using it
+	 * Issue global reset to the MAC. Needs to be SW reset if link is up.
+	 * If link reset is used when link is up, it might reset the PHY when
+	 * mng is using it. If link is down or the flag to force full link
+	 * reset is set, then perform link reset.
 	 */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+	ctrl = IXGBE_CTRL_LNK_RST;
+	if (!hw->force_full_reset) {
+		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+		if (link_up)
+			ctrl = IXGBE_CTRL_RST;
+	}
+
+	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Poll for reset bit to self-clear indicating reset is complete */
 	for (i = 0; i < 10; i++) {
 		udelay(1);
 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-		if (!(ctrl & IXGBE_CTRL_RST))
+		if (!(ctrl & IXGBE_CTRL_RST_MASK))
 			break;
 	}
-	if (ctrl & IXGBE_CTRL_RST) {
+
+	if (ctrl & IXGBE_CTRL_RST_MASK) {
 		status = IXGBE_ERR_RESET_FAILED;
 		hw_dbg(hw, "Reset polling failed to complete.\n");
 	}
 
+	msleep(50);
+
 	/*
 	 * Double resets are required for recovery from certain error
 	 * conditions. Between resets, it is necessary to stall to allow time
-	 * for any pending HW events to complete. We use 1usec since that is
-	 * what is needed for ixgbe_disable_pcie_master(). The second reset
-	 * then clears out any effects of those events.
+	 * for any pending HW events to complete.
 	 */
 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
-		udelay(1);
 		goto mac_reset_top;
 	}
 
-	msleep(50);
-
 	/*
 	 * Store the original AUTOC/AUTOC2 values if they have not been
 	 * stored off yet. Otherwise restore the stored original
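
The reset poll above now checks IXGBE_CTRL_RST_MASK rather than IXGBE_CTRL_RST, since either a link reset or a software reset may be pending. The one-line ixgbe_type.h change counted in the diffstat is not shown on this page; a sketch of what that mask presumably looks like, with bit values per the 82599 datasheet and hedged since the hunk itself is omitted:

/* Assumed shape of the ixgbe_type.h addition (hunk not shown above):
 * the poll must wait for whichever self-clearing reset bit was set.
 */
#define IXGBE_CTRL_LNK_RST	0x00000008	/* Link reset (CTRL.LRST) */
#define IXGBE_CTRL_RST		0x04000000	/* Software reset (CTRL.RST) */
#define IXGBE_CTRL_RST_MASK	(IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)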
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9c12b35232af..11e1d5cd40b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1570,26 +1570,26 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
 
 	/* X540 needs to set the MACC.FLU bit to force link up */
 	if (adapter->hw.mac.type == ixgbe_mac_X540) {
-		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC);
+		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
 		reg_data |= IXGBE_MACC_FLU;
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data);
+		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
 	}
 
 	/* right now we only support MAC loopback in the driver */
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 	/* Setup MAC loopback */
 	reg_data |= IXGBE_HLREG0_LPBK;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
 
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
 
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
+	reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
 	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
-	IXGBE_WRITE_FLUSH(&adapter->hw);
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
+	IXGBE_WRITE_FLUSH(hw);
 	usleep_range(10000, 20000);
 
 	/* Disable Atlas Tx lanes; re-enabled in reset path */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d20e8040d855..bb069bc3d1a2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -79,59 +79,32 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
 *   Class, Class Mask, private data (not used) }
 */
 static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
-	 board_82598 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
-	 board_X540 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2),
-	 board_82599 },
-	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS),
-	 board_82599 },
-
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
 	/* required last entry */
 	{0, }
 };
@@ -804,7 +777,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	struct ixgbe_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
-	u16 budget = q_vector->tx.work_limit;
+	unsigned int budget = q_vector->tx.work_limit;
 	u16 i = tx_ring->next_to_clean;
 
 	tx_buffer = &tx_ring->tx_buffer_info[i];
@@ -891,7 +864,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		ixgbe_tx_timeout_reset(adapter);
 
 		/* the adapter is about to reset, no point in enabling stuff */
-		return budget;
+		return true;
 	}
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -908,7 +881,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		}
 	}
 
-	return budget;
+	return !!budget;
 }
 
 #ifdef CONFIG_IXGBE_DCA
@@ -924,12 +897,12 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
 		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
-		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		rxctrl |= dca3_get_tag(rx_ring->dev, cpu);
 		break;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
-		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+		rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) <<
 			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
 		break;
 	default:
@@ -953,7 +926,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 	case ixgbe_mac_82598EB:
 		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
 		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
-		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		txctrl |= dca3_get_tag(tx_ring->dev, cpu);
 		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
 		break;
@@ -961,7 +934,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 	case ixgbe_mac_X540:
 		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
 		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
-		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+		txctrl |= (dca3_get_tag(tx_ring->dev, cpu) <<
 			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
 		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
@@ -1297,9 +1270,9 @@ static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
 			IXGBE_RXDADV_RSCCNT_MASK);
 }
 
-static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *rx_ring,
-			       int *work_done, int work_to_do)
+			       int budget)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
@@ -1479,11 +1452,11 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
 		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 
+		budget--;
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
 
-		(*work_done)++;
-		if (*work_done >= work_to_do)
+		if (!budget)
 			break;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -1524,9 +1497,10 @@ next_desc:
 	u64_stats_update_end(&rx_ring->syncp);
 	q_vector->rx.total_packets += total_rx_packets;
 	q_vector->rx.total_bytes += total_rx_bytes;
+
+	return !!budget;
 }
 
-static int ixgbe_clean_rxonly(struct napi_struct *, int);
 /**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
@@ -1542,6 +1516,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
 	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
+	/* Populate MSIX to EITR Select */
+	if (adapter->num_vfs > 32) {
+		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+	}
+
 	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
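
The EITRSEL write moves into ixgbe_configure_msix from the old ixgbe_irq_enable (removed further down). For each VF past the first 32, one low bit of EITRSEL is set: 33 VFs yields 0x1, 40 yields 0xff. A standalone sketch of the arithmetic, with an illustrative helper rather than driver code:

#include <stdio.h>

/* EITRSEL value for VFs beyond the first 32; valid for num_vfs < 64 */
static unsigned int eitrsel_for(unsigned int num_vfs)
{
	if (num_vfs <= 32)
		return 0;
	return (1u << (num_vfs - 32)) - 1;
}

int main(void)
{
	printf("33 VFs -> 0x%x\n", eitrsel_for(33));	/* 0x1 */
	printf("40 VFs -> 0x%x\n", eitrsel_for(40));	/* 0xff */
	printf("50 VFs -> 0x%x\n", eitrsel_for(50));	/* 0x3ffff */
	return 0;
}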
@@ -1564,20 +1544,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 			q_vector->eitr = adapter->rx_eitr_param;
 
 		ixgbe_write_eitr(q_vector);
-		/* If ATR is enabled, set interrupt affinity */
-		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-			/*
-			 * Allocate the affinity_hint cpumask, assign the mask
-			 * for this vector, and set our affinity_hint for
-			 * this irq.
-			 */
-			if (!alloc_cpumask_var(&q_vector->affinity_mask,
-					       GFP_KERNEL))
-				return;
-			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
-			irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
-					      q_vector->affinity_mask);
-		}
 	}
 
 	switch (adapter->hw.mac.type) {
@@ -1862,72 +1828,6 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 	}
 }
 
-static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
-{
-	struct ixgbe_adapter *adapter = data;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 eicr;
-
-	/*
-	 * Workaround for Silicon errata.  Use clear-by-write instead
-	 * of clear-by-read.  Reading with EICS will return the
-	 * interrupt causes without clearing, which later be done
-	 * with the write to EICR.
-	 */
-	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
-	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
-
-	if (eicr & IXGBE_EICR_LSC)
-		ixgbe_check_lsc(adapter);
-
-	if (eicr & IXGBE_EICR_MAILBOX)
-		ixgbe_msg_task(adapter);
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-		/* Handle Flow Director Full threshold interrupt */
-		if (eicr & IXGBE_EICR_FLOW_DIR) {
-			int reinit_count = 0;
-			int i;
-			for (i = 0; i < adapter->num_tx_queues; i++) {
-				struct ixgbe_ring *ring = adapter->tx_ring[i];
-				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
-						       &ring->state))
-					reinit_count++;
-			}
-			if (reinit_count) {
-				/* no more flow director interrupts until after init */
-				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
-				eicr &= ~IXGBE_EICR_FLOW_DIR;
-				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
-				ixgbe_service_event_schedule(adapter);
-			}
-		}
-		ixgbe_check_sfp_event(adapter, eicr);
-		if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
-			if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-				adapter->interrupt_event = eicr;
-				adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
-				ixgbe_service_event_schedule(adapter);
-			}
-		}
-		break;
-	default:
-		break;
-	}
-
-	ixgbe_check_fan_failure(adapter, eicr);
-
-	/* re-enable the original interrupt state, no lsc, no queues */
-	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
-			~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));
-
-	return IRQ_HANDLED;
-}
-
 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
 					   u64 qmask)
 {
@@ -1980,165 +1880,122 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
 	/* skip the flush */
 }
 
-static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
-{
-	struct ixgbe_q_vector *q_vector = data;
-
-	if (!q_vector->tx.count)
-		return IRQ_HANDLED;
-
-	/* EIAM disabled interrupts (on this vector) for us */
-	napi_schedule(&q_vector->napi);
-
-	return IRQ_HANDLED;
-}
-
-/**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
- * @irq: unused
- * @data: pointer to our q_vector struct for this interrupt vector
- **/
-static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
-{
-	struct ixgbe_q_vector *q_vector = data;
-
-	if (!q_vector->rx.count)
-		return IRQ_HANDLED;
-
-	/* EIAM disabled interrupts (on this vector) for us */
-	napi_schedule(&q_vector->napi);
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
-{
-	struct ixgbe_q_vector *q_vector = data;
-
-	if (!q_vector->tx.count && !q_vector->rx.count)
-		return IRQ_HANDLED;
-
-	/* EIAM disabled interrupts (on this vector) for us */
-	napi_schedule(&q_vector->napi);
-
-	return IRQ_HANDLED;
-}
-
 /**
- * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
+ * ixgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
 **/
-static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
+				    bool flush)
 {
-	struct ixgbe_q_vector *q_vector =
-		container_of(napi, struct ixgbe_q_vector, napi);
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-	int work_done = 0;
-
-#ifdef CONFIG_IXGBE_DCA
-	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-		ixgbe_update_dca(q_vector);
-#endif
+	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
 
-	ixgbe_clean_rx_irq(q_vector, q_vector->rx.ring, &work_done, budget);
+	/* don't reenable LSC while waiting for link */
+	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
+		mask &= ~IXGBE_EIMS_LSC;
 
-	/* If all Rx work done, exit the polling mode */
-	if (work_done < budget) {
-		napi_complete(napi);
-		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr(q_vector);
-		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			ixgbe_irq_enable_queues(adapter,
-					((u64)1 << q_vector->v_idx));
+	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+		mask |= IXGBE_EIMS_GPI_SDP0;
+	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
+		mask |= IXGBE_EIMS_GPI_SDP1;
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		mask |= IXGBE_EIMS_ECC;
+		mask |= IXGBE_EIMS_GPI_SDP1;
+		mask |= IXGBE_EIMS_GPI_SDP2;
+		mask |= IXGBE_EIMS_MAILBOX;
+		break;
+	default:
+		break;
 	}
+	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+		mask |= IXGBE_EIMS_FLOW_DIR;
 
-	return work_done;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+	if (queues)
+		ixgbe_irq_enable_queues(adapter, ~0);
+	if (flush)
+		IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
-/**
- * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean more than one rx queue associated with a
- * q_vector.
- **/
-static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
+static irqreturn_t ixgbe_msix_other(int irq, void *data)
 {
-	struct ixgbe_q_vector *q_vector =
-		container_of(napi, struct ixgbe_q_vector, napi);
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-	struct ixgbe_ring *ring;
-	int work_done = 0;
-	bool clean_complete = true;
+	struct ixgbe_adapter *adapter = data;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 eicr;
 
-#ifdef CONFIG_IXGBE_DCA
-	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-		ixgbe_update_dca(q_vector);
-#endif
+	/*
+	 * Workaround for Silicon errata.  Use clear-by-write instead
+	 * of clear-by-read.  Reading with EICS will return the
+	 * interrupt causes without clearing, which later be done
+	 * with the write to EICR.
+	 */
+	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
 
-	for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
-		clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
+	if (eicr & IXGBE_EICR_LSC)
+		ixgbe_check_lsc(adapter);
 
-	/* attempt to distribute budget to each queue fairly, but don't allow
-	 * the budget to go below 1 because we'll exit polling */
-	budget /= (q_vector->rx.count ?: 1);
-	budget = max(budget, 1);
+	if (eicr & IXGBE_EICR_MAILBOX)
+		ixgbe_msg_task(adapter);
 
-	for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
-		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		if (eicr & IXGBE_EICR_ECC)
+			e_info(link, "Received unrecoverable ECC Err, please "
+			       "reboot\n");
+		/* Handle Flow Director Full threshold interrupt */
+		if (eicr & IXGBE_EICR_FLOW_DIR) {
+			int reinit_count = 0;
+			int i;
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				struct ixgbe_ring *ring = adapter->tx_ring[i];
+				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+						       &ring->state))
+					reinit_count++;
+			}
+			if (reinit_count) {
+				/* no more flow director interrupts until after init */
+				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
+				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+				ixgbe_service_event_schedule(adapter);
+			}
+		}
+		ixgbe_check_sfp_event(adapter, eicr);
+		if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+			if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+				adapter->interrupt_event = eicr;
+				adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+				ixgbe_service_event_schedule(adapter);
+			}
+		}
+		break;
+	default:
+		break;
+	}
 
-	if (!clean_complete)
-		work_done = budget;
+	ixgbe_check_fan_failure(adapter, eicr);
 
-	/* If all Rx work done, exit the polling mode */
-	if (work_done < budget) {
-		napi_complete(napi);
-		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr(q_vector);
-		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			ixgbe_irq_enable_queues(adapter,
-					((u64)1 << q_vector->v_idx));
-		return 0;
-	}
+	/* re-enable the original interrupt state, no lsc, no queues */
+	if (!test_bit(__IXGBE_DOWN, &adapter->state))
+		ixgbe_irq_enable(adapter, false, false);
 
-	return work_done;
+	return IRQ_HANDLED;
 }
 
-/**
- * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
+static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
 {
-	struct ixgbe_q_vector *q_vector =
-		container_of(napi, struct ixgbe_q_vector, napi);
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-
-#ifdef CONFIG_IXGBE_DCA
-	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-		ixgbe_update_dca(q_vector);
-#endif
+	struct ixgbe_q_vector *q_vector = data;
 
-	if (!ixgbe_clean_tx_irq(q_vector, q_vector->tx.ring))
-		return budget;
+	/* EIAM disabled interrupts (on this vector) for us */
 
-	/* If all Tx work done, exit the polling mode */
-	napi_complete(napi);
-	if (adapter->tx_itr_setting & 1)
-		ixgbe_set_itr(q_vector);
-	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule(&q_vector->napi);
 
-	return 0;
+	return IRQ_HANDLED;
 }
 
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
@@ -2176,59 +2033,41 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
+static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
 {
-	int q_vectors;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
+	int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
 	int v_start = 0;
-	int rxr_idx = 0, txr_idx = 0;
-	int rxr_remaining = adapter->num_rx_queues;
-	int txr_remaining = adapter->num_tx_queues;
-	int i, j;
-	int rqpv, tqpv;
-	int err = 0;
 
-	/* No mapping required if MSI-X is disabled. */
+	/* only one q_vector if MSI-X is disabled. */
 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-		goto out;
-
-	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+		q_vectors = 1;
 
 	/*
-	 * The ideal configuration...
-	 * We have enough vectors to map one per queue.
+	 * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+	 * group them so there are multiple queues per vector.
+	 *
+	 * Re-adjusting *qpv takes care of the remainder.
 	 */
-	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
-		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+	for (; v_start < q_vectors && rxr_remaining; v_start++) {
+		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
+		for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
 			map_vector_to_rxq(adapter, v_start, rxr_idx);
-
-		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
-			map_vector_to_txq(adapter, v_start, txr_idx);
-
-		goto out;
 	}
 
 	/*
-	 * If we don't have enough vectors for a 1-to-1
-	 * mapping, we'll have to group them so there are
-	 * multiple queues per vector.
+	 * If there are not enough q_vectors for each ring to have it's own
+	 * vector then we must pair up Rx/Tx on a each vector
 	 */
-	/* Re-adjusting *qpv takes care of the remainder. */
-	for (i = v_start; i < q_vectors; i++) {
-		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
-		for (j = 0; j < rqpv; j++) {
-			map_vector_to_rxq(adapter, i, rxr_idx);
-			rxr_idx++;
-			rxr_remaining--;
-		}
-		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
-		for (j = 0; j < tqpv; j++) {
-			map_vector_to_txq(adapter, i, txr_idx);
-			txr_idx++;
-			txr_remaining--;
-		}
+	if ((v_start + txr_remaining) > q_vectors)
+		v_start = 0;
+
+	for (; v_start < q_vectors && txr_remaining; v_start++) {
+		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
+		for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
+			map_vector_to_txq(adapter, v_start, txr_idx);
 	}
-out:
-	return err;
 }
 
 /**
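
The rewritten loops hand out rings with DIV_ROUND_UP against the vectors still unclaimed, so earlier vectors absorb any remainder; 10 Rx rings over 4 vectors come out 3/3/2/2. A user-space sketch of that distribution, with illustrative names:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4, rxr_remaining = 10, rxr_idx = 0;

	for (int v = 0; v < q_vectors && rxr_remaining; v++) {
		/* claim a fair share of what is left for this vector */
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v);

		printf("vector %d gets rings", v);
		for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
			printf(" %d", rxr_idx);
		printf("\n");
	}
	return 0;
}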
@@ -2241,53 +2080,45 @@ out:
 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	irqreturn_t (*handler)(int, void *);
-	int i, vector, q_vectors, err;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int vector, err;
 	int ri = 0, ti = 0;
 
-	/* Decrement for Other and TCP Timer vectors */
-	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-	err = ixgbe_map_rings_to_vectors(adapter);
-	if (err)
-		return err;
-
-#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count)        \
-					? &ixgbe_msix_clean_many : \
-			  (_v)->rx.count ? &ixgbe_msix_clean_rx   : \
-			  (_v)->tx.count ? &ixgbe_msix_clean_tx   : \
-			  NULL)
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
-		handler = SET_HANDLER(q_vector);
+		struct msix_entry *entry = &adapter->msix_entries[vector];
 
-		if (handler == &ixgbe_msix_clean_rx) {
+		if (q_vector->tx.ring && q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-				 "%s-%s-%d", netdev->name, "rx", ri++);
-		} else if (handler == &ixgbe_msix_clean_tx) {
+				 "%s-%s-%d", netdev->name, "TxRx", ri++);
+			ti++;
+		} else if (q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-				 "%s-%s-%d", netdev->name, "tx", ti++);
-		} else if (handler == &ixgbe_msix_clean_many) {
+				 "%s-%s-%d", netdev->name, "rx", ri++);
+		} else if (q_vector->tx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-				 "%s-%s-%d", netdev->name, "TxRx", ri++);
-			ti++;
+				 "%s-%s-%d", netdev->name, "tx", ti++);
 		} else {
 			/* skip this unused q_vector */
 			continue;
 		}
-		err = request_irq(adapter->msix_entries[vector].vector,
-				  handler, 0, q_vector->name,
-				  q_vector);
+		err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
+				  q_vector->name, q_vector);
 		if (err) {
 			e_err(probe, "request_irq failed for MSIX interrupt "
 			      "Error: %d\n", err);
 			goto free_queue_irqs;
 		}
+		/* If Flow Director is enabled, set interrupt affinity */
+		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+			/* assign the mask for this irq */
+			irq_set_affinity_hint(entry->vector,
+					      q_vector->affinity_mask);
+		}
 	}
 
-	sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
+			  ixgbe_msix_other, 0, netdev->name, adapter);
 	if (err) {
 		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
 		goto free_queue_irqs;
@@ -2296,9 +2127,13 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	return 0;
 
free_queue_irqs:
-	for (i = vector - 1; i >= 0; i--)
-		free_irq(adapter->msix_entries[--vector].vector,
-			 adapter->q_vector[i]);
+	while (vector) {
+		vector--;
+		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
+				      NULL);
+		free_irq(adapter->msix_entries[vector].vector,
+			 adapter->q_vector[vector]);
+	}
 	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
@@ -2307,47 +2142,6 @@ free_queue_irqs:
 }
 
 /**
- * ixgbe_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- **/
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
-				    bool flush)
-{
-	u32 mask;
-
-	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
-	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
-		mask |= IXGBE_EIMS_GPI_SDP0;
-	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
-		mask |= IXGBE_EIMS_GPI_SDP1;
-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-		mask |= IXGBE_EIMS_ECC;
-		mask |= IXGBE_EIMS_GPI_SDP1;
-		mask |= IXGBE_EIMS_GPI_SDP2;
-		if (adapter->num_vfs)
-			mask |= IXGBE_EIMS_MAILBOX;
-		break;
-	default:
-		break;
-	}
-	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-		mask |= IXGBE_EIMS_FLOW_DIR;
-
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-	if (queues)
-		ixgbe_irq_enable_queues(adapter, ~0);
-	if (flush)
-		IXGBE_WRITE_FLUSH(&adapter->hw);
-
-	if (adapter->num_vfs > 32) {
-		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
-	}
-}
-
-/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
@@ -2455,19 +2249,25 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	int err;
 
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+	/* map all of the rings to the q_vectors */
+	ixgbe_map_rings_to_vectors(adapter);
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 		err = ixgbe_request_msix_irqs(adapter);
-	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+	else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
 				  netdev->name, adapter);
-	} else {
+	else
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
 				  netdev->name, adapter);
-	}
 
-	if (err)
+	if (err) {
 		e_err(probe, "request_irq failed, Error %d\n", err);
 
+		/* place q_vectors and rings back into a known good state */
+		ixgbe_reset_q_vectors(adapter);
+	}
+
 	return err;
 }
 
@@ -2477,25 +2277,29 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 		int i, q_vectors;
 
 		q_vectors = adapter->num_msix_vectors;
-
 		i = q_vectors - 1;
 		free_irq(adapter->msix_entries[i].vector, adapter);
-
 		i--;
+
 		for (; i >= 0; i--) {
 			/* free only the irqs that were actually requested */
-			if (!adapter->q_vector[i]->rx.count &&
-			    !adapter->q_vector[i]->tx.count)
+			if (!adapter->q_vector[i]->rx.ring &&
+			    !adapter->q_vector[i]->tx.ring)
 				continue;
 
+			/* clear the affinity_mask in the IRQ descriptor */
+			irq_set_affinity_hint(adapter->msix_entries[i].vector,
+					      NULL);
+
 			free_irq(adapter->msix_entries[i].vector,
 				 adapter->q_vector[i]);
 		}
-
-		ixgbe_reset_q_vectors(adapter);
 	} else {
 		free_irq(adapter->pdev->irq, adapter);
 	}
+
+	/* clear q_vector state information */
+	ixgbe_reset_q_vectors(adapter);
 }
 
 /**
@@ -2513,8 +2317,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
-		if (adapter->num_vfs > 32)
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
 		break;
 	default:
 		break;
@@ -2543,9 +2345,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 	ixgbe_set_ivar(adapter, 0, 0, 0);
 	ixgbe_set_ivar(adapter, 1, 0, 0);
 
-	map_vector_to_rxq(adapter, 0, 0);
-	map_vector_to_txq(adapter, 0, 0);
-
 	e_info(hw, "Legacy interrupt IVAR setup done\n");
 }
 
@@ -2562,13 +2361,11 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	struct ixgbe_hw *hw = &adapter->hw;
 	u64 tdba = ring->dma;
 	int wait_loop = 10;
-	u32 txdctl;
+	u32 txdctl = IXGBE_TXDCTL_ENABLE;
 	u8 reg_idx = ring->reg_idx;
 
 	/* disable queue to avoid issues while updating state */
-	txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
-	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
-			txdctl & ~IXGBE_TXDCTL_ENABLE);
+	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
 	IXGBE_WRITE_FLUSH(hw);
 
 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
@@ -2580,18 +2377,22 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
 	ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
 
-	/* configure fetching thresholds */
-	if (adapter->rx_itr_setting == 0) {
-		/* cannot set wthresh when itr==0 */
-		txdctl &= ~0x007F0000;
-	} else {
-		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
-		txdctl |= (8 << 16);
-	}
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-		/* PThresh workaround for Tx hang with DFP enabled. */
-		txdctl |= 32;
-	}
+	/*
+	 * set WTHRESH to encourage burst writeback, it should not be set
+	 * higher than 1 when ITR is 0 as it could cause false TX hangs
+	 *
+	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
+	 * to or less than the number of on chip descriptors, which is
+	 * currently 40.
+	 */
+	if (!adapter->tx_itr_setting || !adapter->rx_itr_setting)
+		txdctl |= (1 << 16);	/* WTHRESH = 1 */
+	else
+		txdctl |= (8 << 16);	/* WTHRESH = 8 */
+
+	/* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. */
+	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
+		   32;		/* PTHRESH = 32 */
 
 	/* reinitialize flowdirector state */
 	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
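
The shifts in the new threshold code follow the TXDCTL register layout in the 82599 datasheet: PTHRESH lives in bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits 22:16, with the enable bit at 25. A standalone sketch assembling the same value the hunk builds for the non-zero-ITR case:

#include <stdio.h>

int main(void)
{
	unsigned int txdctl = 1u << 25;	/* ENABLE, assuming bit 25 as in the datasheet */
	int itr_is_zero = 0;		/* is either Tx or Rx ITR disabled? */

	txdctl |= (itr_is_zero ? 1u : 8u) << 16;	/* WTHRESH */
	txdctl |= 1u << 8;				/* HTHRESH = 1 */
	txdctl |= 32;					/* PTHRESH = 32 */

	printf("TXDCTL = 0x%08x\n", txdctl);	/* 0x02080120 */
	return 0;
}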
@@ -2606,7 +2407,6 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
 
 	/* enable queue */
-	txdctl |= IXGBE_TXDCTL_ENABLE;
 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
 
 	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
@@ -3478,19 +3278,8 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 		q_vectors = 1;
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		struct napi_struct *napi;
 		q_vector = adapter->q_vector[q_idx];
-		napi = &q_vector->napi;
-		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-			if (!q_vector->rx.count || !q_vector->tx.count) {
-				if (q_vector->tx.count == 1)
-					napi->poll = &ixgbe_clean_txonly;
-				else if (q_vector->rx.count == 1)
-					napi->poll = &ixgbe_clean_rxonly;
-			}
-		}
-
-		napi_enable(napi);
+		napi_enable(&q_vector->napi);
 	}
 }
 
@@ -4045,7 +3834,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
4045 | struct ixgbe_hw *hw = &adapter->hw; | 3834 | struct ixgbe_hw *hw = &adapter->hw; |
4046 | u32 rxctrl; | 3835 | u32 rxctrl; |
4047 | int i; | 3836 | int i; |
4048 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
4049 | 3837 | ||
4050 | /* signal that we are down to the interrupt handler */ | 3838 | /* signal that we are down to the interrupt handler */ |
4051 | set_bit(__IXGBE_DOWN, &adapter->state); | 3839 | set_bit(__IXGBE_DOWN, &adapter->state); |
@@ -4077,26 +3865,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
4077 | 3865 | ||
4078 | del_timer_sync(&adapter->service_timer); | 3866 | del_timer_sync(&adapter->service_timer); |
4079 | 3867 | ||
4080 | /* disable receive for all VFs and wait one second */ | ||
4081 | if (adapter->num_vfs) { | 3868 | if (adapter->num_vfs) { |
4082 | /* ping all the active vfs to let them know we are going down */ | 3869 | /* Clear EITR Select mapping */ |
4083 | ixgbe_ping_all_vfs(adapter); | 3870 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); |
4084 | |||
4085 | /* Disable all VFTE/VFRE TX/RX */ | ||
4086 | ixgbe_disable_tx_rx(adapter); | ||
4087 | 3871 | ||
4088 | /* Mark all the VFs as inactive */ | 3872 | /* Mark all the VFs as inactive */ |
4089 | for (i = 0 ; i < adapter->num_vfs; i++) | 3873 | for (i = 0 ; i < adapter->num_vfs; i++) |
4090 | adapter->vfinfo[i].clear_to_send = 0; | 3874 | adapter->vfinfo[i].clear_to_send = 0; |
4091 | } | ||
4092 | 3875 | ||
4093 | /* Cleanup the affinity_hint CPU mask memory and callback */ | 3876 | /* ping all the active vfs to let them know we are going down */ |
4094 | for (i = 0; i < num_q_vectors; i++) { | 3877 | ixgbe_ping_all_vfs(adapter); |
4095 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; | 3878 | |
4096 | /* clear the affinity_mask in the IRQ descriptor */ | 3879 | /* Disable all VFTE/VFRE TX/RX */ |
4097 | irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL); | 3880 | ixgbe_disable_tx_rx(adapter); |
4098 | /* release the CPU mask memory */ | ||
4099 | free_cpumask_var(q_vector->affinity_mask); | ||
4100 | } | 3881 | } |
4101 | 3882 | ||
4102 | /* disable transmits in the hardware now that interrupts are off */ | 3883 | /* disable transmits in the hardware now that interrupts are off */ |
@@ -4148,28 +3929,41 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) | |||
4148 | struct ixgbe_q_vector *q_vector = | 3929 | struct ixgbe_q_vector *q_vector = |
4149 | container_of(napi, struct ixgbe_q_vector, napi); | 3930 | container_of(napi, struct ixgbe_q_vector, napi); |
4150 | struct ixgbe_adapter *adapter = q_vector->adapter; | 3931 | struct ixgbe_adapter *adapter = q_vector->adapter; |
4151 | int tx_clean_complete, work_done = 0; | 3932 | struct ixgbe_ring *ring; |
3933 | int per_ring_budget; | ||
3934 | bool clean_complete = true; | ||
4152 | 3935 | ||
4153 | #ifdef CONFIG_IXGBE_DCA | 3936 | #ifdef CONFIG_IXGBE_DCA |
4154 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 3937 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
4155 | ixgbe_update_dca(q_vector); | 3938 | ixgbe_update_dca(q_vector); |
4156 | #endif | 3939 | #endif |
4157 | 3940 | ||
4158 | tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); | 3941 | for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next) |
4159 | ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget); | 3942 | clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); |
4160 | 3943 | ||
4161 | if (!tx_clean_complete) | 3944 | /* attempt to distribute budget to each queue fairly, but don't allow |
4162 | work_done = budget; | 3945 | * the budget to go below 1 because we'll exit polling */ |
3946 | if (q_vector->rx.count > 1) | ||
3947 | per_ring_budget = max(budget/q_vector->rx.count, 1); | ||
3948 | else | ||
3949 | per_ring_budget = budget; | ||
4163 | 3950 | ||
4164 | /* If budget not fully consumed, exit the polling mode */ | 3951 | for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next) |
4165 | if (work_done < budget) { | 3952 | clean_complete &= ixgbe_clean_rx_irq(q_vector, ring, |
4166 | napi_complete(napi); | 3953 | per_ring_budget); |
4167 | if (adapter->rx_itr_setting & 1) | 3954 | |
4168 | ixgbe_set_itr(q_vector); | 3955 | /* If all work not completed, return budget and keep polling */ |
4169 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 3956 | if (!clean_complete) |
4170 | ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); | 3957 | return budget; |
4171 | } | 3958 | |
4172 | return work_done; | 3959 | /* all work done, exit the polling mode */ |
3960 | napi_complete(napi); | ||
3961 | if (adapter->rx_itr_setting & 1) | ||
3962 | ixgbe_set_itr(q_vector); | ||
3963 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
3964 | ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); | ||
3965 | |||
3966 | return 0; | ||
4173 | } | 3967 | } |
4174 | 3968 | ||
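The fair-split comment deserves emphasis: a ring handed a budget of 0 would exit polling without cleaning anything, so the split is clamped to a minimum of 1. The same arithmetic as a standalone sketch (function name hypothetical):

    /* Divide the NAPI budget across rx_ring_count rings without ever
     * handing a ring a budget below 1. */
    static int per_ring_budget(int budget, int rx_ring_count)
    {
            if (rx_ring_count > 1) {
                    int share = budget / rx_ring_count;
                    return share > 0 ? share : 1;  /* max(budget/count, 1) */
            }
            return budget;
    }

With the default NAPI budget of 64 and three RX rings on one vector, each ring is polled with a budget of 21.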
4175 | /** | 3969 | /** |
@@ -4810,19 +4604,15 @@ out: | |||
4810 | **/ | 4604 | **/ |
4811 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | 4605 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) |
4812 | { | 4606 | { |
4813 | int q_idx, num_q_vectors; | 4607 | int v_idx, num_q_vectors; |
4814 | struct ixgbe_q_vector *q_vector; | 4608 | struct ixgbe_q_vector *q_vector; |
4815 | int (*poll)(struct napi_struct *, int); | ||
4816 | 4609 | ||
4817 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 4610 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
4818 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 4611 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
4819 | poll = &ixgbe_clean_rxtx_many; | 4612 | else |
4820 | } else { | ||
4821 | num_q_vectors = 1; | 4613 | num_q_vectors = 1; |
4822 | poll = &ixgbe_poll; | ||
4823 | } | ||
4824 | 4614 | ||
4825 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | 4615 | for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { |
4826 | q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), | 4616 | q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), |
4827 | GFP_KERNEL, adapter->node); | 4617 | GFP_KERNEL, adapter->node); |
4828 | if (!q_vector) | 4618 | if (!q_vector) |
@@ -4830,25 +4620,35 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | |||
4830 | GFP_KERNEL); | 4620 | GFP_KERNEL); |
4831 | if (!q_vector) | 4621 | if (!q_vector) |
4832 | goto err_out; | 4622 | goto err_out; |
4623 | |||
4833 | q_vector->adapter = adapter; | 4624 | q_vector->adapter = adapter; |
4625 | q_vector->v_idx = v_idx; | ||
4626 | |||
4627 | /* Allocate the affinity_hint cpumask, configure the mask */ | ||
4628 | if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL)) | ||
4629 | goto err_out; | ||
4630 | cpumask_set_cpu(v_idx, q_vector->affinity_mask); | ||
4631 | |||
4834 | if (q_vector->tx.count && !q_vector->rx.count) | 4632 | if (q_vector->tx.count && !q_vector->rx.count) |
4835 | q_vector->eitr = adapter->tx_eitr_param; | 4633 | q_vector->eitr = adapter->tx_eitr_param; |
4836 | else | 4634 | else |
4837 | q_vector->eitr = adapter->rx_eitr_param; | 4635 | q_vector->eitr = adapter->rx_eitr_param; |
4838 | q_vector->v_idx = q_idx; | 4636 | |
4839 | netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); | 4637 | netif_napi_add(adapter->netdev, &q_vector->napi, |
4840 | adapter->q_vector[q_idx] = q_vector; | 4638 | ixgbe_poll, 64); |
4639 | adapter->q_vector[v_idx] = q_vector; | ||
4841 | } | 4640 | } |
4842 | 4641 | ||
4843 | return 0; | 4642 | return 0; |
4844 | 4643 | ||
4845 | err_out: | 4644 | err_out: |
4846 | while (q_idx) { | 4645 | while (v_idx) { |
4847 | q_idx--; | 4646 | v_idx--; |
4848 | q_vector = adapter->q_vector[q_idx]; | 4647 | q_vector = adapter->q_vector[v_idx]; |
4849 | netif_napi_del(&q_vector->napi); | 4648 | netif_napi_del(&q_vector->napi); |
4649 | free_cpumask_var(q_vector->affinity_mask); | ||
4850 | kfree(q_vector); | 4650 | kfree(q_vector); |
4851 | adapter->q_vector[q_idx] = NULL; | 4651 | adapter->q_vector[v_idx] = NULL; |
4852 | } | 4652 | } |
4853 | return -ENOMEM; | 4653 | return -ENOMEM; |
4854 | } | 4654 | } |
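Note the shape of the err_out path above: it walks v_idx back down and frees only the vectors that were actually allocated. The same unwind idiom in isolation, with hypothetical make_obj()/free_obj() standing in for the per-vector setup and teardown:

    #include <errno.h>

    struct obj;                     /* opaque, for illustration */
    struct obj *make_obj(int idx);  /* hypothetical constructor */
    void free_obj(struct obj *o);   /* hypothetical destructor */

    static int alloc_all(struct obj **vec, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    vec[i] = make_obj(i);
                    if (!vec[i])
                            goto err_out;
            }
            return 0;

    err_out:
            while (i--) {           /* free only what was allocated */
                    free_obj(vec[i]);
                    vec[i] = NULL;
            }
            return -ENOMEM;
    }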
@@ -4863,17 +4663,18 @@ err_out: | |||
4863 | **/ | 4663 | **/ |
4864 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) | 4664 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) |
4865 | { | 4665 | { |
4866 | int q_idx, num_q_vectors; | 4666 | int v_idx, num_q_vectors; |
4867 | 4667 | ||
4868 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | 4668 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
4869 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 4669 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
4870 | else | 4670 | else |
4871 | num_q_vectors = 1; | 4671 | num_q_vectors = 1; |
4872 | 4672 | ||
4873 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | 4673 | for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { |
4874 | struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; | 4674 | struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; |
4875 | adapter->q_vector[q_idx] = NULL; | 4675 | adapter->q_vector[v_idx] = NULL; |
4876 | netif_napi_del(&q_vector->napi); | 4676 | netif_napi_del(&q_vector->napi); |
4677 | free_cpumask_var(q_vector->affinity_mask); | ||
4877 | kfree(q_vector); | 4678 | kfree(q_vector); |
4878 | } | 4679 | } |
4879 | } | 4680 | } |
@@ -5091,7 +4892,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
5091 | adapter->rx_ring_count = IXGBE_DEFAULT_RXD; | 4892 | adapter->rx_ring_count = IXGBE_DEFAULT_RXD; |
5092 | 4893 | ||
5093 | /* set default work limits */ | 4894 | /* set default work limits */ |
5094 | adapter->tx_work_limit = adapter->tx_ring_count; | 4895 | adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; |
5095 | 4896 | ||
5096 | /* initialize eeprom parameters */ | 4897 | /* initialize eeprom parameters */ |
5097 | if (ixgbe_init_eeprom_params_generic(hw)) { | 4898 | if (ixgbe_init_eeprom_params_generic(hw)) { |
@@ -6959,7 +6760,7 @@ static void ixgbe_netpoll(struct net_device *netdev) | |||
6959 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 6760 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
6960 | for (i = 0; i < num_q_vectors; i++) { | 6761 | for (i = 0; i < num_q_vectors; i++) { |
6961 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; | 6762 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; |
6962 | ixgbe_msix_clean_many(0, q_vector); | 6763 | ixgbe_msix_clean_rings(0, q_vector); |
6963 | } | 6764 | } |
6964 | } else { | 6765 | } else { |
6965 | ixgbe_intr(adapter->pdev->irq, netdev); | 6766 | ixgbe_intr(adapter->pdev->irq, netdev); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 9f618ee7d333..a9f8839bffb9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | |||
@@ -982,6 +982,7 @@ | |||
982 | #define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ | 982 | #define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ |
983 | #define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ | 983 | #define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ |
984 | #define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ | 984 | #define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ |
985 | #define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) | ||
985 | 986 | ||
986 | /* FACTPS */ | 987 | /* FACTPS */ |
987 | #define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ | 988 | #define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 2696c78e9f46..bbfe8c40a784 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | |||
@@ -94,13 +94,8 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, | |||
94 | static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) | 94 | static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) |
95 | { | 95 | { |
96 | ixgbe_link_speed link_speed; | 96 | ixgbe_link_speed link_speed; |
97 | s32 status = 0; | 97 | s32 status; |
98 | u32 ctrl; | 98 | u32 ctrl, i; |
99 | u32 ctrl_ext; | ||
100 | u32 reset_bit; | ||
101 | u32 i; | ||
102 | u32 autoc; | ||
103 | u32 autoc2; | ||
104 | bool link_up = false; | 99 | bool link_up = false; |
105 | 100 | ||
106 | /* Call adapter stop to disable tx/rx and clear interrupts */ | 101 | /* Call adapter stop to disable tx/rx and clear interrupts */ |
@@ -119,53 +114,42 @@ mac_reset_top: | |||
119 | * mng is using it. If link is down or the flag to force full link | 114 | * mng is using it. If link is down or the flag to force full link |
120 | * reset is set, then perform link reset. | 115 | * reset is set, then perform link reset. |
121 | */ | 116 | */ |
122 | if (hw->force_full_reset) { | 117 | ctrl = IXGBE_CTRL_LNK_RST; |
123 | reset_bit = IXGBE_CTRL_LNK_RST; | 118 | if (!hw->force_full_reset) { |
124 | } else { | ||
125 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | 119 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); |
126 | if (!link_up) | 120 | if (link_up) |
127 | reset_bit = IXGBE_CTRL_LNK_RST; | 121 | ctrl = IXGBE_CTRL_RST; |
128 | else | ||
129 | reset_bit = IXGBE_CTRL_RST; | ||
130 | } | 122 | } |
131 | 123 | ||
132 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 124 | ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); |
133 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); | 125 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); |
134 | IXGBE_WRITE_FLUSH(hw); | 126 | IXGBE_WRITE_FLUSH(hw); |
135 | 127 | ||
136 | /* Poll for reset bit to self-clear indicating reset is complete */ | 128 | /* Poll for reset bit to self-clear indicating reset is complete */ |
137 | for (i = 0; i < 10; i++) { | 129 | for (i = 0; i < 10; i++) { |
138 | udelay(1); | 130 | udelay(1); |
139 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 131 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
140 | if (!(ctrl & reset_bit)) | 132 | if (!(ctrl & IXGBE_CTRL_RST_MASK)) |
141 | break; | 133 | break; |
142 | } | 134 | } |
143 | if (ctrl & reset_bit) { | 135 | |
136 | if (ctrl & IXGBE_CTRL_RST_MASK) { | ||
144 | status = IXGBE_ERR_RESET_FAILED; | 137 | status = IXGBE_ERR_RESET_FAILED; |
145 | hw_dbg(hw, "Reset polling failed to complete.\n"); | 138 | hw_dbg(hw, "Reset polling failed to complete.\n"); |
146 | } | 139 | } |
147 | 140 | ||
141 | msleep(50); | ||
142 | |||
148 | /* | 143 | /* |
149 | * Double resets are required for recovery from certain error | 144 | * Double resets are required for recovery from certain error |
150 | * conditions. Between resets, it is necessary to stall to allow time | 145 | * conditions. Between resets, it is necessary to stall to allow time |
151 | * for any pending HW events to complete. We use 1usec since that is | 146 | * for any pending HW events to complete. |
152 | * what is needed for ixgbe_disable_pcie_master(). The second reset | ||
153 | * then clears out any effects of those events. | ||
154 | */ | 147 | */ |
155 | if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { | 148 | if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { |
156 | hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; | 149 | hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; |
157 | udelay(1); | ||
158 | goto mac_reset_top; | 150 | goto mac_reset_top; |
159 | } | 151 | } |
160 | 152 | ||
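The double-reset handling is a retry-once idiom: the flag requests one extra pass and is cleared before the jump, so the goto can fire at most once per call. In isolation, with do_reset() as a hypothetical stand-in for the reset sequence above:

    struct hw_state { unsigned int flags; };
    #define DOUBLE_RESET_REQUIRED 0x1

    void do_reset(struct hw_state *hw);     /* hypothetical */

    static void reset_with_retry(struct hw_state *hw)
    {
    mac_reset_top:
            do_reset(hw);

            /* Clear the flag before jumping so at most one retry runs. */
            if (hw->flags & DOUBLE_RESET_REQUIRED) {
                    hw->flags &= ~DOUBLE_RESET_REQUIRED;
                    goto mac_reset_top;
            }
    }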
161 | /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ | ||
162 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | ||
163 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; | ||
164 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); | ||
165 | IXGBE_WRITE_FLUSH(hw); | ||
166 | |||
167 | msleep(50); | ||
168 | |||
169 | /* Set the Rx packet buffer size. */ | 153 | /* Set the Rx packet buffer size. */ |
170 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); | 154 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); |
171 | 155 | ||
@@ -173,31 +157,6 @@ mac_reset_top: | |||
173 | hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); | 157 | hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); |
174 | 158 | ||
175 | /* | 159 | /* |
176 | * Store the original AUTOC/AUTOC2 values if they have not been | ||
177 | * stored off yet. Otherwise restore the stored original | ||
178 | * values since the reset operation sets back to defaults. | ||
179 | */ | ||
180 | autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); | ||
181 | autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); | ||
182 | if (hw->mac.orig_link_settings_stored == false) { | ||
183 | hw->mac.orig_autoc = autoc; | ||
184 | hw->mac.orig_autoc2 = autoc2; | ||
185 | hw->mac.orig_link_settings_stored = true; | ||
186 | } else { | ||
187 | if (autoc != hw->mac.orig_autoc) | ||
188 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | | ||
189 | IXGBE_AUTOC_AN_RESTART)); | ||
190 | |||
191 | if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != | ||
192 | (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { | ||
193 | autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; | ||
194 | autoc2 |= (hw->mac.orig_autoc2 & | ||
195 | IXGBE_AUTOC2_UPPER_MASK); | ||
196 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); | ||
197 | } | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * Store MAC address from RAR0, clear receive address registers, and | 160 | * Store MAC address from RAR0, clear receive address registers, and |
202 | * clear the multicast table. Also reset num_rar_entries to 128, | 161 | * clear the multicast table. Also reset num_rar_entries to 128, |
203 | * since we modify this value when programming the SAN MAC address. | 162 | * since we modify this value when programming the SAN MAC address. |
@@ -205,9 +164,6 @@ mac_reset_top: | |||
205 | hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; | 164 | hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; |
206 | hw->mac.ops.init_rx_addrs(hw); | 165 | hw->mac.ops.init_rx_addrs(hw); |
207 | 166 | ||
208 | /* Store the permanent mac address */ | ||
209 | hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); | ||
210 | |||
211 | /* Store the permanent SAN mac address */ | 167 | /* Store the permanent SAN mac address */ |
212 | hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); | 168 | hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); |
213 | 169 | ||