Diffstat (limited to 'drivers/net/igb')
 -rw-r--r--  drivers/net/igb/e1000_82575.c   |  603
 -rw-r--r--  drivers/net/igb/e1000_82575.h   |   37
 -rw-r--r--  drivers/net/igb/e1000_defines.h |   57
 -rw-r--r--  drivers/net/igb/e1000_hw.h      |   30
 -rw-r--r--  drivers/net/igb/e1000_mac.c     |  176
 -rw-r--r--  drivers/net/igb/e1000_mac.h     |    2
 -rw-r--r--  drivers/net/igb/e1000_mbx.c     |   82
 -rw-r--r--  drivers/net/igb/e1000_mbx.h     |   10
 -rw-r--r--  drivers/net/igb/e1000_nvm.c     |   36
 -rw-r--r--  drivers/net/igb/e1000_phy.c     |  479
 -rw-r--r--  drivers/net/igb/e1000_phy.h     |   39
 -rw-r--r--  drivers/net/igb/e1000_regs.h    |   81
 -rw-r--r--  drivers/net/igb/igb.h           |  160
 -rw-r--r--  drivers/net/igb/igb_ethtool.c   |  838
 -rw-r--r--  drivers/net/igb/igb_main.c      | 3738
15 files changed, 3868 insertions(+), 2500 deletions(-)
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index f8f5772557ce..4a32bed77c71 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -30,7 +30,6 @@
 */

 #include <linux/types.h>
-#include <linux/slab.h>
 #include <linux/if_ether.h>

 #include "e1000_mac.h"
@@ -46,7 +45,10 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *);
 static s32  igb_init_hw_82575(struct e1000_hw *);
 static s32  igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
 static s32  igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
+static s32  igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
+static s32  igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
 static s32  igb_reset_hw_82575(struct e1000_hw *);
+static s32  igb_reset_hw_82580(struct e1000_hw *);
 static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
 static s32  igb_setup_copper_link_82575(struct e1000_hw *);
 static s32  igb_setup_serdes_link_82575(struct e1000_hw *);
@@ -62,6 +64,12 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *);
 static s32  igb_read_mac_addr_82575(struct e1000_hw *);
 static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);

+static const u16 e1000_82580_rxpbs_table[] =
+	{ 36, 72, 144, 1, 2, 4, 8, 16,
+	  35, 70, 140 };
+#define E1000_82580_RXPBS_TABLE_SIZE \
+	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+
 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
@@ -81,12 +89,21 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		break;
 	case E1000_DEV_ID_82576:
 	case E1000_DEV_ID_82576_NS:
+	case E1000_DEV_ID_82576_NS_SERDES:
 	case E1000_DEV_ID_82576_FIBER:
 	case E1000_DEV_ID_82576_SERDES:
 	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 	case E1000_DEV_ID_82576_SERDES_QUAD:
 		mac->type = e1000_82576;
 		break;
+	case E1000_DEV_ID_82580_COPPER:
+	case E1000_DEV_ID_82580_FIBER:
+	case E1000_DEV_ID_82580_SERDES:
+	case E1000_DEV_ID_82580_SGMII:
+	case E1000_DEV_ID_82580_COPPER_DUAL:
+		mac->type = e1000_82580;
+		break;
 	default:
 		return -E1000_ERR_MAC_INIT;
 		break;
@@ -109,6 +126,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		dev_spec->sgmii_active = true;
 		ctrl_ext |= E1000_CTRL_I2C_ENA;
 		break;
+	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
 	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
 		hw->phy.media_type = e1000_media_type_internal_serdes;
 		ctrl_ext |= E1000_CTRL_I2C_ENA;
@@ -120,12 +138,26 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)

 	wr32(E1000_CTRL_EXT, ctrl_ext);

+	/*
+	 * if using i2c make certain the MDICNFG register is cleared to prevent
+	 * communications from being misrouted to the mdic registers
+	 */
+	if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
+		wr32(E1000_MDICNFG, 0);
+
 	/* Set mta register count */
 	mac->mta_reg_count = 128;
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
 	if (mac->type == e1000_82576)
 		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+	if (mac->type == e1000_82580)
+		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+	/* reset */
+	if (mac->type == e1000_82580)
+		mac->ops.reset_hw = igb_reset_hw_82580;
+	else
+		mac->ops.reset_hw = igb_reset_hw_82575;
 	/* Set if part includes ASF firmware */
 	mac->asf_firmware_present = true;
 	/* Set if manageability features are enabled. */
@@ -193,6 +225,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		phy->ops.reset      = igb_phy_hw_reset_sgmii_82575;
 		phy->ops.read_reg   = igb_read_phy_reg_sgmii_82575;
 		phy->ops.write_reg  = igb_write_phy_reg_sgmii_82575;
+	} else if (hw->mac.type == e1000_82580) {
+		phy->ops.reset      = igb_phy_hw_reset;
+		phy->ops.read_reg   = igb_read_phy_reg_82580;
+		phy->ops.write_reg  = igb_write_phy_reg_82580;
 	} else {
 		phy->ops.reset      = igb_phy_hw_reset;
 		phy->ops.read_reg   = igb_read_phy_reg_igp;
@@ -224,6 +260,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
 		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
 		break;
+	case I82580_I_PHY_ID:
+		phy->type = e1000_phy_82580;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
+		phy->ops.get_cable_length = igb_get_cable_length_82580;
+		phy->ops.get_phy_info = igb_get_phy_info_82580;
+		break;
 	default:
 		return -E1000_ERR_PHY;
 	}
@@ -240,9 +282,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 **/
 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
 {
-	u16 mask;
+	u16 mask = E1000_SWFW_PHY0_SM;

-	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	if (hw->bus.func == E1000_FUNC_1)
+		mask = E1000_SWFW_PHY1_SM;

 	return igb_acquire_swfw_sync_82575(hw, mask);
 }
@@ -256,9 +299,11 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
 **/
 static void igb_release_phy_82575(struct e1000_hw *hw)
 {
-	u16 mask;
+	u16 mask = E1000_SWFW_PHY0_SM;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		mask = E1000_SWFW_PHY1_SM;

-	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
 	igb_release_swfw_sync_82575(hw, mask);
 }

@@ -274,45 +319,23 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 					u16 *data)
 {
-	struct e1000_phy_info *phy = &hw->phy;
-	u32 i, i2ccmd = 0;
+	s32 ret_val = -E1000_ERR_PARAM;

 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
 		hw_dbg("PHY Address %u is out of range\n", offset);
-		return -E1000_ERR_PARAM;
+		goto out;
 	}

-	/*
-	 * Set up Op-code, Phy Address, and register address in the I2CCMD
-	 * register.  The MAC will take care of interfacing with the
-	 * PHY to retrieve the desired data.
-	 */
-	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
-		  (E1000_I2CCMD_OPCODE_READ));
-
-	wr32(E1000_I2CCMD, i2ccmd);
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;

-	/* Poll the ready bit to see if the I2C read completed */
-	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-		udelay(50);
-		i2ccmd = rd32(E1000_I2CCMD);
-		if (i2ccmd & E1000_I2CCMD_READY)
-			break;
-	}
-	if (!(i2ccmd & E1000_I2CCMD_READY)) {
-		hw_dbg("I2CCMD Read did not complete\n");
-		return -E1000_ERR_PHY;
-	}
-	if (i2ccmd & E1000_I2CCMD_ERROR) {
-		hw_dbg("I2CCMD Error bit set\n");
-		return -E1000_ERR_PHY;
-	}
+	ret_val = igb_read_phy_reg_i2c(hw, offset, data);

-	/* Need to byte-swap the 16-bit value. */
-	*data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+	hw->phy.ops.release(hw);

-	return 0;
+out:
+	return ret_val;
 }

 /**
@@ -327,47 +350,24 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 					 u16 data)
 {
-	struct e1000_phy_info *phy = &hw->phy;
-	u32 i, i2ccmd = 0;
-	u16 phy_data_swapped;
+	s32 ret_val = -E1000_ERR_PARAM;
+

 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
 		hw_dbg("PHY Address %d is out of range\n", offset);
-		return -E1000_ERR_PARAM;
+		goto out;
 	}

-	/* Swap the data bytes for the I2C interface */
-	phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;

-	/*
-	 * Set up Op-code, Phy Address, and register address in the I2CCMD
-	 * register.  The MAC will take care of interfacing with the
-	 * PHY to retrieve the desired data.
-	 */
-	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
-		  E1000_I2CCMD_OPCODE_WRITE |
-		  phy_data_swapped);
-
-	wr32(E1000_I2CCMD, i2ccmd);
-
-	/* Poll the ready bit to see if the I2C read completed */
-	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-		udelay(50);
-		i2ccmd = rd32(E1000_I2CCMD);
-		if (i2ccmd & E1000_I2CCMD_READY)
-			break;
-	}
-	if (!(i2ccmd & E1000_I2CCMD_READY)) {
-		hw_dbg("I2CCMD Write did not complete\n");
-		return -E1000_ERR_PHY;
-	}
-	if (i2ccmd & E1000_I2CCMD_ERROR) {
-		hw_dbg("I2CCMD Error bit set\n");
-		return -E1000_ERR_PHY;
-	}
+	ret_val = igb_write_phy_reg_i2c(hw, offset, data);

-	return 0;
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
 }

 /**
@@ -676,6 +676,10 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)

 	if (hw->bus.func == 1)
 		mask = E1000_NVM_CFG_DONE_PORT_1;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_NVM_CFG_DONE_PORT_2;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_NVM_CFG_DONE_PORT_3;

 	while (timeout) {
 		if (rd32(E1000_EEMNGCTL) & mask)
@@ -706,9 +710,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 speed, duplex;

-	/* SGMII link check is done through the PCS register. */
-	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (igb_sgmii_active_82575(hw))) {
+	if (hw->phy.media_type != e1000_media_type_copper) {
 		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
 		                                             &duplex);
 		/*
@@ -723,6 +725,35 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)

 	return ret_val;
 }
+
+/**
+ *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ *  @hw: pointer to the HW structure
+ **/
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+	u32 reg;
+
+
+	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+	    !igb_sgmii_active_82575(hw))
+		return;
+
+	/* Enable PCS to turn on link */
+	reg = rd32(E1000_PCS_CFG0);
+	reg |= E1000_PCS_CFG_PCS_EN;
+	wr32(E1000_PCS_CFG0, reg);
+
+	/* Power up the laser */
+	reg = rd32(E1000_CTRL_EXT);
+	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+	wr32(E1000_CTRL_EXT, reg);
+
+	/* flush the write to verify completion */
+	wrfl();
+	msleep(1);
+}
+
 /**
 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
@@ -789,11 +820,10 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
 {
 	u32 reg;

-	if (hw->phy.media_type != e1000_media_type_internal_serdes ||
+	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
 	    igb_sgmii_active_82575(hw))
 		return;

-	/* if the management interface is not enabled, then power down */
 	if (!igb_enable_mng_pass_thru(hw)) {
 		/* Disable PCS to turn off link */
 		reg = rd32(E1000_PCS_CFG0);
@@ -809,8 +839,6 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
 		wrfl();
 		msleep(1);
 	}
-
-	return;
 }

 /**
@@ -908,6 +936,11 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
 	for (i = 0; i < mac->mta_reg_count; i++)
 		array_wr32(E1000_MTA, i, 0);

+	/* Zero out the Unicast HASH table */
+	hw_dbg("Zeroing the UTA\n");
+	for (i = 0; i < mac->uta_reg_count; i++)
+		array_wr32(E1000_UTA, i, 0);
+
 	/* Setup link and flow control */
 	ret_val = igb_setup_link(hw);

@@ -934,7 +967,6 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 {
 	u32 ctrl;
 	s32 ret_val;
-	bool link;

 	ctrl = rd32(E1000_CTRL);
 	ctrl |= E1000_CTRL_SLU;
@@ -946,6 +978,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 		goto out;

 	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+		/* allow time for SFP cage time to power up phy */
+		msleep(300);
+
 		ret_val = hw->phy.ops.reset(hw);
 		if (ret_val) {
 			hw_dbg("Error resetting the PHY.\n");
@@ -959,6 +994,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 	case e1000_phy_igp_3:
 		ret_val = igb_copper_link_setup_igp(hw);
 		break;
+	case e1000_phy_82580:
+		ret_val = igb_copper_link_setup_82580(hw);
+		break;
 	default:
 		ret_val = -E1000_ERR_PHY;
 		break;
@@ -967,57 +1005,24 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;

-	if (hw->mac.autoneg) {
-		/*
-		 * Setup autoneg and flow control advertisement
-		 * and perform autonegotiation.
-		 */
-		ret_val = igb_copper_link_autoneg(hw);
-		if (ret_val)
-			goto out;
-	} else {
-		/*
-		 * PHY will be set to 10H, 10F, 100H or 100F
-		 * depending on user settings.
-		 */
-		hw_dbg("Forcing Speed and Duplex\n");
-		ret_val = hw->phy.ops.force_speed_duplex(hw);
-		if (ret_val) {
-			hw_dbg("Error Forcing Speed and Duplex\n");
-			goto out;
-		}
-	}
-
-	/*
-	 * Check link status. Wait up to 100 microseconds for link to become
-	 * valid.
-	 */
-	ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
-	if (ret_val)
-		goto out;
-
-	if (link) {
-		hw_dbg("Valid link established!!!\n");
-		/* Config the MAC and PHY after link is up */
-		igb_config_collision_dist(hw);
-		ret_val = igb_config_fc_after_link_up(hw);
-	} else {
-		hw_dbg("Unable to establish link!!!\n");
-	}
-
+	ret_val = igb_setup_copper_link(hw);
 out:
 	return ret_val;
 }

 /**
- * igb_setup_serdes_link_82575 - Setup link for fiber/serdes
+ * igb_setup_serdes_link_82575 - Setup link for serdes
  * @hw: pointer to the HW structure
  *
- * Configures speed and duplex for fiber and serdes links.
+ * Configure the physical coding sub-layer (PCS) link.  The PCS link is
+ * used on copper connections where the serialized gigabit media independent
+ * interface (sgmii), or serdes fiber is being used.  Configures the link
+ * for auto-negotiation or forces speed/duplex.
  **/
 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 {
-	u32 ctrl_reg, reg;
+	u32 ctrl_ext, ctrl_reg, reg;
+	bool pcs_autoneg;

 	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
 	    !igb_sgmii_active_82575(hw))
@@ -1032,9 +1037,9 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

 	/* power on the sfp cage if present */
-	reg = rd32(E1000_CTRL_EXT);
-	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
-	wr32(E1000_CTRL_EXT, reg);
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+	wr32(E1000_CTRL_EXT, ctrl_ext);

 	ctrl_reg = rd32(E1000_CTRL);
 	ctrl_reg |= E1000_CTRL_SLU;
@@ -1051,15 +1056,31 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)

 	reg = rd32(E1000_PCS_LCTL);

-	if (igb_sgmii_active_82575(hw)) {
-		/* allow time for SFP cage to power up phy */
-		msleep(300);
+	/* default pcs_autoneg to the same setting as mac autoneg */
+	pcs_autoneg = hw->mac.autoneg;

-		/* AN time out should be disabled for SGMII mode */
+	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+	case E1000_CTRL_EXT_LINK_MODE_SGMII:
+		/* sgmii mode lets the phy handle forcing speed/duplex */
+		pcs_autoneg = true;
+		/* autoneg time out should be disabled for SGMII mode */
 		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
-	} else {
+		break;
+	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+		/* disable PCS autoneg and support parallel detect only */
+		pcs_autoneg = false;
+	default:
+		/*
+		 * non-SGMII modes only supports a speed of 1000/Full for the
+		 * link so it is best to just force the MAC and let the pcs
+		 * link either autoneg or be forced to 1000/Full
+		 */
 		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
 		            E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+		/* set speed of 1000/Full if speed/duplex is forced */
+		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+		break;
 	}

 	wr32(E1000_CTRL, ctrl_reg);
@@ -1070,7 +1091,6 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 	 * mode that will be compatible with older link partners and switches.
 	 * However, both are supported by the hardware and some drivers/tools.
 	 */
-
 	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
 		 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

@@ -1080,25 +1100,16 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 	 */
 	reg |= E1000_PCS_LCTL_FORCE_FCTRL;

-	/*
-	 * we always set sgmii to autoneg since it is the phy that will be
-	 * forcing the link and the serdes is just a go-between
-	 */
-	if (hw->mac.autoneg || igb_sgmii_active_82575(hw)) {
+	if (pcs_autoneg) {
 		/* Set PCS register for autoneg */
-		reg |= E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
-		       E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
-		       E1000_PCS_LCTL_AN_ENABLE |     /* Enable Autoneg */
-		       E1000_PCS_LCTL_AN_RESTART;     /* Restart autoneg */
-		hw_dbg("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
+		reg |= E1000_PCS_LCTL_AN_ENABLE |     /* Enable Autoneg */
+		       E1000_PCS_LCTL_AN_RESTART;     /* Restart autoneg */
+		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
 	} else {
-		/* Set PCS register for forced speed */
-		reg |= E1000_PCS_LCTL_FLV_LINK_UP |   /* Force link up */
-		       E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
-		       E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
-		       E1000_PCS_LCTL_FSD |           /* Force Speed */
-		       E1000_PCS_LCTL_FORCE_LINK;     /* Force Link */
-		hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
+		/* Set PCS register for forced link */
+		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
+
+		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
 	}

 	wr32(E1000_PCS_LCTL, reg);
@@ -1167,13 +1178,38 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;

-	if (igb_check_alt_mac_addr(hw))
-		ret_val = igb_read_mac_addr(hw);
+	/*
+	 * If there's an alternate MAC address place it in RAR0
+	 * so that it will override the Si installed default perm
+	 * address.
+	 */
+	ret_val = igb_check_alt_mac_addr(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_read_mac_addr(hw);

+out:
 	return ret_val;
 }

 /**
+ *  igb_power_down_phy_copper_82575 - Remove link during PHY power down
+ *  @hw: pointer to the HW structure
+ *
+ *  In the case of a PHY power down to save power, or to turn off link during a
+ *  driver unload, or wake on lan is not enabled, remove the link.
+ **/
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
+		igb_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
  * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
  * @hw: pointer to the HW structure
  *
@@ -1181,61 +1217,59 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
 **/
 static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
 {
-	u32 temp;
-
 	igb_clear_hw_cntrs_base(hw);

-	temp = rd32(E1000_PRC64);
-	temp = rd32(E1000_PRC127);
-	temp = rd32(E1000_PRC255);
-	temp = rd32(E1000_PRC511);
-	temp = rd32(E1000_PRC1023);
-	temp = rd32(E1000_PRC1522);
-	temp = rd32(E1000_PTC64);
-	temp = rd32(E1000_PTC127);
-	temp = rd32(E1000_PTC255);
-	temp = rd32(E1000_PTC511);
-	temp = rd32(E1000_PTC1023);
-	temp = rd32(E1000_PTC1522);
+	rd32(E1000_PRC64);
+	rd32(E1000_PRC127);
+	rd32(E1000_PRC255);
+	rd32(E1000_PRC511);
+	rd32(E1000_PRC1023);
+	rd32(E1000_PRC1522);
+	rd32(E1000_PTC64);
+	rd32(E1000_PTC127);
+	rd32(E1000_PTC255);
+	rd32(E1000_PTC511);
+	rd32(E1000_PTC1023);
+	rd32(E1000_PTC1522);

-	temp = rd32(E1000_ALGNERRC);
-	temp = rd32(E1000_RXERRC);
-	temp = rd32(E1000_TNCRS);
-	temp = rd32(E1000_CEXTERR);
-	temp = rd32(E1000_TSCTC);
-	temp = rd32(E1000_TSCTFC);
+	rd32(E1000_ALGNERRC);
+	rd32(E1000_RXERRC);
+	rd32(E1000_TNCRS);
+	rd32(E1000_CEXTERR);
+	rd32(E1000_TSCTC);
+	rd32(E1000_TSCTFC);

-	temp = rd32(E1000_MGTPRC);
-	temp = rd32(E1000_MGTPDC);
-	temp = rd32(E1000_MGTPTC);
+	rd32(E1000_MGTPRC);
+	rd32(E1000_MGTPDC);
+	rd32(E1000_MGTPTC);

-	temp = rd32(E1000_IAC);
-	temp = rd32(E1000_ICRXOC);
+	rd32(E1000_IAC);
+	rd32(E1000_ICRXOC);

-	temp = rd32(E1000_ICRXPTC);
-	temp = rd32(E1000_ICRXATC);
-	temp = rd32(E1000_ICTXPTC);
-	temp = rd32(E1000_ICTXATC);
-	temp = rd32(E1000_ICTXQEC);
-	temp = rd32(E1000_ICTXQMTC);
-	temp = rd32(E1000_ICRXDMTC);
+	rd32(E1000_ICRXPTC);
+	rd32(E1000_ICRXATC);
+	rd32(E1000_ICTXPTC);
+	rd32(E1000_ICTXATC);
+	rd32(E1000_ICTXQEC);
+	rd32(E1000_ICTXQMTC);
+	rd32(E1000_ICRXDMTC);

-	temp = rd32(E1000_CBTMPC);
-	temp = rd32(E1000_HTDPMC);
-	temp = rd32(E1000_CBRMPC);
-	temp = rd32(E1000_RPTHC);
-	temp = rd32(E1000_HGPTC);
-	temp = rd32(E1000_HTCBDPC);
-	temp = rd32(E1000_HGORCL);
-	temp = rd32(E1000_HGORCH);
-	temp = rd32(E1000_HGOTCL);
-	temp = rd32(E1000_HGOTCH);
-	temp = rd32(E1000_LENERRS);
+	rd32(E1000_CBTMPC);
+	rd32(E1000_HTDPMC);
+	rd32(E1000_CBRMPC);
+	rd32(E1000_RPTHC);
+	rd32(E1000_HGPTC);
+	rd32(E1000_HTCBDPC);
+	rd32(E1000_HGORCL);
+	rd32(E1000_HGORCH);
+	rd32(E1000_HGOTCL);
+	rd32(E1000_HGOTCH);
+	rd32(E1000_LENERRS);

 	/* This register should not be read in copper configurations */
 	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
 	    igb_sgmii_active_82575(hw))
-		temp = rd32(E1000_SCVPC);
+		rd32(E1000_SCVPC);
 }

 /**
@@ -1400,8 +1434,183 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
 	wr32(E1000_VT_CTL, vt_ctl);
 }

+/**
+ *  igb_read_phy_reg_82580 - Read 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	u32 mdicnfg = 0;
+	s32 ret_val;
+
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * We config the phy address in MDICNFG register now. Same bits
+	 * as before. The values in MDIC can be written but will be
+	 * ignored. This allows us to call the old function after
+	 * configuring the PHY address in the new register
+	 */
+	mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
+	wr32(E1000_MDICNFG, mdicnfg);
+
+	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_82580 - Write 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	u32 mdicnfg = 0;
+	s32 ret_val;
+
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * We config the phy address in MDICNFG register now. Same bits
+	 * as before. The values in MDIC can be written but will be
+	 * ignored. This allows us to call the old function after
+	 * configuring the PHY address in the new register
+	 */
+	mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
+	wr32(E1000_MDICNFG, mdicnfg);
+
+	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_reset_hw_82580 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets function or entire device (all ports, etc.)
+ *  to a known state.
+ **/
+static s32 igb_reset_hw_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	/* BH SW mailbox bit in SW_FW_SYNC */
+	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+	u32 ctrl, icr;
+	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+
+	hw->dev_spec._82575.global_device_reset = false;
+
+	/* Get current control state. */
+	ctrl = rd32(E1000_CTRL);
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = igb_disable_pcie_master(hw);
+	if (ret_val)
+		hw_dbg("PCI-E Master disable polling has failed.\n");
+
+	hw_dbg("Masking off all interrupts\n");
+	wr32(E1000_IMC, 0xffffffff);
+	wr32(E1000_RCTL, 0);
+	wr32(E1000_TCTL, E1000_TCTL_PSP);
+	wrfl();
+
+	msleep(10);
+
+	/* Determine whether or not a global dev reset is requested */
+	if (global_device_reset &&
+		igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
+			global_device_reset = false;
+
+	if (global_device_reset &&
+		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
+		ctrl |= E1000_CTRL_DEV_RST;
+	else
+		ctrl |= E1000_CTRL_RST;
+
+	wr32(E1000_CTRL, ctrl);
+
+	/* Add delay to insure DEV_RST has time to complete */
+	if (global_device_reset)
+		msleep(5);
+
+	ret_val = igb_get_auto_rd_done(hw);
+	if (ret_val) {
+		/*
+		 * When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+		 */
+		hw_dbg("Auto Read Done did not complete\n");
+	}
+
+	/* If EEPROM is not present, run manual init scripts */
+	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
+		igb_reset_init_script_82575(hw);
+
+	/* clear global device reset status bit */
+	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+	/* Clear any pending interrupt events. */
+	wr32(E1000_IMC, 0xffffffff);
+	icr = rd32(E1000_ICR);
+
+	/* Install any alternate MAC address into RAR0 */
+	ret_val = igb_check_alt_mac_addr(hw);
+
+	/* Release semaphore */
+	if (global_device_reset)
+		igb_release_swfw_sync_82575(hw, swmbsw_mask);
+
+	return ret_val;
+}
+
+/**
+ *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
+ *  @data: data received by reading RXPBS register
+ *
+ *  The 82580 uses a table based approach for packet buffer allocation sizes.
+ *  This function converts the retrieved value into the correct table value
+ *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ *     0x0 36  72 144   1   2   4   8  16
+ *     0x8 35  70 140 rsv rsv rsv rsv rsv
+ */
+u16 igb_rxpbs_adjust_82580(u32 data)
+{
+	u16 ret_val = 0;
+
+	if (data < E1000_82580_RXPBS_TABLE_SIZE)
+		ret_val = e1000_82580_rxpbs_table[data];
+
+	return ret_val;
+}
+
 static struct e1000_mac_operations e1000_mac_ops_82575 = {
-	.reset_hw             = igb_reset_hw_82575,
 	.init_hw              = igb_init_hw_82575,
 	.check_for_link       = igb_check_for_link_82575,
 	.rar_set              = igb_rar_set,
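
Note: igb_rxpbs_adjust_82580(), added at the end of this file, is a pure lookup over
the e1000_82580_rxpbs_table added near the top. A minimal standalone sketch of the
same conversion follows; it is illustrative only (the names and the printf harness
are not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same table the patch adds to e1000_82575.c (sizes in KB). */
static const uint16_t rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16,
                                        35, 70, 140 };
#define RXPBS_TABLE_SIZE (sizeof(rxpbs_table) / sizeof(rxpbs_table[0]))

/* Mirrors igb_rxpbs_adjust_82580(): reserved encodings (0xB and up)
 * fall outside the table and yield 0. */
static uint16_t rxpbs_adjust(uint32_t data)
{
        uint16_t ret = 0;

        if (data < RXPBS_TABLE_SIZE)
                ret = rxpbs_table[data];
        return ret;
}

int main(void)
{
        printf("0x2 -> %u KB\n", rxpbs_adjust(0x2)); /* 144 */
        printf("0xB -> %u KB\n", rxpbs_adjust(0xB)); /* 0 (reserved) */
        return 0;
}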
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index ebd146fd4e15..fbe1c99c193c 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -29,6 +29,8 @@
 #define _E1000_82575_H_

 extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
 extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);

 #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
@@ -38,6 +40,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);

 #define E1000_RAR_ENTRIES_82575   16
 #define E1000_RAR_ENTRIES_82576   24
+#define E1000_RAR_ENTRIES_82580   24
+
+#define E1000_SW_SYNCH_MB       0x00000100
+#define E1000_STAT_DEV_RST_SET  0x00100000
+#define E1000_CTRL_DEV_RST      0x20000000

 /* SRRCTL bit definitions */
 #define E1000_SRRCTL_BSIZEPKT_SHIFT     10 /* Shift _right_ */
@@ -66,6 +73,8 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
 			  E1000_EICR_RX_QUEUE3)

 /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */

 /* Receive Descriptor - Advanced */
 union e1000_adv_rx_desc {
@@ -98,6 +107,7 @@ union e1000_adv_rx_desc {

 #define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
 #define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */

 /* Transmit Descriptor - Advanced */
 union e1000_adv_tx_desc {
@@ -167,6 +177,18 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
 #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */

+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
+#define E1000_ETQF_1588            (1 << 30)
+
+/* FTQF register bit definitions */
+#define E1000_FTQF_VF_BP               0x00008000
+#define E1000_FTQF_1588_TIME_STAMP     0x08000000
+#define E1000_FTQF_MASK                0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP       0x10000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575          0x0400
 #define MAX_NUM_VFS                   8

 #define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
@@ -199,12 +221,27 @@ struct e1000_adv_tx_context_desc {
 #define E1000_VLVF_LVLAN               0x00100000
 #define E1000_VLVF_VLANID_ENABLE       0x80000000

+#define E1000_VMVIR_VLANA_DEFAULT      0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER        0x80000000 /* Never insert VLAN tag */
+
 #define E1000_IOVCTL 0x05BBC
 #define E1000_IOVCTL_REUSE_VFQ 0x00000001

+#define E1000_RPLOLR_STRVLAN   0x40000000
+#define E1000_RPLOLR_STRCRC    0x80000000
+
+#define E1000_DTXCTL_8023LL     0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN     0x0020
+#define E1000_DTXCTL_SPOOF_INT  0x0040
+
 #define ALL_QUEUES   0xFFFF

+/* RX packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
 void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
 void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
+u16 igb_rxpbs_adjust_82580(u32 data);

 #endif
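
Note: the three new reset-related constants above (E1000_SW_SYNCH_MB,
E1000_STAT_DEV_RST_SET, E1000_CTRL_DEV_RST) are consumed by igb_reset_hw_82580()
in e1000_82575.c. A condensed sketch of that function's reset-selection logic; the
E1000_CTRL_RST value is taken from the driver's pre-existing defines, and the
helper itself is illustrative, not driver code:

#include <stdbool.h>
#include <stdint.h>

#define E1000_STAT_DEV_RST_SET  0x00100000 /* STATUS: device reset asserted */
#define E1000_CTRL_DEV_RST      0x20000000 /* CTRL: global device reset */
#define E1000_CTRL_RST          0x04000000 /* CTRL: port reset (pre-existing) */

/* Condensed from igb_reset_hw_82580(): a global (all-port) reset is only
 * issued when it was requested, the SW/FW semaphore guarding it was won,
 * and no device reset is already latched in the STATUS register;
 * otherwise the function falls back to a per-port reset. */
static uint32_t choose_reset_bit(bool global_requested, bool got_semaphore,
                                 uint32_t status)
{
        if (global_requested && got_semaphore &&
            !(status & E1000_STAT_DEV_RST_SET))
                return E1000_CTRL_DEV_RST; /* resets every port */
        return E1000_CTRL_RST;             /* resets this function only */
}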
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index cb916833f303..fe6cf1b696c7 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -49,6 +49,7 @@
 #define E1000_CTRL_EXT_PFRSTD    0x00004000
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
 #define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
 #define E1000_CTRL_EXT_EIAME          0x01000000
 #define E1000_CTRL_EXT_IRCA           0x00000001
@@ -312,12 +313,6 @@
 #define E1000_PBA_34K 0x0022
 #define E1000_PBA_64K 0x0040    /* 64KB */

-#define IFS_MAX       80
-#define IFS_MIN       40
-#define IFS_RATIO     4
-#define IFS_STEP      10
-#define MIN_NUM_XMITS 1000
-
 /* SW Semaphore Register */
 #define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
 #define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
@@ -329,6 +324,7 @@
 #define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
 #define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
 #define E1000_ICR_VMMB          0x00000100 /* VM MB event */
+#define E1000_ICR_DRSTA         0x40000000 /* Device Reset Asserted */
 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_INT_ASSERTED  0x80000000
 /* LAN connected device generates an interrupt */
@@ -370,6 +366,7 @@
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
 #define E1000_IMS_DOUTSYNC  E1000_ICR_DOUTSYNC  /* NIC DMA out of sync */

 /* Extended Interrupt Mask Set */
@@ -378,6 +375,7 @@
 /* Interrupt Cause Set */
 #define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
 #define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Aserted */

 /* Extended Interrupt Cause Set */

@@ -435,6 +433,39 @@
 /* Flow Control */
 #define E1000_FCRTL_XONE 0x80000000         /* Enable XON frame transmission */

+#define E1000_TSYNCTXCTL_VALID    0x00000001 /* tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable tx timestampping */
+
+#define E1000_TSYNCRXCTL_VALID      0x00000001 /* rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK  0x0000000E /* rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2       0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1       0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2    0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL         0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2    0x0A
+#define E1000_TSYNCRXCTL_ENABLED    0x00000010 /* enable rx timestampping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK             0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE           0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE      0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE       0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE     0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE     0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK                   0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE            0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE       0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE      0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE             0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE           0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE  0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE             0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE           0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE           0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
 #define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
@@ -444,6 +475,7 @@
 /* PHY Control Register */
 #define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
 #define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
 #define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
 #define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
 #define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
@@ -524,8 +556,12 @@
 #define NVM_ALT_MAC_ADDR_PTR       0x0037
 #define NVM_CHECKSUM_REG           0x003F

-#define E1000_NVM_CFG_DONE_PORT_0  0x40000 /* MNG config cycle done */
-#define E1000_NVM_CFG_DONE_PORT_1  0x80000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2  0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3  0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)

 /* Mask bits for fields in Word 0x0f of the NVM */
 #define NVM_WORD0F_PAUSE_MASK      0x3000
@@ -592,6 +628,7 @@
  */
 #define M88E1111_I_PHY_ID    0x01410CC0
 #define IGP03E1000_E_PHY_ID  0x02A80390
+#define I82580_I_PHY_ID      0x015403A0
 #define M88_VENDOR           0x0141

 /* M88E1000 Specific Registers */
@@ -678,4 +715,8 @@
 #define E1000_VFTA_ENTRY_MASK            0x7F
 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK  0x1F

+/* DMA Coalescing register fields */
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
+                                                      on DMA coal */
+
 #endif
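
Note: the new NVM_82580_LAN_FUNC_OFFSET() macro gives each LAN function on the
82580 its own 0x40-word block in the NVM, with function 0 keeping the legacy
layout at offset 0. A quick check of the arithmetic (the printf harness is
illustrative only, not driver code):

#include <stdio.h>

/* Macro exactly as added to e1000_defines.h. */
#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)

int main(void)
{
        /* func 0 -> 0x000, func 1 -> 0x080, func 2 -> 0x0C0, func 3 -> 0x100 */
        for (int f = 0; f < 4; f++)
                printf("LAN%d NVM offset: 0x%03X\n", f,
                       NVM_82580_LAN_FUNC_OFFSET(f));
        return 0;
}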
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 119869b1124d..82a533f5192a 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -41,21 +41,37 @@ struct e1000_hw;
 #define E1000_DEV_ID_82576_FIBER              0x10E6
 #define E1000_DEV_ID_82576_SERDES             0x10E7
 #define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2    0x1526
 #define E1000_DEV_ID_82576_NS                 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES          0x1518
 #define E1000_DEV_ID_82576_SERDES_QUAD        0x150D
 #define E1000_DEV_ID_82575EB_COPPER           0x10A7
 #define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
 #define E1000_DEV_ID_82575GB_QUAD_COPPER      0x10D6
+#define E1000_DEV_ID_82580_COPPER             0x150E
+#define E1000_DEV_ID_82580_FIBER              0x150F
+#define E1000_DEV_ID_82580_SERDES             0x1510
+#define E1000_DEV_ID_82580_SGMII              0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL        0x1516

 #define E1000_REVISION_2 2
 #define E1000_REVISION_4 4

+#define E1000_FUNC_0     0
 #define E1000_FUNC_1     1
+#define E1000_FUNC_2     2
+#define E1000_FUNC_3     3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2   6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3   9

 enum e1000_mac_type {
 	e1000_undefined = 0,
 	e1000_82575,
 	e1000_82576,
+	e1000_82580,
 	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
 };

@@ -70,7 +86,6 @@ enum e1000_nvm_type {
 	e1000_nvm_unknown = 0,
 	e1000_nvm_none,
 	e1000_nvm_eeprom_spi,
-	e1000_nvm_eeprom_microwire,
 	e1000_nvm_flash_hw,
 	e1000_nvm_flash_sw
 };
@@ -79,8 +94,6 @@ enum e1000_nvm_override {
 	e1000_nvm_override_none = 0,
 	e1000_nvm_override_spi_small,
 	e1000_nvm_override_spi_large,
-	e1000_nvm_override_microwire_small,
-	e1000_nvm_override_microwire_large
 };

 enum e1000_phy_type {
@@ -92,6 +105,7 @@ enum e1000_phy_type {
 	e1000_phy_gg82563,
 	e1000_phy_igp_3,
 	e1000_phy_ife,
+	e1000_phy_82580,
 };

 enum e1000_bus_type {
@@ -288,6 +302,7 @@ struct e1000_mac_operations {

 struct e1000_phy_operations {
 	s32  (*acquire)(struct e1000_hw *);
+	s32  (*check_polarity)(struct e1000_hw *);
 	s32  (*check_reset_block)(struct e1000_hw *);
 	s32  (*force_speed_duplex)(struct e1000_hw *);
 	s32  (*get_cfg_done)(struct e1000_hw *hw);
@@ -325,20 +340,14 @@ struct e1000_mac_info {

 	enum e1000_mac_type type;

-	u32 collision_delta;
 	u32 ledctl_default;
 	u32 ledctl_mode1;
 	u32 ledctl_mode2;
 	u32 mc_filter_type;
-	u32 tx_packet_delta;
 	u32 txcw;

-	u16 current_ifs_val;
-	u16 ifs_max_val;
-	u16 ifs_min_val;
-	u16 ifs_ratio;
-	u16 ifs_step_size;
 	u16 mta_reg_count;
+	u16 uta_reg_count;

 	/* Maximum size of the MTA register table in all supported adapters */
 #define MAX_MTA_REG 128
@@ -463,6 +472,7 @@ struct e1000_mbx_info {

 struct e1000_dev_spec_82575 {
 	bool sgmii_active;
+	bool global_device_reset;
 };

 struct e1000_hw {
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index 7d76bb085e10..be8d010e4021 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c | |||
@@ -185,13 +185,12 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) | |||
185 | } | 185 | } |
186 | 186 | ||
187 | if (nvm_alt_mac_addr_offset == 0xFFFF) { | 187 | if (nvm_alt_mac_addr_offset == 0xFFFF) { |
188 | ret_val = -(E1000_NOT_IMPLEMENTED); | 188 | /* There is no Alternate MAC Address */ |
189 | goto out; | 189 | goto out; |
190 | } | 190 | } |
191 | 191 | ||
192 | if (hw->bus.func == E1000_FUNC_1) | 192 | if (hw->bus.func == E1000_FUNC_1) |
193 | nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16); | 193 | nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; |
194 | |||
195 | for (i = 0; i < ETH_ALEN; i += 2) { | 194 | for (i = 0; i < ETH_ALEN; i += 2) { |
196 | offset = nvm_alt_mac_addr_offset + (i >> 1); | 195 | offset = nvm_alt_mac_addr_offset + (i >> 1); |
197 | ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); | 196 | ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); |
@@ -206,14 +205,16 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) | |||
206 | 205 | ||
207 | /* if multicast bit is set, the alternate address will not be used */ | 206 | /* if multicast bit is set, the alternate address will not be used */ |
208 | if (alt_mac_addr[0] & 0x01) { | 207 | if (alt_mac_addr[0] & 0x01) { |
209 | ret_val = -(E1000_NOT_IMPLEMENTED); | 208 | hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); |
210 | goto out; | 209 | goto out; |
211 | } | 210 | } |
212 | 211 | ||
213 | for (i = 0; i < ETH_ALEN; i++) | 212 | /* |
214 | hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i]; | 213 | * We have a valid alternate MAC address, and we want to treat it the |
215 | 214 | * same as the normal permanent MAC address stored by the HW into the | |
216 | hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0); | 215 | * RAR. Do this by mapping this address into RAR0. |
216 | */ | ||
217 | hw->mac.ops.rar_set(hw, alt_mac_addr, 0); | ||
217 | 218 | ||
218 | out: | 219 | out: |
219 | return ret_val; | 220 | return ret_val; |
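The LAN0-LAN3 offsets introduced in e1000_hw.h are counted in 16-bit NVM words: an Ethernet address is ETH_ALEN/sizeof(u16) = 3 words, so each port's alternate address starts 3 words after the previous one, which is exactly what the old ETH_ALEN/sizeof(u16) arithmetic computed for function 1. A minimal sketch of the per-function lookup, assuming the usual NVM_ALT_MAC_ADDR_PTR pointer word and the hw->nvm.ops.read() accessor used above:

    /* Sketch: find this port's alternate MAC address in the NVM.
     * NVM_ALT_MAC_ADDR_PTR is assumed to be the standard pointer word. */
    u16 base, offset = 0;

    hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, &base);
    switch (hw->bus.func) {
    case E1000_FUNC_0: offset = E1000_ALT_MAC_ADDRESS_OFFSET_LAN0; break;
    case E1000_FUNC_1: offset = E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; break;
    case E1000_FUNC_2: offset = E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; break;
    case E1000_FUNC_3: offset = E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; break;
    }
    /* The three words at base + offset hold the six address bytes. */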
@@ -246,8 +247,15 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) | |||
246 | if (rar_low || rar_high) | 247 | if (rar_low || rar_high) |
247 | rar_high |= E1000_RAH_AV; | 248 | rar_high |= E1000_RAH_AV; |
248 | 249 | ||
250 | /* | ||
251 | * Some bridges will combine consecutive 32-bit writes into | ||
252 | * a single burst write, which will malfunction on some parts. | ||
253 | * The flushes avoid this. | ||
254 | */ | ||
249 | wr32(E1000_RAL(index), rar_low); | 255 | wr32(E1000_RAL(index), rar_low); |
256 | wrfl(); | ||
250 | wr32(E1000_RAH(index), rar_high); | 257 | wr32(E1000_RAH(index), rar_high); |
258 | wrfl(); | ||
251 | } | 259 | } |
252 | 260 | ||
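The wrfl() calls added above force each RAL/RAH write to post individually; in this driver wrfl() is a read of the STATUS register, and a read from the device flushes any pending posted writes. The same idiom with raw MMIO accessors, for illustration ('hw_addr' stands in for the mapped BAR):

    /* Illustration of the write-then-flush idiom with raw accessors. */
    writel(rar_low, hw_addr + E1000_RAL(index));
    readl(hw_addr + E1000_STATUS);   /* flush: RAL write goes out alone */
    writel(rar_high, hw_addr + E1000_RAH(index));
    readl(hw_addr + E1000_STATUS);   /* flush: RAH not merged with RAL */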
253 | /** | 261 | /** |
@@ -399,45 +407,43 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, | |||
399 | **/ | 407 | **/ |
400 | void igb_clear_hw_cntrs_base(struct e1000_hw *hw) | 408 | void igb_clear_hw_cntrs_base(struct e1000_hw *hw) |
401 | { | 409 | { |
402 | u32 temp; | 410 | rd32(E1000_CRCERRS); |
403 | 411 | rd32(E1000_SYMERRS); | |
404 | temp = rd32(E1000_CRCERRS); | 412 | rd32(E1000_MPC); |
405 | temp = rd32(E1000_SYMERRS); | 413 | rd32(E1000_SCC); |
406 | temp = rd32(E1000_MPC); | 414 | rd32(E1000_ECOL); |
407 | temp = rd32(E1000_SCC); | 415 | rd32(E1000_MCC); |
408 | temp = rd32(E1000_ECOL); | 416 | rd32(E1000_LATECOL); |
409 | temp = rd32(E1000_MCC); | 417 | rd32(E1000_COLC); |
410 | temp = rd32(E1000_LATECOL); | 418 | rd32(E1000_DC); |
411 | temp = rd32(E1000_COLC); | 419 | rd32(E1000_SEC); |
412 | temp = rd32(E1000_DC); | 420 | rd32(E1000_RLEC); |
413 | temp = rd32(E1000_SEC); | 421 | rd32(E1000_XONRXC); |
414 | temp = rd32(E1000_RLEC); | 422 | rd32(E1000_XONTXC); |
415 | temp = rd32(E1000_XONRXC); | 423 | rd32(E1000_XOFFRXC); |
416 | temp = rd32(E1000_XONTXC); | 424 | rd32(E1000_XOFFTXC); |
417 | temp = rd32(E1000_XOFFRXC); | 425 | rd32(E1000_FCRUC); |
418 | temp = rd32(E1000_XOFFTXC); | 426 | rd32(E1000_GPRC); |
419 | temp = rd32(E1000_FCRUC); | 427 | rd32(E1000_BPRC); |
420 | temp = rd32(E1000_GPRC); | 428 | rd32(E1000_MPRC); |
421 | temp = rd32(E1000_BPRC); | 429 | rd32(E1000_GPTC); |
422 | temp = rd32(E1000_MPRC); | 430 | rd32(E1000_GORCL); |
423 | temp = rd32(E1000_GPTC); | 431 | rd32(E1000_GORCH); |
424 | temp = rd32(E1000_GORCL); | 432 | rd32(E1000_GOTCL); |
425 | temp = rd32(E1000_GORCH); | 433 | rd32(E1000_GOTCH); |
426 | temp = rd32(E1000_GOTCL); | 434 | rd32(E1000_RNBC); |
427 | temp = rd32(E1000_GOTCH); | 435 | rd32(E1000_RUC); |
428 | temp = rd32(E1000_RNBC); | 436 | rd32(E1000_RFC); |
429 | temp = rd32(E1000_RUC); | 437 | rd32(E1000_ROC); |
430 | temp = rd32(E1000_RFC); | 438 | rd32(E1000_RJC); |
431 | temp = rd32(E1000_ROC); | 439 | rd32(E1000_TORL); |
432 | temp = rd32(E1000_RJC); | 440 | rd32(E1000_TORH); |
433 | temp = rd32(E1000_TORL); | 441 | rd32(E1000_TOTL); |
434 | temp = rd32(E1000_TORH); | 442 | rd32(E1000_TOTH); |
435 | temp = rd32(E1000_TOTL); | 443 | rd32(E1000_TPR); |
436 | temp = rd32(E1000_TOTH); | 444 | rd32(E1000_TPT); |
437 | temp = rd32(E1000_TPR); | 445 | rd32(E1000_MPTC); |
438 | temp = rd32(E1000_TPT); | 446 | rd32(E1000_BPTC); |
439 | temp = rd32(E1000_MPTC); | ||
440 | temp = rd32(E1000_BPTC); | ||
441 | } | 447 | } |
442 | 448 | ||
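The rewrite works because these statistics registers are clear-on-read: the value can simply be discarded, and dropping the dead temp assignments avoids set-but-unused warnings. A table-driven variant of the same clear, as a sketch:

    /* Sketch: clear a run of clear-on-read statistics registers. */
    static const u32 cor_regs[] = {
            E1000_CRCERRS, E1000_SYMERRS, E1000_MPC, E1000_SCC,
            /* ... remaining counters as listed above ... */
            E1000_MPTC, E1000_BPTC,
    };
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(cor_regs); i++)
            rd32(cor_regs[i]);      /* the read itself resets the counter */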
443 | /** | 449 | /** |
@@ -1298,76 +1304,6 @@ out: | |||
1298 | } | 1304 | } |
1299 | 1305 | ||
1300 | /** | 1306 | /** |
1301 | * igb_reset_adaptive - Reset Adaptive Interframe Spacing | ||
1302 | * @hw: pointer to the HW structure | ||
1303 | * | ||
1304 | * Reset the Adaptive Interframe Spacing throttle to default values. | ||
1305 | **/ | ||
1306 | void igb_reset_adaptive(struct e1000_hw *hw) | ||
1307 | { | ||
1308 | struct e1000_mac_info *mac = &hw->mac; | ||
1309 | |||
1310 | if (!mac->adaptive_ifs) { | ||
1311 | hw_dbg("Not in Adaptive IFS mode!\n"); | ||
1312 | goto out; | ||
1313 | } | ||
1314 | |||
1315 | if (!mac->ifs_params_forced) { | ||
1316 | mac->current_ifs_val = 0; | ||
1317 | mac->ifs_min_val = IFS_MIN; | ||
1318 | mac->ifs_max_val = IFS_MAX; | ||
1319 | mac->ifs_step_size = IFS_STEP; | ||
1320 | mac->ifs_ratio = IFS_RATIO; | ||
1321 | } | ||
1322 | |||
1323 | mac->in_ifs_mode = false; | ||
1324 | wr32(E1000_AIT, 0); | ||
1325 | out: | ||
1326 | return; | ||
1327 | } | ||
1328 | |||
1329 | /** | ||
1330 | * igb_update_adaptive - Update Adaptive Interframe Spacing | ||
1331 | * @hw: pointer to the HW structure | ||
1332 | * | ||
1333 | * Update the Adaptive Interframe Spacing Throttle value based on the | ||
1334 | * time between transmitted packets and time between collisions. | ||
1335 | **/ | ||
1336 | void igb_update_adaptive(struct e1000_hw *hw) | ||
1337 | { | ||
1338 | struct e1000_mac_info *mac = &hw->mac; | ||
1339 | |||
1340 | if (!mac->adaptive_ifs) { | ||
1341 | hw_dbg("Not in Adaptive IFS mode!\n"); | ||
1342 | goto out; | ||
1343 | } | ||
1344 | |||
1345 | if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { | ||
1346 | if (mac->tx_packet_delta > MIN_NUM_XMITS) { | ||
1347 | mac->in_ifs_mode = true; | ||
1348 | if (mac->current_ifs_val < mac->ifs_max_val) { | ||
1349 | if (!mac->current_ifs_val) | ||
1350 | mac->current_ifs_val = mac->ifs_min_val; | ||
1351 | else | ||
1352 | mac->current_ifs_val += | ||
1353 | mac->ifs_step_size; | ||
1354 | wr32(E1000_AIT, | ||
1355 | mac->current_ifs_val); | ||
1356 | } | ||
1357 | } | ||
1358 | } else { | ||
1359 | if (mac->in_ifs_mode && | ||
1360 | (mac->tx_packet_delta <= MIN_NUM_XMITS)) { | ||
1361 | mac->current_ifs_val = 0; | ||
1362 | mac->in_ifs_mode = false; | ||
1363 | wr32(E1000_AIT, 0); | ||
1364 | } | ||
1365 | } | ||
1366 | out: | ||
1367 | return; | ||
1368 | } | ||
1369 | |||
1370 | /** | ||
1371 | * igb_validate_mdi_setting - Verify MDI/MDIx settings | 1307 | * igb_validate_mdi_setting - Verify MDI/MDIx settings |
1372 | * @hw: pointer to the HW structure | 1308 | * @hw: pointer to the HW structure |
1373 | * | 1309 | * |
@@ -1431,7 +1367,8 @@ out: | |||
1431 | * igb_enable_mng_pass_thru - Enable processing of ARP's | 1367 | * igb_enable_mng_pass_thru - Enable processing of ARP's |
1432 | * @hw: pointer to the HW structure | 1368 | * @hw: pointer to the HW structure |
1433 | * | 1369 | * |
1434 | * Verifies the hardware needs to allow ARPs to be processed by the host. | 1370 | * Verifies the hardware needs to leave interface enabled so that frames can |
1371 | * be directed to and from the management interface. | ||
1435 | **/ | 1372 | **/ |
1436 | bool igb_enable_mng_pass_thru(struct e1000_hw *hw) | 1373 | bool igb_enable_mng_pass_thru(struct e1000_hw *hw) |
1437 | { | 1374 | { |
@@ -1444,8 +1381,7 @@ bool igb_enable_mng_pass_thru(struct e1000_hw *hw) | |||
1444 | 1381 | ||
1445 | manc = rd32(E1000_MANC); | 1382 | manc = rd32(E1000_MANC); |
1446 | 1383 | ||
1447 | if (!(manc & E1000_MANC_RCV_TCO_EN) || | 1384 | if (!(manc & E1000_MANC_RCV_TCO_EN)) |
1448 | !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) | ||
1449 | goto out; | 1385 | goto out; |
1450 | 1386 | ||
1451 | if (hw->mac.arc_subsystem_valid) { | 1387 | if (hw->mac.arc_subsystem_valid) { |
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h index bca17d882417..601be99711c2 100644 --- a/drivers/net/igb/e1000_mac.h +++ b/drivers/net/igb/e1000_mac.h | |||
@@ -67,8 +67,6 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value); | |||
67 | void igb_put_hw_semaphore(struct e1000_hw *hw); | 67 | void igb_put_hw_semaphore(struct e1000_hw *hw); |
68 | void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); | 68 | void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); |
69 | s32 igb_check_alt_mac_addr(struct e1000_hw *hw); | 69 | s32 igb_check_alt_mac_addr(struct e1000_hw *hw); |
70 | void igb_reset_adaptive(struct e1000_hw *hw); | ||
71 | void igb_update_adaptive(struct e1000_hw *hw); | ||
72 | 70 | ||
73 | bool igb_enable_mng_pass_thru(struct e1000_hw *hw); | 71 | bool igb_enable_mng_pass_thru(struct e1000_hw *hw); |
74 | 72 | ||
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c index ed9058eca45c..c474cdb70047 100644 --- a/drivers/net/igb/e1000_mbx.c +++ b/drivers/net/igb/e1000_mbx.c | |||
@@ -143,12 +143,16 @@ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) | |||
143 | if (!countdown || !mbx->ops.check_for_msg) | 143 | if (!countdown || !mbx->ops.check_for_msg) |
144 | goto out; | 144 | goto out; |
145 | 145 | ||
146 | while (mbx->ops.check_for_msg(hw, mbx_id)) { | 146 | while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { |
147 | countdown--; | 147 | countdown--; |
148 | if (!countdown) | 148 | if (!countdown) |
149 | break; | 149 | break; |
150 | udelay(mbx->usec_delay); | 150 | udelay(mbx->usec_delay); |
151 | } | 151 | } |
152 | |||
153 | /* if we failed, all future posted messages fail until reset */ | ||
154 | if (!countdown) | ||
155 | mbx->timeout = 0; | ||
152 | out: | 156 | out: |
153 | return countdown ? 0 : -E1000_ERR_MBX; | 157 | return countdown ? 0 : -E1000_ERR_MBX; |
154 | } | 158 | } |
@@ -168,12 +172,16 @@ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) | |||
168 | if (!countdown || !mbx->ops.check_for_ack) | 172 | if (!countdown || !mbx->ops.check_for_ack) |
169 | goto out; | 173 | goto out; |
170 | 174 | ||
171 | while (mbx->ops.check_for_ack(hw, mbx_id)) { | 175 | while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { |
172 | countdown--; | 176 | countdown--; |
173 | if (!countdown) | 177 | if (!countdown) |
174 | break; | 178 | break; |
175 | udelay(mbx->usec_delay); | 179 | udelay(mbx->usec_delay); |
176 | } | 180 | } |
181 | |||
182 | /* if we failed, all future posted messages fail until reset */ | ||
183 | if (!countdown) | ||
184 | mbx->timeout = 0; | ||
177 | out: | 185 | out: |
178 | return countdown ? 0 : -E1000_ERR_MBX; | 186 | return countdown ? 0 : -E1000_ERR_MBX; |
179 | } | 187 | } |
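Both loops now share the same shape: a bounded poll followed by latching mbx->timeout to zero on expiry, so that igb_write_posted_mbx() (which now checks mbx->timeout) fails fast instead of re-polling a dead mailbox. The idiom, abstracted into a sketch where condition_ok() stands in for check_for_msg/check_for_ack succeeding:

    /* Sketch of the bounded-poll-with-latch idiom. */
    while (countdown && !condition_ok(hw, mbx_id)) {
            countdown--;
            udelay(mbx->usec_delay);
    }
    if (!countdown)
            mbx->timeout = 0;   /* poison: later posted ops bail out early */
    return countdown ? 0 : -E1000_ERR_MBX;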
@@ -217,12 +225,13 @@ out: | |||
217 | static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) | 225 | static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) |
218 | { | 226 | { |
219 | struct e1000_mbx_info *mbx = &hw->mbx; | 227 | struct e1000_mbx_info *mbx = &hw->mbx; |
220 | s32 ret_val = 0; | 228 | s32 ret_val = -E1000_ERR_MBX; |
221 | 229 | ||
222 | if (!mbx->ops.write) | 230 | /* exit if either we can't write or there isn't a defined timeout */ |
231 | if (!mbx->ops.write || !mbx->timeout) | ||
223 | goto out; | 232 | goto out; |
224 | 233 | ||
225 | /* send msg*/ | 234 | /* send msg */ |
226 | ret_val = mbx->ops.write(hw, msg, size, mbx_id); | 235 | ret_val = mbx->ops.write(hw, msg, size, mbx_id); |
227 | 236 | ||
228 | /* if msg sent wait until we receive an ack */ | 237 | /* if msg sent wait until we receive an ack */ |
@@ -305,6 +314,30 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) | |||
305 | } | 314 | } |
306 | 315 | ||
307 | /** | 316 | /** |
317 | * igb_obtain_mbx_lock_pf - obtain mailbox lock | ||
318 | * @hw: pointer to the HW structure | ||
319 | * @vf_number: the VF index | ||
320 | * | ||
321 | * return SUCCESS if we obtained the mailbox lock | ||
322 | **/ | ||
323 | static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) | ||
324 | { | ||
325 | s32 ret_val = -E1000_ERR_MBX; | ||
326 | u32 p2v_mailbox; | ||
327 | |||
328 | |||
329 | /* Take ownership of the buffer */ | ||
330 | wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); | ||
331 | |||
332 | /* reserve mailbox for vf use */ | ||
333 | p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); | ||
334 | if (p2v_mailbox & E1000_P2VMAILBOX_PFU) | ||
335 | ret_val = 0; | ||
336 | |||
337 | return ret_val; | ||
338 | } | ||
339 | |||
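The lock works by write-then-read-back: the PF sets the PFU (PF ownership) bit and re-reads P2VMAILBOX; the hardware only lets PFU stick when the VF side does not hold its own ownership bit, so seeing PFU on read-back proves the PF owns the buffer. Callers follow the pattern sketched here (E1000_VMBMEM is the mailbox memory array used by the read/write paths below):

    /* Sketch: PF-side mailbox access always starts with the lock. */
    if (igb_obtain_mbx_lock_pf(hw, vf_number))
            return -E1000_ERR_MBX;      /* VF currently owns the buffer */
    /* ... access the E1000_VMBMEM(vf_number) words, then signal STS/ACK ... */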
340 | /** | ||
308 | * igb_write_mbx_pf - Places a message in the mailbox | 341 | * igb_write_mbx_pf - Places a message in the mailbox |
309 | * @hw: pointer to the HW structure | 342 | * @hw: pointer to the HW structure |
310 | * @msg: The message buffer | 343 | * @msg: The message buffer |
@@ -316,27 +349,17 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) | |||
316 | static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, | 349 | static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, |
317 | u16 vf_number) | 350 | u16 vf_number) |
318 | { | 351 | { |
319 | u32 p2v_mailbox; | 352 | s32 ret_val; |
320 | s32 ret_val = 0; | ||
321 | u16 i; | 353 | u16 i; |
322 | 354 | ||
323 | /* Take ownership of the buffer */ | 355 | /* lock the mailbox to prevent pf/vf race condition */ |
324 | wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); | 356 | ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); |
325 | 357 | if (ret_val) | |
326 | /* Make sure we have ownership now... */ | ||
327 | p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); | ||
328 | if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) { | ||
329 | /* failed to grab ownership */ | ||
330 | ret_val = -E1000_ERR_MBX; | ||
331 | goto out_no_write; | 358 | goto out_no_write; |
332 | } | ||
333 | 359 | ||
334 | /* | 360 | /* flush msg and acks as we are overwriting the message buffer */ |
335 | * flush any ack or msg which may already be in the queue | ||
336 | * as they are likely the result of an error | ||
337 | */ | ||
338 | igb_check_for_ack_pf(hw, vf_number); | ||
339 | igb_check_for_msg_pf(hw, vf_number); | 361 | igb_check_for_msg_pf(hw, vf_number); |
362 | igb_check_for_ack_pf(hw, vf_number); | ||
340 | 363 | ||
341 | /* copy the caller specified message to the mailbox memory buffer */ | 364 | /* copy the caller specified message to the mailbox memory buffer */ |
342 | for (i = 0; i < size; i++) | 365 | for (i = 0; i < size; i++) |
@@ -367,20 +390,13 @@ out_no_write: | |||
367 | static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, | 390 | static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, |
368 | u16 vf_number) | 391 | u16 vf_number) |
369 | { | 392 | { |
370 | u32 p2v_mailbox; | 393 | s32 ret_val; |
371 | s32 ret_val = 0; | ||
372 | u16 i; | 394 | u16 i; |
373 | 395 | ||
374 | /* Take ownership of the buffer */ | 396 | /* lock the mailbox to prevent pf/vf race condition */ |
375 | wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); | 397 | ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); |
376 | 398 | if (ret_val) | |
377 | /* Make sure we have ownership now... */ | ||
378 | p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); | ||
379 | if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) { | ||
380 | /* failed to grab ownership */ | ||
381 | ret_val = -E1000_ERR_MBX; | ||
382 | goto out_no_read; | 399 | goto out_no_read; |
383 | } | ||
384 | 400 | ||
385 | /* copy the message to the mailbox memory buffer */ | 401 | /* copy the message to the mailbox memory buffer */ |
386 | for (i = 0; i < size; i++) | 402 | for (i = 0; i < size; i++) |
@@ -392,8 +408,6 @@ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, | |||
392 | /* update stats */ | 408 | /* update stats */ |
393 | hw->mbx.stats.msgs_rx++; | 409 | hw->mbx.stats.msgs_rx++; |
394 | 410 | ||
395 | ret_val = 0; | ||
396 | |||
397 | out_no_read: | 411 | out_no_read: |
398 | return ret_val; | 412 | return ret_val; |
399 | } | 413 | } |
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h index ebc02ea3f198..bb112fb6c3a1 100644 --- a/drivers/net/igb/e1000_mbx.h +++ b/drivers/net/igb/e1000_mbx.h | |||
@@ -58,10 +58,12 @@ | |||
58 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) | 58 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) |
59 | 59 | ||
60 | #define E1000_VF_RESET 0x01 /* VF requests reset */ | 60 | #define E1000_VF_RESET 0x01 /* VF requests reset */ |
61 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ | 61 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ |
62 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ | 62 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ |
63 | #define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ | 63 | #define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ |
64 | #define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ | 64 | #define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ |
65 | #define E1000_VF_SET_PROMISC 0x06 /* VF requests to clear VMOLR.ROPE/MPME */ ||
66 | #define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) | ||
65 | 67 | ||
66 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ | 68 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ |
67 | 69 | ||
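The two new defines are meant to be OR-ed into a single mailbox word: the low bits carry the E1000_VF_SET_PROMISC opcode and the MSGINFO field selects the mode. A hedged sketch of a VF composing the request:

    /* Sketch: VF asks the PF for multicast-promiscuous mode. */
    u32 msg = E1000_VF_SET_PROMISC | E1000_VF_SET_PROMISC_MULTICAST;

    hw->mbx.ops.write_posted(hw, &msg, 1, 0);   /* mailbox id 0 assumed */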
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c index a88bfe2f1e8f..d83b77fa4038 100644 --- a/drivers/net/igb/e1000_nvm.c +++ b/drivers/net/igb/e1000_nvm.c | |||
@@ -78,9 +78,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) | |||
78 | u32 mask; | 78 | u32 mask; |
79 | 79 | ||
80 | mask = 0x01 << (count - 1); | 80 | mask = 0x01 << (count - 1); |
81 | if (nvm->type == e1000_nvm_eeprom_microwire) | 81 | if (nvm->type == e1000_nvm_eeprom_spi) |
82 | eecd &= ~E1000_EECD_DO; | ||
83 | else if (nvm->type == e1000_nvm_eeprom_spi) | ||
84 | eecd |= E1000_EECD_DO; | 82 | eecd |= E1000_EECD_DO; |
85 | 83 | ||
86 | do { | 84 | do { |
@@ -220,22 +218,7 @@ static void igb_standby_nvm(struct e1000_hw *hw) | |||
220 | struct e1000_nvm_info *nvm = &hw->nvm; | 218 | struct e1000_nvm_info *nvm = &hw->nvm; |
221 | u32 eecd = rd32(E1000_EECD); | 219 | u32 eecd = rd32(E1000_EECD); |
222 | 220 | ||
223 | if (nvm->type == e1000_nvm_eeprom_microwire) { | 221 | if (nvm->type == e1000_nvm_eeprom_spi) { |
224 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | ||
225 | wr32(E1000_EECD, eecd); | ||
226 | wrfl(); | ||
227 | udelay(nvm->delay_usec); | ||
228 | |||
229 | igb_raise_eec_clk(hw, &eecd); | ||
230 | |||
231 | /* Select EEPROM */ | ||
232 | eecd |= E1000_EECD_CS; | ||
233 | wr32(E1000_EECD, eecd); | ||
234 | wrfl(); | ||
235 | udelay(nvm->delay_usec); | ||
236 | |||
237 | igb_lower_eec_clk(hw, &eecd); | ||
238 | } else if (nvm->type == e1000_nvm_eeprom_spi) { | ||
239 | /* Toggle CS to flush commands */ | 222 | /* Toggle CS to flush commands */ |
240 | eecd |= E1000_EECD_CS; | 223 | eecd |= E1000_EECD_CS; |
241 | wr32(E1000_EECD, eecd); | 224 | wr32(E1000_EECD, eecd); |
@@ -263,12 +246,6 @@ static void e1000_stop_nvm(struct e1000_hw *hw) | |||
263 | /* Pull CS high */ | 246 | /* Pull CS high */ |
264 | eecd |= E1000_EECD_CS; | 247 | eecd |= E1000_EECD_CS; |
265 | igb_lower_eec_clk(hw, &eecd); | 248 | igb_lower_eec_clk(hw, &eecd); |
266 | } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) { | ||
267 | /* CS on Microwire is active-high */ ||
268 | eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); | ||
269 | wr32(E1000_EECD, eecd); | ||
270 | igb_raise_eec_clk(hw, &eecd); | ||
271 | igb_lower_eec_clk(hw, &eecd); | ||
272 | } | 249 | } |
273 | } | 250 | } |
274 | 251 | ||
@@ -304,14 +281,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) | |||
304 | u8 spi_stat_reg; | 281 | u8 spi_stat_reg; |
305 | 282 | ||
306 | 283 | ||
307 | if (nvm->type == e1000_nvm_eeprom_microwire) { | 284 | if (nvm->type == e1000_nvm_eeprom_spi) { |
308 | /* Clear SK and DI */ | ||
309 | eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); | ||
310 | wr32(E1000_EECD, eecd); | ||
311 | /* Set CS */ | ||
312 | eecd |= E1000_EECD_CS; | ||
313 | wr32(E1000_EECD, eecd); | ||
314 | } else if (nvm->type == e1000_nvm_eeprom_spi) { | ||
315 | /* Clear SK and CS */ | 285 | /* Clear SK and CS */ |
316 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | 286 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); |
317 | wr32(E1000_EECD, eecd); | 287 | wr32(E1000_EECD, eecd); |
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c index ee460600e74b..cf1f32300923 100644 --- a/drivers/net/igb/e1000_phy.c +++ b/drivers/net/igb/e1000_phy.c | |||
@@ -39,6 +39,9 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw); | |||
39 | /* Cable length tables */ | 39 | /* Cable length tables */ |
40 | static const u16 e1000_m88_cable_length_table[] = | 40 | static const u16 e1000_m88_cable_length_table[] = |
41 | { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; | 41 | { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; |
42 | #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ | ||
43 | (sizeof(e1000_m88_cable_length_table) / \ | ||
44 | sizeof(e1000_m88_cable_length_table[0])) | ||
42 | 45 | ||
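The new size macro open-codes the element count; the kernel's ARRAY_SIZE() from <linux/kernel.h> expresses the same thing more idiomatically:

    #define M88E1000_CABLE_LENGTH_TABLE_SIZE \
            ARRAY_SIZE(e1000_m88_cable_length_table)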
43 | static const u16 e1000_igp_2_cable_length_table[] = | 46 | static const u16 e1000_igp_2_cable_length_table[] = |
44 | { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, | 47 | { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, |
@@ -109,7 +112,10 @@ out: | |||
109 | **/ | 112 | **/ |
110 | static s32 igb_phy_reset_dsp(struct e1000_hw *hw) | 113 | static s32 igb_phy_reset_dsp(struct e1000_hw *hw) |
111 | { | 114 | { |
112 | s32 ret_val; | 115 | s32 ret_val = 0; |
116 | |||
117 | if (!(hw->phy.ops.write_reg)) | ||
118 | goto out; | ||
113 | 119 | ||
114 | ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); | 120 | ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); |
115 | if (ret_val) | 121 | if (ret_val) |
@@ -130,7 +136,7 @@ out: | |||
130 | * Reads the MDI control register in the PHY at offset and stores the | 136 | * Reads the MDI control register in the PHY at offset and stores the |
131 | * information read to data. | 137 | * information read to data. |
132 | **/ | 138 | **/ |
133 | static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | 139 | s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) |
134 | { | 140 | { |
135 | struct e1000_phy_info *phy = &hw->phy; | 141 | struct e1000_phy_info *phy = &hw->phy; |
136 | u32 i, mdic = 0; | 142 | u32 i, mdic = 0; |
@@ -188,7 +194,7 @@ out: | |||
188 | * | 194 | * |
189 | * Writes data to MDI control register in the PHY at offset. | 195 | * Writes data to MDI control register in the PHY at offset. |
190 | **/ | 196 | **/ |
191 | static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | 197 | s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) |
192 | { | 198 | { |
193 | struct e1000_phy_info *phy = &hw->phy; | 199 | struct e1000_phy_info *phy = &hw->phy; |
194 | u32 i, mdic = 0; | 200 | u32 i, mdic = 0; |
@@ -239,6 +245,103 @@ out: | |||
239 | } | 245 | } |
240 | 246 | ||
241 | /** | 247 | /** |
248 | * igb_read_phy_reg_i2c - Read PHY register using i2c | ||
249 | * @hw: pointer to the HW structure | ||
250 | * @offset: register offset to be read | ||
251 | * @data: pointer to the read data | ||
252 | * | ||
253 | * Reads the PHY register at offset using the i2c interface and stores the | ||
254 | * retrieved information in data. | ||
255 | **/ | ||
256 | s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) | ||
257 | { | ||
258 | struct e1000_phy_info *phy = &hw->phy; | ||
259 | u32 i, i2ccmd = 0; | ||
260 | |||
261 | |||
262 | /* | ||
263 | * Set up Op-code, Phy Address, and register address in the I2CCMD | ||
264 | * register. The MAC will take care of interfacing with the | ||
265 | * PHY to retrieve the desired data. | ||
266 | */ | ||
267 | i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | | ||
268 | (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | | ||
269 | (E1000_I2CCMD_OPCODE_READ)); | ||
270 | |||
271 | wr32(E1000_I2CCMD, i2ccmd); | ||
272 | |||
273 | /* Poll the ready bit to see if the I2C read completed */ | ||
274 | for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { | ||
275 | udelay(50); | ||
276 | i2ccmd = rd32(E1000_I2CCMD); | ||
277 | if (i2ccmd & E1000_I2CCMD_READY) | ||
278 | break; | ||
279 | } | ||
280 | if (!(i2ccmd & E1000_I2CCMD_READY)) { | ||
281 | hw_dbg("I2CCMD Read did not complete\n"); | ||
282 | return -E1000_ERR_PHY; | ||
283 | } | ||
284 | if (i2ccmd & E1000_I2CCMD_ERROR) { | ||
285 | hw_dbg("I2CCMD Error bit set\n"); | ||
286 | return -E1000_ERR_PHY; | ||
287 | } | ||
288 | |||
289 | /* Need to byte-swap the 16-bit value. */ | ||
290 | *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); | ||
291 | |||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | /** | ||
296 | * igb_write_phy_reg_i2c - Write PHY register using i2c | ||
297 | * @hw: pointer to the HW structure | ||
298 | * @offset: register offset to write to | ||
299 | * @data: data to write at register offset | ||
300 | * | ||
301 | * Writes the data to PHY register at the offset using the i2c interface. | ||
302 | **/ | ||
303 | s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) | ||
304 | { | ||
305 | struct e1000_phy_info *phy = &hw->phy; | ||
306 | u32 i, i2ccmd = 0; | ||
307 | u16 phy_data_swapped; | ||
308 | |||
309 | |||
310 | /* Swap the data bytes for the I2C interface */ | ||
311 | phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); | ||
312 | |||
313 | /* | ||
314 | * Set up Op-code, Phy Address, and register address in the I2CCMD | ||
315 | * register. The MAC will take care of interfacing with the | ||
316 | * PHY to write the desired data. ||
317 | */ | ||
318 | i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | | ||
319 | (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | | ||
320 | E1000_I2CCMD_OPCODE_WRITE | | ||
321 | phy_data_swapped); | ||
322 | |||
323 | wr32(E1000_I2CCMD, i2ccmd); | ||
324 | |||
325 | /* Poll the ready bit to see if the I2C write completed */ ||
326 | for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { | ||
327 | udelay(50); | ||
328 | i2ccmd = rd32(E1000_I2CCMD); | ||
329 | if (i2ccmd & E1000_I2CCMD_READY) | ||
330 | break; | ||
331 | } | ||
332 | if (!(i2ccmd & E1000_I2CCMD_READY)) { | ||
333 | hw_dbg("I2CCMD Write did not complete\n"); | ||
334 | return -E1000_ERR_PHY; | ||
335 | } | ||
336 | if (i2ccmd & E1000_I2CCMD_ERROR) { | ||
337 | hw_dbg("I2CCMD Error bit set\n"); | ||
338 | return -E1000_ERR_PHY; | ||
339 | } | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
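Both helpers swap the two bytes of the 16-bit payload because the I2CCMD data field arrives with its bytes in the opposite order; the open-coded shifts are equivalent to swab16() from <linux/swab.h>:

    /* Equivalent spelling of the open-coded swaps above: */
    *data = swab16((u16)(i2ccmd & 0xFFFF));     /* read path  */
    phy_data_swapped = swab16(data);            /* write path */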
344 | /** | ||
242 | * igb_read_phy_reg_igp - Read igp PHY register | 345 | * igb_read_phy_reg_igp - Read igp PHY register |
243 | * @hw: pointer to the HW structure | 346 | * @hw: pointer to the HW structure |
244 | * @offset: register offset to be read | 347 | * @offset: register offset to be read |
@@ -318,6 +421,48 @@ out: | |||
318 | } | 421 | } |
319 | 422 | ||
320 | /** | 423 | /** |
424 | * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link | ||
425 | * @hw: pointer to the HW structure | ||
426 | * | ||
427 | * Sets up Carrier-sense on Transmit and downshift values. | ||
428 | **/ | ||
429 | s32 igb_copper_link_setup_82580(struct e1000_hw *hw) | ||
430 | { | ||
431 | struct e1000_phy_info *phy = &hw->phy; | ||
432 | s32 ret_val; | ||
433 | u16 phy_data; | ||
434 | |||
435 | |||
436 | if (phy->reset_disable) { | ||
437 | ret_val = 0; | ||
438 | goto out; | ||
439 | } | ||
440 | |||
441 | if (phy->type == e1000_phy_82580) { | ||
442 | ret_val = hw->phy.ops.reset(hw); | ||
443 | if (ret_val) { | ||
444 | hw_dbg("Error resetting the PHY.\n"); | ||
445 | goto out; | ||
446 | } | ||
447 | } | ||
448 | |||
449 | /* Enable CRS on TX. This must be set for half-duplex operation. */ | ||
450 | ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); | ||
451 | if (ret_val) | ||
452 | goto out; | ||
453 | |||
454 | phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; | ||
455 | |||
456 | /* Enable downshift */ | ||
457 | phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; | ||
458 | |||
459 | ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); | ||
460 | |||
461 | out: | ||
462 | return ret_val; | ||
463 | } | ||
464 | |||
465 | /** | ||
321 | * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link | 466 | * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link |
322 | * @hw: pointer to the HW structure | 467 | * @hw: pointer to the HW structure |
323 | * | 468 | * |
@@ -572,7 +717,7 @@ out: | |||
572 | * and restart the negotiation process between the link partner. If | 717 | * and restart the negotiation process between the link partner. If |
573 | * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. | 718 | * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. |
574 | **/ | 719 | **/ |
575 | s32 igb_copper_link_autoneg(struct e1000_hw *hw) | 720 | static s32 igb_copper_link_autoneg(struct e1000_hw *hw) |
576 | { | 721 | { |
577 | struct e1000_phy_info *phy = &hw->phy; | 722 | struct e1000_phy_info *phy = &hw->phy; |
578 | s32 ret_val; | 723 | s32 ret_val; |
@@ -796,6 +941,65 @@ out: | |||
796 | } | 941 | } |
797 | 942 | ||
798 | /** | 943 | /** |
944 | * igb_setup_copper_link - Configure copper link settings | ||
945 | * @hw: pointer to the HW structure | ||
946 | * | ||
947 | * Calls the appropriate function to configure the link for auto-neg or forced | ||
948 | * speed and duplex. Then we check for link; once link is established, we ||
949 | * configure collision distance and flow control. If link is ||
950 | * not established, we return -E1000_ERR_PHY (-2). | ||
951 | **/ | ||
952 | s32 igb_setup_copper_link(struct e1000_hw *hw) | ||
953 | { | ||
954 | s32 ret_val; | ||
955 | bool link; | ||
956 | |||
957 | |||
958 | if (hw->mac.autoneg) { | ||
959 | /* | ||
960 | * Setup autoneg and flow control advertisement and perform | ||
961 | * autonegotiation. | ||
962 | */ | ||
963 | ret_val = igb_copper_link_autoneg(hw); | ||
964 | if (ret_val) | ||
965 | goto out; | ||
966 | } else { | ||
967 | /* | ||
968 | * PHY will be set to 10H, 10F, 100H or 100F | ||
969 | * depending on user settings. | ||
970 | */ | ||
971 | hw_dbg("Forcing Speed and Duplex\n"); | ||
972 | ret_val = hw->phy.ops.force_speed_duplex(hw); | ||
973 | if (ret_val) { | ||
974 | hw_dbg("Error Forcing Speed and Duplex\n"); | ||
975 | goto out; | ||
976 | } | ||
977 | } | ||
978 | |||
979 | /* | ||
980 | * Check link status. Wait up to 100 microseconds for link to become | ||
981 | * valid. | ||
982 | */ | ||
983 | ret_val = igb_phy_has_link(hw, | ||
984 | COPPER_LINK_UP_LIMIT, | ||
985 | 10, | ||
986 | &link); | ||
987 | if (ret_val) | ||
988 | goto out; | ||
989 | |||
990 | if (link) { | ||
991 | hw_dbg("Valid link established!!!\n"); | ||
992 | igb_config_collision_dist(hw); | ||
993 | ret_val = igb_config_fc_after_link_up(hw); | ||
994 | } else { | ||
995 | hw_dbg("Unable to establish link!!!\n"); | ||
996 | } | ||
997 | |||
998 | out: | ||
999 | return ret_val; | ||
1000 | } | ||
1001 | |||
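The "wait up to 100 microseconds" comment follows from the poll parameters: iterations times interval. Assuming COPPER_LINK_UP_LIMIT is 10 in this driver's e1000_defines.h, the budget works out as:

    /* Poll budget: iterations * usec_interval = total wait.          */
    ret_val = igb_phy_has_link(hw,
                               COPPER_LINK_UP_LIMIT, /* 10 polls (assumed)   */
                               10,                   /* 10 usec apart        */
                               &link);               /* ~100 usec worst case */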
1002 | /** | ||
799 | * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY | 1003 | * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY |
800 | * @hw: pointer to the HW structure | 1004 | * @hw: pointer to the HW structure |
801 | * | 1005 | * |
@@ -903,22 +1107,19 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
903 | 1107 | ||
904 | igb_phy_force_speed_duplex_setup(hw, &phy_data); | 1108 | igb_phy_force_speed_duplex_setup(hw, &phy_data); |
905 | 1109 | ||
906 | /* Reset the phy to commit changes. */ | ||
907 | phy_data |= MII_CR_RESET; | ||
908 | |||
909 | ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); | 1110 | ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); |
910 | if (ret_val) | 1111 | if (ret_val) |
911 | goto out; | 1112 | goto out; |
912 | 1113 | ||
913 | udelay(1); | 1114 | /* Reset the phy to commit changes. */ |
1115 | ret_val = igb_phy_sw_reset(hw); | ||
1116 | if (ret_val) | ||
1117 | goto out; | ||
914 | 1118 | ||
915 | if (phy->autoneg_wait_to_complete) { | 1119 | if (phy->autoneg_wait_to_complete) { |
916 | hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); | 1120 | hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); |
917 | 1121 | ||
918 | ret_val = igb_phy_has_link(hw, | 1122 | ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); |
919 | PHY_FORCE_LIMIT, | ||
920 | 100000, | ||
921 | &link); | ||
922 | if (ret_val) | 1123 | if (ret_val) |
923 | goto out; | 1124 | goto out; |
924 | 1125 | ||
@@ -928,8 +1129,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
928 | * Reset the DSP and cross our fingers. | 1129 | * Reset the DSP and cross our fingers. |
929 | */ | 1130 | */ |
930 | ret_val = phy->ops.write_reg(hw, | 1131 | ret_val = phy->ops.write_reg(hw, |
931 | M88E1000_PHY_PAGE_SELECT, | 1132 | M88E1000_PHY_PAGE_SELECT, |
932 | 0x001d); | 1133 | 0x001d); |
933 | if (ret_val) | 1134 | if (ret_val) |
934 | goto out; | 1135 | goto out; |
935 | ret_val = igb_phy_reset_dsp(hw); | 1136 | ret_val = igb_phy_reset_dsp(hw); |
@@ -939,7 +1140,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
939 | 1140 | ||
940 | /* Try once more */ | 1141 | /* Try once more */ |
941 | ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, | 1142 | ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, |
942 | 100000, &link); | 1143 | 100000, &link); |
943 | if (ret_val) | 1144 | if (ret_val) |
944 | goto out; | 1145 | goto out; |
945 | } | 1146 | } |
@@ -1051,9 +1252,12 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, | |||
1051 | s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) | 1252 | s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) |
1052 | { | 1253 | { |
1053 | struct e1000_phy_info *phy = &hw->phy; | 1254 | struct e1000_phy_info *phy = &hw->phy; |
1054 | s32 ret_val; | 1255 | s32 ret_val = 0; |
1055 | u16 data; | 1256 | u16 data; |
1056 | 1257 | ||
1258 | if (!(hw->phy.ops.read_reg)) | ||
1259 | goto out; | ||
1260 | |||
1057 | ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); | 1261 | ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); |
1058 | if (ret_val) | 1262 | if (ret_val) |
1059 | goto out; | 1263 | goto out; |
@@ -1288,8 +1492,14 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, | |||
1288 | * it across the board. | 1492 | * it across the board. |
1289 | */ | 1493 | */ |
1290 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); | 1494 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); |
1291 | if (ret_val) | 1495 | if (ret_val) { |
1292 | break; | 1496 | /* |
1497 | * If the first read fails, another entity may have | ||
1498 | * ownership of the resources, wait and try again to | ||
1499 | * see if they have relinquished the resources yet. | ||
1500 | */ | ||
1501 | udelay(usec_interval); | ||
1502 | } | ||
1293 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); | 1503 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); |
1294 | if (ret_val) | 1504 | if (ret_val) |
1295 | break; | 1505 | break; |
@@ -1333,8 +1543,13 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw) | |||
1333 | 1543 | ||
1334 | index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> | 1544 | index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> |
1335 | M88E1000_PSSR_CABLE_LENGTH_SHIFT; | 1545 | M88E1000_PSSR_CABLE_LENGTH_SHIFT; |
1546 | if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { | ||
1547 | ret_val = -E1000_ERR_PHY; | ||
1548 | goto out; | ||
1549 | } | ||
1550 | |||
1336 | phy->min_cable_length = e1000_m88_cable_length_table[index]; | 1551 | phy->min_cable_length = e1000_m88_cable_length_table[index]; |
1337 | phy->max_cable_length = e1000_m88_cable_length_table[index+1]; | 1552 | phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; |
1338 | 1553 | ||
1339 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; | 1554 | phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; |
1340 | 1555 | ||
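The new bounds check protects the index+1 access for the max entry. A worked pass through the guarded lookup, taking a reported index of 2 against the table above:

    /* table = { 0, 50, 80, 110, 140, 140, UNDEFINED }, index = 2:
     *   min_cable_length = table[2] = 80 m
     *   max_cable_length = table[3] = 110 m
     *   cable_length     = (80 + 110) / 2 = 95 m
     * index >= TABLE_SIZE - 1 (i.e. 6) would read past the end, hence: */
    if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
            return -E1000_ERR_PHY;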
@@ -1715,3 +1930,229 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw) | |||
1715 | return 0; | 1930 | return 0; |
1716 | } | 1931 | } |
1717 | 1932 | ||
1933 | /** | ||
1934 | * igb_power_up_phy_copper - Restore copper link in case of PHY power down | ||
1935 | * @hw: pointer to the HW structure | ||
1936 | * | ||
1937 | * In the case of a PHY power down to save power, or to turn off link during a | ||
1938 | * driver unload, restore the link to previous settings. | ||
1939 | **/ | ||
1940 | void igb_power_up_phy_copper(struct e1000_hw *hw) | ||
1941 | { | ||
1942 | u16 mii_reg = 0; | ||
1943 | |||
1944 | /* The PHY will retain its settings across a power down/up cycle */ | ||
1945 | hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); | ||
1946 | mii_reg &= ~MII_CR_POWER_DOWN; | ||
1947 | hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); | ||
1948 | } | ||
1949 | |||
1950 | /** | ||
1951 | * igb_power_down_phy_copper - Power down copper PHY | ||
1952 | * @hw: pointer to the HW structure | ||
1953 | * | ||
1954 | * Power down PHY to save power when interface is down and wake on lan | ||
1955 | * is not enabled. | ||
1956 | **/ | ||
1957 | void igb_power_down_phy_copper(struct e1000_hw *hw) | ||
1958 | { | ||
1959 | u16 mii_reg = 0; | ||
1960 | |||
1961 | /* The PHY will retain its settings across a power down/up cycle */ | ||
1962 | hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); | ||
1963 | mii_reg |= MII_CR_POWER_DOWN; | ||
1964 | hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); | ||
1965 | msleep(1); | ||
1966 | } | ||
1967 | |||
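A typical caller powers the copper PHY down on interface close or suspend only when Wake-on-LAN does not need it; a hedged sketch, with wufc standing in for the driver's WoL filter flags:

    /* Sketch: keep the PHY powered if any wake filter is armed. */
    if (!wufc)
            igb_power_down_phy_copper(hw);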
1968 | /** | ||
1969 | * igb_check_polarity_82580 - Checks the polarity. | ||
1970 | * @hw: pointer to the HW structure | ||
1971 | * | ||
1972 | * Success returns 0, Failure returns -E1000_ERR_PHY (-2) | ||
1973 | * | ||
1974 | * Polarity is determined based on the PHY specific status register. | ||
1975 | **/ | ||
1976 | static s32 igb_check_polarity_82580(struct e1000_hw *hw) | ||
1977 | { | ||
1978 | struct e1000_phy_info *phy = &hw->phy; | ||
1979 | s32 ret_val; | ||
1980 | u16 data; | ||
1981 | |||
1982 | |||
1983 | ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); | ||
1984 | |||
1985 | if (!ret_val) | ||
1986 | phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) | ||
1987 | ? e1000_rev_polarity_reversed | ||
1988 | : e1000_rev_polarity_normal; | ||
1989 | |||
1990 | return ret_val; | ||
1991 | } | ||
1992 | |||
1993 | /** | ||
1994 | * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY | ||
1995 | * @hw: pointer to the HW structure | ||
1996 | * | ||
1997 | * Calls the PHY setup function to force speed and duplex. Clears the | ||
1998 | * auto-crossover to force MDI manually. Waits for link and returns | ||
1999 | * successful if link up is successful, else -E1000_ERR_PHY (-2). | ||
2000 | **/ | ||
2001 | s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) | ||
2002 | { | ||
2003 | struct e1000_phy_info *phy = &hw->phy; | ||
2004 | s32 ret_val; | ||
2005 | u16 phy_data; | ||
2006 | bool link; | ||
2007 | |||
2008 | |||
2009 | ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); | ||
2010 | if (ret_val) | ||
2011 | goto out; | ||
2012 | |||
2013 | igb_phy_force_speed_duplex_setup(hw, &phy_data); | ||
2014 | |||
2015 | ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); | ||
2016 | if (ret_val) | ||
2017 | goto out; | ||
2018 | |||
2019 | /* | ||
2020 | * Clear Auto-Crossover to force MDI manually. 82580 requires MDI | ||
2021 | * forced whenever speed and duplex are forced. | ||
2022 | */ | ||
2023 | ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); | ||
2024 | if (ret_val) | ||
2025 | goto out; | ||
2026 | |||
2027 | phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX; | ||
2028 | phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX; | ||
2029 | |||
2030 | ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); | ||
2031 | if (ret_val) | ||
2032 | goto out; | ||
2033 | |||
2034 | hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); | ||
2035 | |||
2036 | udelay(1); | ||
2037 | |||
2038 | if (phy->autoneg_wait_to_complete) { | ||
2039 | hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); | ||
2040 | |||
2041 | ret_val = igb_phy_has_link(hw, | ||
2042 | PHY_FORCE_LIMIT, | ||
2043 | 100000, | ||
2044 | &link); | ||
2045 | if (ret_val) | ||
2046 | goto out; | ||
2047 | |||
2048 | if (!link) | ||
2049 | hw_dbg("Link taking longer than expected.\n"); | ||
2050 | |||
2051 | /* Try once more */ | ||
2052 | ret_val = igb_phy_has_link(hw, | ||
2053 | PHY_FORCE_LIMIT, | ||
2054 | 100000, | ||
2055 | &link); | ||
2056 | if (ret_val) | ||
2057 | goto out; | ||
2058 | } | ||
2059 | |||
2060 | out: | ||
2061 | return ret_val; | ||
2062 | } | ||
2063 | |||
2064 | /** | ||
2065 | * igb_get_phy_info_82580 - Retrieve I82580 PHY information | ||
2066 | * @hw: pointer to the HW structure | ||
2067 | * | ||
2068 | * Read PHY status to determine if link is up. If link is up, then | ||
2069 | * set/determine 10base-T extended distance and polarity correction. Read | ||
2070 | * PHY port status to determine MDI/MDIx and speed. Based on the speed, | ||
2071 | * determine on the cable length, local and remote receiver. | ||
2072 | **/ | ||
2073 | s32 igb_get_phy_info_82580(struct e1000_hw *hw) | ||
2074 | { | ||
2075 | struct e1000_phy_info *phy = &hw->phy; | ||
2076 | s32 ret_val; | ||
2077 | u16 data; | ||
2078 | bool link; | ||
2079 | |||
2080 | |||
2081 | ret_val = igb_phy_has_link(hw, 1, 0, &link); | ||
2082 | if (ret_val) | ||
2083 | goto out; | ||
2084 | |||
2085 | if (!link) { | ||
2086 | hw_dbg("Phy info is only valid if link is up\n"); | ||
2087 | ret_val = -E1000_ERR_CONFIG; | ||
2088 | goto out; | ||
2089 | } | ||
2090 | |||
2091 | phy->polarity_correction = true; | ||
2092 | |||
2093 | ret_val = igb_check_polarity_82580(hw); | ||
2094 | if (ret_val) | ||
2095 | goto out; | ||
2096 | |||
2097 | ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); | ||
2098 | if (ret_val) | ||
2099 | goto out; | ||
2100 | |||
2101 | phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; | ||
2102 | |||
2103 | if ((data & I82580_PHY_STATUS2_SPEED_MASK) == | ||
2104 | I82580_PHY_STATUS2_SPEED_1000MBPS) { | ||
2105 | ret_val = hw->phy.ops.get_cable_length(hw); | ||
2106 | if (ret_val) | ||
2107 | goto out; | ||
2108 | |||
2109 | ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); | ||
2110 | if (ret_val) | ||
2111 | goto out; | ||
2112 | |||
2113 | phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) | ||
2114 | ? e1000_1000t_rx_status_ok | ||
2115 | : e1000_1000t_rx_status_not_ok; | ||
2116 | |||
2117 | phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) | ||
2118 | ? e1000_1000t_rx_status_ok | ||
2119 | : e1000_1000t_rx_status_not_ok; | ||
2120 | } else { | ||
2121 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | ||
2122 | phy->local_rx = e1000_1000t_rx_status_undefined; | ||
2123 | phy->remote_rx = e1000_1000t_rx_status_undefined; | ||
2124 | } | ||
2125 | |||
2126 | out: | ||
2127 | return ret_val; | ||
2128 | } | ||
2129 | |||
2130 | /** | ||
2131 | * igb_get_cable_length_82580 - Determine cable length for 82580 PHY | ||
2132 | * @hw: pointer to the HW structure | ||
2133 | * | ||
2134 | * Reads the diagnostic status register and verifies result is valid before | ||
2135 | * placing it in the phy_cable_length field. | ||
2136 | **/ | ||
2137 | s32 igb_get_cable_length_82580(struct e1000_hw *hw) | ||
2138 | { | ||
2139 | struct e1000_phy_info *phy = &hw->phy; | ||
2140 | s32 ret_val; | ||
2141 | u16 phy_data, length; | ||
2142 | |||
2143 | |||
2144 | ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); | ||
2145 | if (ret_val) | ||
2146 | goto out; | ||
2147 | |||
2148 | length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> | ||
2149 | I82580_DSTATUS_CABLE_LENGTH_SHIFT; | ||
2150 | |||
2151 | if (length == E1000_CABLE_LENGTH_UNDEFINED) | ||
2152 | ret_val = -E1000_ERR_PHY; | ||
2153 | |||
2154 | phy->cable_length = length; | ||
2155 | |||
2156 | out: | ||
2157 | return ret_val; | ||
2158 | } | ||
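The diagnostic-status field decodes with the new mask and shift: bits 9:2 of I82580_PHY_DIAG_STATUS give the length in meters, with 0xFF (E1000_CABLE_LENGTH_UNDEFINED) flagging an invalid reading. A worked decode:

    /* Worked decode, assuming the PHY reported phy_data = 0x0190:
     *   (0x0190 & 0x03FC) >> 2 == 0x64 == 100 meters               */
    u16 length = (0x0190 & I82580_DSTATUS_CABLE_LENGTH) >>
                 I82580_DSTATUS_CABLE_LENGTH_SHIFT;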
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h index ebe4b616db8a..565a6dbb3714 100644 --- a/drivers/net/igb/e1000_phy.h +++ b/drivers/net/igb/e1000_phy.h | |||
@@ -43,7 +43,6 @@ enum e1000_smart_speed { | |||
43 | 43 | ||
44 | s32 igb_check_downshift(struct e1000_hw *hw); | 44 | s32 igb_check_downshift(struct e1000_hw *hw); |
45 | s32 igb_check_reset_block(struct e1000_hw *hw); | 45 | s32 igb_check_reset_block(struct e1000_hw *hw); |
46 | s32 igb_copper_link_autoneg(struct e1000_hw *hw); | ||
47 | s32 igb_copper_link_setup_igp(struct e1000_hw *hw); | 46 | s32 igb_copper_link_setup_igp(struct e1000_hw *hw); |
48 | s32 igb_copper_link_setup_m88(struct e1000_hw *hw); | 47 | s32 igb_copper_link_setup_m88(struct e1000_hw *hw); |
49 | s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); | 48 | s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); |
@@ -57,10 +56,21 @@ s32 igb_phy_sw_reset(struct e1000_hw *hw); | |||
57 | s32 igb_phy_hw_reset(struct e1000_hw *hw); | 56 | s32 igb_phy_hw_reset(struct e1000_hw *hw); |
58 | s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); | 57 | s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); |
59 | s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); | 58 | s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); |
59 | s32 igb_setup_copper_link(struct e1000_hw *hw); | ||
60 | s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); | 60 | s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); |
61 | s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, | 61 | s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, |
62 | u32 usec_interval, bool *success); | 62 | u32 usec_interval, bool *success); |
63 | void igb_power_up_phy_copper(struct e1000_hw *hw); | ||
64 | void igb_power_down_phy_copper(struct e1000_hw *hw); | ||
63 | s32 igb_phy_init_script_igp3(struct e1000_hw *hw); | 65 | s32 igb_phy_init_script_igp3(struct e1000_hw *hw); |
66 | s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); | ||
67 | s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); | ||
68 | s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); | ||
69 | s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); | ||
70 | s32 igb_copper_link_setup_82580(struct e1000_hw *hw); | ||
71 | s32 igb_get_phy_info_82580(struct e1000_hw *hw); | ||
72 | s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); | ||
73 | s32 igb_get_cable_length_82580(struct e1000_hw *hw); | ||
64 | 74 | ||
65 | /* IGP01E1000 Specific Registers */ | 75 | /* IGP01E1000 Specific Registers */ |
66 | #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ | 76 | #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ |
@@ -75,6 +85,33 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw); | |||
75 | #define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ | 85 | #define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ |
76 | #define IGP01E1000_PSCFR_SMART_SPEED 0x0080 | 86 | #define IGP01E1000_PSCFR_SMART_SPEED 0x0080 |
77 | 87 | ||
88 | #define I82580_ADDR_REG 16 | ||
89 | #define I82580_CFG_REG 22 | ||
90 | #define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) | ||
91 | #define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ | ||
92 | #define I82580_CTRL_REG 23 | ||
93 | #define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) | ||
94 | |||
95 | /* 82580 specific PHY registers */ | ||
96 | #define I82580_PHY_CTRL_2 18 | ||
97 | #define I82580_PHY_LBK_CTRL 19 | ||
98 | #define I82580_PHY_STATUS_2 26 | ||
99 | #define I82580_PHY_DIAG_STATUS 31 | ||
100 | |||
101 | /* I82580 PHY Status 2 */ | ||
102 | #define I82580_PHY_STATUS2_REV_POLARITY 0x0400 | ||
103 | #define I82580_PHY_STATUS2_MDIX 0x0800 | ||
104 | #define I82580_PHY_STATUS2_SPEED_MASK 0x0300 | ||
105 | #define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 | ||
106 | #define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 | ||
107 | |||
108 | /* I82580 PHY Control 2 */ | ||
109 | #define I82580_PHY_CTRL2_AUTO_MDIX 0x0400 | ||
110 | #define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 | ||
111 | |||
112 | /* I82580 PHY Diagnostics Status */ | ||
113 | #define I82580_DSTATUS_CABLE_LENGTH 0x03FC | ||
114 | #define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 | ||
78 | /* Enable flexible speed on link-up */ | 115 | /* Enable flexible speed on link-up */ |
79 | #define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ | 116 | #define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ |
80 | #define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ | 117 | #define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ |
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index 345d1442d6d6..abb7333a1fbf 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h | |||
@@ -34,6 +34,7 @@ | |||
34 | #define E1000_EERD 0x00014 /* EEPROM Read - RW */ | 34 | #define E1000_EERD 0x00014 /* EEPROM Read - RW */ |
35 | #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ | 35 | #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ |
36 | #define E1000_MDIC 0x00020 /* MDI Control - RW */ | 36 | #define E1000_MDIC 0x00020 /* MDI Control - RW */ |
37 | #define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ | ||
37 | #define E1000_SCTL 0x00024 /* SerDes Control - RW */ | 38 | #define E1000_SCTL 0x00024 /* SerDes Control - RW */ |
38 | #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ | 39 | #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ |
39 | #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ | 40 | #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ |
@@ -76,59 +77,20 @@ | |||
76 | #define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ | 77 | #define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ |
77 | 78 | ||
78 | /* IEEE 1588 TIMESYNCH */ | 79 | /* IEEE 1588 TIMESYNCH */ |
79 | #define E1000_TSYNCTXCTL 0x0B614 | 80 | #define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ |
80 | #define E1000_TSYNCTXCTL_VALID (1<<0) | 81 | #define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ |
81 | #define E1000_TSYNCTXCTL_ENABLED (1<<4) | 82 | #define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ |
82 | #define E1000_TSYNCRXCTL 0x0B620 | 83 | #define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ |
83 | #define E1000_TSYNCRXCTL_VALID (1<<0) | 84 | #define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ |
84 | #define E1000_TSYNCRXCTL_ENABLED (1<<4) | 85 | #define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ |
85 | enum { | 86 | #define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ |
86 | E1000_TSYNCRXCTL_TYPE_L2_V2 = 0, | 87 | #define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ |
87 | E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1), | 88 | #define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ |
88 | E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2), | 89 | #define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ |
89 | E1000_TSYNCRXCTL_TYPE_ALL = (1<<3), | 90 | #define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ |
90 | E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1), | 91 | #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ |
91 | }; | 92 | #define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ |
92 | #define E1000_TSYNCRXCFG 0x05F50 | 93 | #define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ |
93 | enum { | ||
94 | E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0, | ||
95 | E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0, | ||
96 | E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0, | ||
97 | E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0, | ||
98 | E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0, | ||
99 | |||
100 | E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8, | ||
101 | E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8, | ||
102 | E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8, | ||
103 | E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8, | ||
104 | E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8, | ||
105 | E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8, | ||
106 | E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8, | ||
107 | E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8, | ||
108 | E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8, | ||
109 | E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8, | ||
110 | }; | ||
111 | #define E1000_SYSTIML 0x0B600 | ||
112 | #define E1000_SYSTIMH 0x0B604 | ||
113 | #define E1000_TIMINCA 0x0B608 | ||
114 | |||
115 | #define E1000_RXMTRL 0x0B634 | ||
116 | #define E1000_RXSTMPL 0x0B624 | ||
117 | #define E1000_RXSTMPH 0x0B628 | ||
118 | #define E1000_RXSATRL 0x0B62C | ||
119 | #define E1000_RXSATRH 0x0B630 | ||
120 | |||
121 | #define E1000_TXSTMPL 0x0B618 | ||
122 | #define E1000_TXSTMPH 0x0B61C | ||
123 | |||
124 | #define E1000_ETQF0 0x05CB0 | ||
125 | #define E1000_ETQF1 0x05CB4 | ||
126 | #define E1000_ETQF2 0x05CB8 | ||
127 | #define E1000_ETQF3 0x05CBC | ||
128 | #define E1000_ETQF4 0x05CC0 | ||
129 | #define E1000_ETQF5 0x05CC4 | ||
130 | #define E1000_ETQF6 0x05CC8 | ||
131 | #define E1000_ETQF7 0x05CCC | ||
132 | 94 | ||
133 | /* Filtering Registers */ | 95 | /* Filtering Registers */ |
134 | #define E1000_SAQF(_n) (0x5980 + 4 * (_n)) | 96 | #define E1000_SAQF(_n) (0x5980 + 4 * (_n)) |
@@ -143,7 +105,9 @@ enum { | |||
143 | #define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ | 105 | #define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ |
144 | 106 | ||
145 | #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) | 107 | #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) |
108 | |||
146 | /* Split and Replication RX Control - RW */ | 109 | /* Split and Replication RX Control - RW */ |
110 | #define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ | ||
147 | /* | 111 | /* |
148 | * Convenience macros | 112 | * Convenience macros |
149 | * | 113 | * |
@@ -288,10 +252,17 @@ enum { | |||
288 | #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ | 252 | #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ |
289 | #define E1000_RA 0x05400 /* Receive Address - RW Array */ | 253 | #define E1000_RA 0x05400 /* Receive Address - RW Array */ |
290 | #define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ | 254 | #define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ |
255 | #define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) | ||
291 | #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ | 256 | #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ |
292 | (0x054E0 + ((_i - 16) * 8))) | 257 | (0x054E0 + ((_i - 16) * 8))) |
293 | #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ | 258 | #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ |
294 | (0x054E4 + ((_i - 16) * 8))) | 259 | (0x054E4 + ((_i - 16) * 8))) |
260 | #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) | ||
261 | #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) | ||
262 | #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) | ||
263 | #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) | ||
264 | #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) | ||
265 | #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) | ||
295 | #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ | 266 | #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ |
296 | #define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ | 267 | #define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ |
297 | #define E1000_WUC 0x05800 /* Wakeup Control - RW */ | 268 | #define E1000_WUC 0x05800 /* Wakeup Control - RW */ |
@@ -331,6 +302,7 @@ enum { | |||
331 | #define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ | 302 | #define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ |
332 | #define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ | 303 | #define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ |
333 | #define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ | 304 | #define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ |
305 | #define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ | ||
334 | #define E1000_IOVTCL 0x05BBC /* IOV Control Register */ | 306 | #define E1000_IOVTCL 0x05BBC /* IOV Control Register */ |
335 | /* These act per VF so an array friendly macro is used */ | 307 | /* These act per VF so an array friendly macro is used */ |
336 | #define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) | 308 | #define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) |
@@ -338,6 +310,7 @@ enum { | |||
338 | #define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) | 310 | #define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) |
339 | #define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine | 311 | #define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine |
340 | * Filter - RW */ | 312 | * Filter - RW */ |
313 | #define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) | ||
341 | 314 | ||
342 | #define wr32(reg, value) (writel(value, hw->hw_addr + reg)) | 315 | #define wr32(reg, value) (writel(value, hw->hw_addr + reg)) |
343 | #define rd32(reg) (readl(hw->hw_addr + reg)) | 316 | #define rd32(reg) (readl(hw->hw_addr + reg)) |
@@ -348,4 +321,6 @@ enum { | |||
348 | #define array_rd32(reg, offset) \ | 321 | #define array_rd32(reg, offset) \ |
349 | (readl(hw->hw_addr + reg + ((offset) << 2))) | 322 | (readl(hw->hw_addr + reg + ((offset) << 2))) |
350 | 323 | ||
324 | /* DMA Coalescing registers */ | ||
325 | #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ | ||
351 | #endif | 326 | #endif |
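
Annotation: the timesync registers added above (SYSTIML/SYSTIMH, RXSTMPL/RXSTMPH, TXSTMPL/TXSTMPH) expose 64-bit values as low/high 32-bit pairs. A minimal sketch of composing one such value using the rd32() macro defined at the end of this header; the low-before-high read order follows the usual e1000 convention and is an assumption here, since the latching behavior is not shown in this patch:

static u64 igb_read_systim_sketch(struct e1000_hw *hw)
{
        u64 lo, hi;

        lo = rd32(E1000_SYSTIML);       /* read the low half first */
        hi = rd32(E1000_SYSTIMH);       /* then the high half */

        return (hi << 32) | lo;
}
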
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 7126fea26fec..3b772b822a5d 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h | |||
@@ -55,12 +55,14 @@ struct igb_adapter; | |||
55 | #define IGB_DEFAULT_ITR 3 /* dynamic */ | 55 | #define IGB_DEFAULT_ITR 3 /* dynamic */ |
56 | #define IGB_MAX_ITR_USECS 10000 | 56 | #define IGB_MAX_ITR_USECS 10000 |
57 | #define IGB_MIN_ITR_USECS 10 | 57 | #define IGB_MIN_ITR_USECS 10 |
58 | #define NON_Q_VECTORS 1 | ||
59 | #define MAX_Q_VECTORS 8 | ||
58 | 60 | ||
59 | /* Transmit and receive queues */ | 61 | /* Transmit and receive queues */ |
60 | #define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \ | 62 | #define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \ |
61 | (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4) | 63 | (hw->mac.type > e1000_82575 ? 8 : 4)) |
62 | #define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES | 64 | #define IGB_ABS_MAX_TX_QUEUES 8 |
63 | #define IGB_ABS_MAX_TX_QUEUES 4 | 65 | #define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES |
64 | 66 | ||
65 | #define IGB_MAX_VF_MC_ENTRIES 30 | 67 | #define IGB_MAX_VF_MC_ENTRIES 30 |
66 | #define IGB_MAX_VF_FUNCTIONS 8 | 68 | #define IGB_MAX_VF_FUNCTIONS 8 |
@@ -71,9 +73,17 @@ struct vf_data_storage { | |||
71 | u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; | 73 | u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; |
72 | u16 num_vf_mc_hashes; | 74 | u16 num_vf_mc_hashes; |
73 | u16 vlans_enabled; | 75 | u16 vlans_enabled; |
74 | bool clear_to_send; | 76 | u32 flags; |
77 | unsigned long last_nack; | ||
78 | u16 pf_vlan; /* When set, guest VLAN config not allowed. */ | ||
79 | u16 pf_qos; | ||
75 | }; | 80 | }; |
76 | 81 | ||
82 | #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ | ||
83 | #define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ | ||
84 | #define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ | ||
85 | #define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ | ||
86 | |||
77 | /* RX descriptor control thresholds. | 87 | /* RX descriptor control thresholds. |
78 | * PTHRESH - MAC will consider prefetch if it has fewer than this number of | 88 | * PTHRESH - MAC will consider prefetch if it has fewer than this number of |
79 | * descriptors available in its onboard memory. | 89 | * descriptors available in its onboard memory. |
@@ -85,17 +95,19 @@ struct vf_data_storage { | |||
85 | * descriptors until either it has this many to write back, or the | 95 | * descriptors until either it has this many to write back, or the |
86 | * ITR timer expires. | 96 | * ITR timer expires. |
87 | */ | 97 | */ |
88 | #define IGB_RX_PTHRESH 16 | 98 | #define IGB_RX_PTHRESH 8 |
89 | #define IGB_RX_HTHRESH 8 | 99 | #define IGB_RX_HTHRESH 8 |
90 | #define IGB_RX_WTHRESH 1 | 100 | #define IGB_RX_WTHRESH 1 |
101 | #define IGB_TX_PTHRESH 8 | ||
102 | #define IGB_TX_HTHRESH 1 | ||
103 | #define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ | ||
104 | adapter->msix_entries) ? 1 : 16) | ||
91 | 105 | ||
92 | /* this is the size past which hardware will drop packets when setting LPE=0 */ | 106 | /* this is the size past which hardware will drop packets when setting LPE=0 */ |
93 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | 107 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 |
94 | 108 | ||
95 | /* Supported Rx Buffer Sizes */ | 109 | /* Supported Rx Buffer Sizes */ |
96 | #define IGB_RXBUFFER_128 128 /* Used for packet split */ | 110 | #define IGB_RXBUFFER_128 128 /* Used for packet split */ |
97 | #define IGB_RXBUFFER_256 256 /* Used for packet split */ | ||
98 | #define IGB_RXBUFFER_512 512 | ||
99 | #define IGB_RXBUFFER_1024 1024 | 111 | #define IGB_RXBUFFER_1024 1024 |
100 | #define IGB_RXBUFFER_2048 2048 | 112 | #define IGB_RXBUFFER_2048 2048 |
101 | #define IGB_RXBUFFER_16384 16384 | 113 | #define IGB_RXBUFFER_16384 16384 |
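
Annotation: the new IGB_TX_PTHRESH/HTHRESH/WTHRESH values are consumed by ring setup, which packs them into the per-queue TXDCTL register. A hedged sketch of that packing; the bit positions (prefetch 5:0, host 13:8, write-back 21:16) follow the usual igb layout and are assumptions here, as is having hw, adapter, and reg_idx in scope (IGB_TX_WTHRESH itself expands using the local hw and adapter pointers):

        u32 txdctl = 0;

        txdctl |= IGB_TX_PTHRESH;               /* assumed bits 5:0 */
        txdctl |= IGB_TX_HTHRESH << 8;          /* assumed bits 13:8 */
        txdctl |= IGB_TX_WTHRESH << 16;         /* assumed bits 21:16 */
        txdctl |= E1000_TXDCTL_QUEUE_ENABLE;    /* enable the queue */
        wr32(E1000_TXDCTL(reg_idx), txdctl);
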
@@ -128,12 +140,14 @@ struct igb_buffer { | |||
128 | unsigned long time_stamp; | 140 | unsigned long time_stamp; |
129 | u16 length; | 141 | u16 length; |
130 | u16 next_to_watch; | 142 | u16 next_to_watch; |
143 | u16 mapped_as_page; | ||
144 | u16 gso_segs; | ||
131 | }; | 145 | }; |
132 | /* RX */ | 146 | /* RX */ |
133 | struct { | 147 | struct { |
134 | struct page *page; | 148 | struct page *page; |
135 | u64 page_dma; | 149 | dma_addr_t page_dma; |
136 | unsigned int page_offset; | 150 | u16 page_offset; |
137 | }; | 151 | }; |
138 | }; | 152 | }; |
139 | }; | 153 | }; |
@@ -141,36 +155,54 @@ struct igb_buffer { | |||
141 | struct igb_tx_queue_stats { | 155 | struct igb_tx_queue_stats { |
142 | u64 packets; | 156 | u64 packets; |
143 | u64 bytes; | 157 | u64 bytes; |
158 | u64 restart_queue; | ||
144 | }; | 159 | }; |
145 | 160 | ||
146 | struct igb_rx_queue_stats { | 161 | struct igb_rx_queue_stats { |
147 | u64 packets; | 162 | u64 packets; |
148 | u64 bytes; | 163 | u64 bytes; |
149 | u64 drops; | 164 | u64 drops; |
165 | u64 csum_err; | ||
166 | u64 alloc_failed; | ||
150 | }; | 167 | }; |
151 | 168 | ||
152 | struct igb_ring { | 169 | struct igb_q_vector { |
153 | struct igb_adapter *adapter; /* backlink */ | 170 | struct igb_adapter *adapter; /* backlink */ |
154 | void *desc; /* descriptor ring memory */ | 171 | struct igb_ring *rx_ring; |
155 | dma_addr_t dma; /* phys address of the ring */ | 172 | struct igb_ring *tx_ring; |
156 | unsigned int size; /* length of desc. ring in bytes */ | 173 | struct napi_struct napi; |
157 | unsigned int count; /* number of desc. in the ring */ | ||
158 | u16 next_to_use; | ||
159 | u16 next_to_clean; | ||
160 | u16 head; | ||
161 | u16 tail; | ||
162 | struct igb_buffer *buffer_info; /* array of buffer info structs */ | ||
163 | 174 | ||
164 | u32 eims_value; | 175 | u32 eims_value; |
165 | u32 itr_val; | ||
166 | u16 itr_register; | ||
167 | u16 cpu; | 176 | u16 cpu; |
168 | 177 | ||
169 | u16 queue_index; | 178 | u16 itr_val; |
170 | u16 reg_idx; | 179 | u8 set_itr; |
180 | void __iomem *itr_register; | ||
181 | |||
182 | char name[IFNAMSIZ + 9]; | ||
183 | }; | ||
184 | |||
185 | struct igb_ring { | ||
186 | struct igb_q_vector *q_vector; /* backlink to q_vector */ | ||
187 | struct net_device *netdev; /* back pointer to net_device */ | ||
188 | struct pci_dev *pdev; /* pci device for dma mapping */ | ||
189 | dma_addr_t dma; /* phys address of the ring */ | ||
190 | void *desc; /* descriptor ring memory */ | ||
191 | unsigned int size; /* length of desc. ring in bytes */ | ||
192 | u16 count; /* number of desc. in the ring */ | ||
193 | u16 next_to_use; | ||
194 | u16 next_to_clean; | ||
195 | u8 queue_index; | ||
196 | u8 reg_idx; | ||
197 | void __iomem *head; | ||
198 | void __iomem *tail; | ||
199 | struct igb_buffer *buffer_info; /* array of buffer info structs */ | ||
200 | |||
171 | unsigned int total_bytes; | 201 | unsigned int total_bytes; |
172 | unsigned int total_packets; | 202 | unsigned int total_packets; |
173 | 203 | ||
204 | u32 flags; | ||
205 | |||
174 | union { | 206 | union { |
175 | /* TX */ | 207 | /* TX */ |
176 | struct { | 208 | struct { |
@@ -180,16 +212,18 @@ struct igb_ring { | |||
180 | /* RX */ | 212 | /* RX */ |
181 | struct { | 213 | struct { |
182 | struct igb_rx_queue_stats rx_stats; | 214 | struct igb_rx_queue_stats rx_stats; |
183 | u64 rx_queue_drops; | 215 | u32 rx_buffer_len; |
184 | struct napi_struct napi; | ||
185 | int set_itr; | ||
186 | struct igb_ring *buddy; | ||
187 | }; | 216 | }; |
188 | }; | 217 | }; |
189 | |||
190 | char name[IFNAMSIZ + 5]; | ||
191 | }; | 218 | }; |
192 | 219 | ||
220 | #define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */ | ||
221 | #define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */ | ||
222 | |||
223 | #define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */ | ||
224 | |||
225 | #define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS) | ||
226 | |||
193 | #define E1000_RX_DESC_ADV(R, i) \ | 227 | #define E1000_RX_DESC_ADV(R, i) \ |
194 | (&(((union e1000_adv_rx_desc *)((R).desc))[i])) | 228 | (&(((union e1000_adv_rx_desc *)((R).desc))[i])) |
195 | #define E1000_TX_DESC_ADV(R, i) \ | 229 | #define E1000_TX_DESC_ADV(R, i) \ |
@@ -197,26 +231,30 @@ struct igb_ring { | |||
197 | #define E1000_TX_CTXTDESC_ADV(R, i) \ | 231 | #define E1000_TX_CTXTDESC_ADV(R, i) \ |
198 | (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) | 232 | (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) |
199 | 233 | ||
200 | /* board specific private data structure */ | 234 | /* igb_desc_unused - calculate if we have unused descriptors */ |
235 | static inline int igb_desc_unused(struct igb_ring *ring) | ||
236 | { | ||
237 | if (ring->next_to_clean > ring->next_to_use) | ||
238 | return ring->next_to_clean - ring->next_to_use - 1; | ||
201 | 239 | ||
240 | return ring->count + ring->next_to_clean - ring->next_to_use - 1; | ||
241 | } | ||
242 | |||
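
Annotation: igb_desc_unused() above is the standard circular-ring occupancy formula. One slot is deliberately left unused so that next_to_use == next_to_clean can only mean an empty ring. A standalone, runnable model of the same arithmetic:

#include <assert.h>

/* Same wraparound math as igb_desc_unused(), on bare indices. */
static int desc_unused(int count, int next_to_clean, int next_to_use)
{
        if (next_to_clean > next_to_use)
                return next_to_clean - next_to_use - 1;
        return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
        assert(desc_unused(256, 0, 0) == 255);   /* empty: one slot reserved */
        assert(desc_unused(256, 10, 9) == 0);    /* full: producer right behind consumer */
        assert(desc_unused(256, 0, 200) == 55);  /* producer has wrapped */
        return 0;
}
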
243 | /* board specific private data structure */ | ||
202 | struct igb_adapter { | 244 | struct igb_adapter { |
203 | struct timer_list watchdog_timer; | 245 | struct timer_list watchdog_timer; |
204 | struct timer_list phy_info_timer; | 246 | struct timer_list phy_info_timer; |
205 | struct vlan_group *vlgrp; | 247 | struct vlan_group *vlgrp; |
206 | u16 mng_vlan_id; | 248 | u16 mng_vlan_id; |
207 | u32 bd_number; | 249 | u32 bd_number; |
208 | u32 rx_buffer_len; | ||
209 | u32 wol; | 250 | u32 wol; |
210 | u32 en_mng_pt; | 251 | u32 en_mng_pt; |
211 | u16 link_speed; | 252 | u16 link_speed; |
212 | u16 link_duplex; | 253 | u16 link_duplex; |
213 | unsigned int total_tx_bytes; | 254 | |
214 | unsigned int total_tx_packets; | ||
215 | unsigned int total_rx_bytes; | ||
216 | unsigned int total_rx_packets; | ||
217 | /* Interrupt Throttle Rate */ | 255 | /* Interrupt Throttle Rate */ |
218 | u32 itr; | 256 | u32 rx_itr_setting; |
219 | u32 itr_setting; | 257 | u32 tx_itr_setting; |
220 | u16 tx_itr; | 258 | u16 tx_itr; |
221 | u16 rx_itr; | 259 | u16 rx_itr; |
222 | 260 | ||
@@ -228,35 +266,20 @@ struct igb_adapter { | |||
228 | unsigned long led_status; | 266 | unsigned long led_status; |
229 | 267 | ||
230 | /* TX */ | 268 | /* TX */ |
231 | struct igb_ring *tx_ring; /* One per active queue */ | 269 | struct igb_ring *tx_ring[16]; |
232 | unsigned int restart_queue; | ||
233 | unsigned long tx_queue_len; | ||
234 | u32 txd_cmd; | ||
235 | u32 gotc; | ||
236 | u64 gotc_old; | ||
237 | u64 tpt_old; | ||
238 | u64 colc_old; | ||
239 | u32 tx_timeout_count; | 270 | u32 tx_timeout_count; |
240 | 271 | ||
241 | /* RX */ | 272 | /* RX */ |
242 | struct igb_ring *rx_ring; /* One per active queue */ | 273 | struct igb_ring *rx_ring[16]; |
243 | int num_tx_queues; | 274 | int num_tx_queues; |
244 | int num_rx_queues; | 275 | int num_rx_queues; |
245 | 276 | ||
246 | u64 hw_csum_err; | ||
247 | u64 hw_csum_good; | ||
248 | u32 alloc_rx_buff_failed; | ||
249 | u32 gorc; | ||
250 | u64 gorc_old; | ||
251 | u16 rx_ps_hdr_size; | ||
252 | u32 max_frame_size; | 277 | u32 max_frame_size; |
253 | u32 min_frame_size; | 278 | u32 min_frame_size; |
254 | 279 | ||
255 | /* OS defined structs */ | 280 | /* OS defined structs */ |
256 | struct net_device *netdev; | 281 | struct net_device *netdev; |
257 | struct napi_struct napi; | ||
258 | struct pci_dev *pdev; | 282 | struct pci_dev *pdev; |
259 | struct net_device_stats net_stats; | ||
260 | struct cyclecounter cycles; | 283 | struct cyclecounter cycles; |
261 | struct timecounter clock; | 284 | struct timecounter clock; |
262 | struct timecompare compare; | 285 | struct timecompare compare; |
@@ -273,6 +296,9 @@ struct igb_adapter { | |||
273 | struct igb_ring test_rx_ring; | 296 | struct igb_ring test_rx_ring; |
274 | 297 | ||
275 | int msg_enable; | 298 | int msg_enable; |
299 | |||
300 | unsigned int num_q_vectors; | ||
301 | struct igb_q_vector *q_vector[MAX_Q_VECTORS]; | ||
276 | struct msix_entry *msix_entries; | 302 | struct msix_entry *msix_entries; |
277 | u32 eims_enable_mask; | 303 | u32 eims_enable_mask; |
278 | u32 eims_other; | 304 | u32 eims_other; |
@@ -283,18 +309,20 @@ struct igb_adapter { | |||
283 | u32 eeprom_wol; | 309 | u32 eeprom_wol; |
284 | 310 | ||
285 | struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES]; | 311 | struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES]; |
286 | unsigned int tx_ring_count; | 312 | u16 tx_ring_count; |
287 | unsigned int rx_ring_count; | 313 | u16 rx_ring_count; |
288 | unsigned int vfs_allocated_count; | 314 | unsigned int vfs_allocated_count; |
289 | struct vf_data_storage *vf_data; | 315 | struct vf_data_storage *vf_data; |
316 | u32 rss_queues; | ||
290 | }; | 317 | }; |
291 | 318 | ||
292 | #define IGB_FLAG_HAS_MSI (1 << 0) | 319 | #define IGB_FLAG_HAS_MSI (1 << 0) |
293 | #define IGB_FLAG_DCA_ENABLED (1 << 1) | 320 | #define IGB_FLAG_DCA_ENABLED (1 << 1) |
294 | #define IGB_FLAG_QUAD_PORT_A (1 << 2) | 321 | #define IGB_FLAG_QUAD_PORT_A (1 << 2) |
295 | #define IGB_FLAG_NEED_CTX_IDX (1 << 3) | 322 | #define IGB_FLAG_QUEUE_PAIRS (1 << 3) |
296 | #define IGB_FLAG_RX_CSUM_DISABLED (1 << 4) | ||
297 | 323 | ||
324 | #define IGB_82576_TSYNC_SHIFT 19 | ||
325 | #define IGB_82580_TSYNC_SHIFT 24 | ||
298 | enum e1000_state_t { | 326 | enum e1000_state_t { |
299 | __IGB_TESTING, | 327 | __IGB_TESTING, |
300 | __IGB_RESETTING, | 328 | __IGB_RESETTING, |
@@ -314,12 +342,22 @@ extern void igb_down(struct igb_adapter *); | |||
314 | extern void igb_reinit_locked(struct igb_adapter *); | 342 | extern void igb_reinit_locked(struct igb_adapter *); |
315 | extern void igb_reset(struct igb_adapter *); | 343 | extern void igb_reset(struct igb_adapter *); |
316 | extern int igb_set_spd_dplx(struct igb_adapter *, u16); | 344 | extern int igb_set_spd_dplx(struct igb_adapter *, u16); |
317 | extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *); | 345 | extern int igb_setup_tx_resources(struct igb_ring *); |
318 | extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *); | 346 | extern int igb_setup_rx_resources(struct igb_ring *); |
319 | extern void igb_free_tx_resources(struct igb_ring *); | 347 | extern void igb_free_tx_resources(struct igb_ring *); |
320 | extern void igb_free_rx_resources(struct igb_ring *); | 348 | extern void igb_free_rx_resources(struct igb_ring *); |
349 | extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); | ||
350 | extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); | ||
351 | extern void igb_setup_tctl(struct igb_adapter *); | ||
352 | extern void igb_setup_rctl(struct igb_adapter *); | ||
353 | extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *); | ||
354 | extern void igb_unmap_and_free_tx_resource(struct igb_ring *, | ||
355 | struct igb_buffer *); | ||
356 | extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int); | ||
321 | extern void igb_update_stats(struct igb_adapter *); | 357 | extern void igb_update_stats(struct igb_adapter *); |
358 | extern bool igb_has_link(struct igb_adapter *adapter); | ||
322 | extern void igb_set_ethtool_ops(struct net_device *); | 359 | extern void igb_set_ethtool_ops(struct net_device *); |
360 | extern void igb_power_up_link(struct igb_adapter *); | ||
323 | 361 | ||
324 | static inline s32 igb_reset_phy(struct e1000_hw *hw) | 362 | static inline s32 igb_reset_phy(struct e1000_hw *hw) |
325 | { | 363 | { |
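
Annotation: the struct igb_q_vector introduced in this file ties one MSI-X vector and one NAPI context to at most one Tx and one Rx ring, replacing the per-ring napi_struct and buddy pointer. A simplified sketch of the poll pattern this layout enables; the sketch_* helper names are illustrative stand-ins, not the driver's actual functions:

static int igb_poll_sketch(struct napi_struct *napi, int budget)
{
        struct igb_q_vector *q_vector =
                container_of(napi, struct igb_q_vector, napi);
        bool clean_complete = true;

        if (q_vector->tx_ring)
                clean_complete = sketch_clean_tx(q_vector->tx_ring);
        if (q_vector->rx_ring)
                clean_complete &= sketch_clean_rx(q_vector->rx_ring, budget);

        if (!clean_complete)
                return budget;          /* more work: stay on the poll list */

        napi_complete(napi);
        sketch_rearm_irq(q_vector);     /* re-enable just this vector */
        return 0;
}
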
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index b243ed3b0c36..743038490104 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/if_ether.h> | 35 | #include <linux/if_ether.h> |
36 | #include <linux/ethtool.h> | 36 | #include <linux/ethtool.h> |
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/slab.h> | ||
38 | 39 | ||
39 | #include "igb.h" | 40 | #include "igb.h" |
40 | 41 | ||
@@ -44,78 +45,94 @@ struct igb_stats { | |||
44 | int stat_offset; | 45 | int stat_offset; |
45 | }; | 46 | }; |
46 | 47 | ||
47 | #define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \ | 48 | #define IGB_STAT(_name, _stat) { \ |
48 | offsetof(struct igb_adapter, m) | 49 | .stat_string = _name, \ |
50 | .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ | ||
51 | .stat_offset = offsetof(struct igb_adapter, _stat) \ | ||
52 | } | ||
49 | static const struct igb_stats igb_gstrings_stats[] = { | 53 | static const struct igb_stats igb_gstrings_stats[] = { |
50 | { "rx_packets", IGB_STAT(stats.gprc) }, | 54 | IGB_STAT("rx_packets", stats.gprc), |
51 | { "tx_packets", IGB_STAT(stats.gptc) }, | 55 | IGB_STAT("tx_packets", stats.gptc), |
52 | { "rx_bytes", IGB_STAT(stats.gorc) }, | 56 | IGB_STAT("rx_bytes", stats.gorc), |
53 | { "tx_bytes", IGB_STAT(stats.gotc) }, | 57 | IGB_STAT("tx_bytes", stats.gotc), |
54 | { "rx_broadcast", IGB_STAT(stats.bprc) }, | 58 | IGB_STAT("rx_broadcast", stats.bprc), |
55 | { "tx_broadcast", IGB_STAT(stats.bptc) }, | 59 | IGB_STAT("tx_broadcast", stats.bptc), |
56 | { "rx_multicast", IGB_STAT(stats.mprc) }, | 60 | IGB_STAT("rx_multicast", stats.mprc), |
57 | { "tx_multicast", IGB_STAT(stats.mptc) }, | 61 | IGB_STAT("tx_multicast", stats.mptc), |
58 | { "rx_errors", IGB_STAT(net_stats.rx_errors) }, | 62 | IGB_STAT("multicast", stats.mprc), |
59 | { "tx_errors", IGB_STAT(net_stats.tx_errors) }, | 63 | IGB_STAT("collisions", stats.colc), |
60 | { "tx_dropped", IGB_STAT(net_stats.tx_dropped) }, | 64 | IGB_STAT("rx_crc_errors", stats.crcerrs), |
61 | { "multicast", IGB_STAT(stats.mprc) }, | 65 | IGB_STAT("rx_no_buffer_count", stats.rnbc), |
62 | { "collisions", IGB_STAT(stats.colc) }, | 66 | IGB_STAT("rx_missed_errors", stats.mpc), |
63 | { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) }, | 67 | IGB_STAT("tx_aborted_errors", stats.ecol), |
64 | { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) }, | 68 | IGB_STAT("tx_carrier_errors", stats.tncrs), |
65 | { "rx_crc_errors", IGB_STAT(stats.crcerrs) }, | 69 | IGB_STAT("tx_window_errors", stats.latecol), |
66 | { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) }, | 70 | IGB_STAT("tx_abort_late_coll", stats.latecol), |
67 | { "rx_no_buffer_count", IGB_STAT(stats.rnbc) }, | 71 | IGB_STAT("tx_deferred_ok", stats.dc), |
68 | { "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) }, | 72 | IGB_STAT("tx_single_coll_ok", stats.scc), |
69 | { "rx_missed_errors", IGB_STAT(stats.mpc) }, | 73 | IGB_STAT("tx_multi_coll_ok", stats.mcc), |
70 | { "tx_aborted_errors", IGB_STAT(stats.ecol) }, | 74 | IGB_STAT("tx_timeout_count", tx_timeout_count), |
71 | { "tx_carrier_errors", IGB_STAT(stats.tncrs) }, | 75 | IGB_STAT("rx_long_length_errors", stats.roc), |
72 | { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) }, | 76 | IGB_STAT("rx_short_length_errors", stats.ruc), |
73 | { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) }, | 77 | IGB_STAT("rx_align_errors", stats.algnerrc), |
74 | { "tx_window_errors", IGB_STAT(stats.latecol) }, | 78 | IGB_STAT("tx_tcp_seg_good", stats.tsctc), |
75 | { "tx_abort_late_coll", IGB_STAT(stats.latecol) }, | 79 | IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), |
76 | { "tx_deferred_ok", IGB_STAT(stats.dc) }, | 80 | IGB_STAT("rx_flow_control_xon", stats.xonrxc), |
77 | { "tx_single_coll_ok", IGB_STAT(stats.scc) }, | 81 | IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), |
78 | { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, | 82 | IGB_STAT("tx_flow_control_xon", stats.xontxc), |
79 | { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, | 83 | IGB_STAT("tx_flow_control_xoff", stats.xofftxc), |
80 | { "tx_restart_queue", IGB_STAT(restart_queue) }, | 84 | IGB_STAT("rx_long_byte_count", stats.gorc), |
81 | { "rx_long_length_errors", IGB_STAT(stats.roc) }, | 85 | IGB_STAT("tx_dma_out_of_sync", stats.doosync), |
82 | { "rx_short_length_errors", IGB_STAT(stats.ruc) }, | 86 | IGB_STAT("tx_smbus", stats.mgptc), |
83 | { "rx_align_errors", IGB_STAT(stats.algnerrc) }, | 87 | IGB_STAT("rx_smbus", stats.mgprc), |
84 | { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) }, | 88 | IGB_STAT("dropped_smbus", stats.mgpdc), |
85 | { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) }, | 89 | }; |
86 | { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) }, | 90 | |
87 | { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, | 91 | #define IGB_NETDEV_STAT(_net_stat) { \ |
88 | { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, | 92 | .stat_string = __stringify(_net_stat), \ |
89 | { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, | 93 | .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ |
90 | { "rx_long_byte_count", IGB_STAT(stats.gorc) }, | 94 | .stat_offset = offsetof(struct net_device_stats, _net_stat) \ |
91 | { "rx_csum_offload_good", IGB_STAT(hw_csum_good) }, | 95 | } |
92 | { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) }, | 96 | static const struct igb_stats igb_gstrings_net_stats[] = { |
93 | { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, | 97 | IGB_NETDEV_STAT(rx_errors), |
94 | { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) }, | 98 | IGB_NETDEV_STAT(tx_errors), |
95 | { "tx_smbus", IGB_STAT(stats.mgptc) }, | 99 | IGB_NETDEV_STAT(tx_dropped), |
96 | { "rx_smbus", IGB_STAT(stats.mgprc) }, | 100 | IGB_NETDEV_STAT(rx_length_errors), |
97 | { "dropped_smbus", IGB_STAT(stats.mgpdc) }, | 101 | IGB_NETDEV_STAT(rx_over_errors), |
102 | IGB_NETDEV_STAT(rx_frame_errors), | ||
103 | IGB_NETDEV_STAT(rx_fifo_errors), | ||
104 | IGB_NETDEV_STAT(tx_fifo_errors), | ||
105 | IGB_NETDEV_STAT(tx_heartbeat_errors) | ||
98 | }; | 106 | }; |
99 | 107 | ||
100 | #define IGB_QUEUE_STATS_LEN \ | ||
101 | (((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \ | ||
102 | (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \ | ||
103 | ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \ | ||
104 | (sizeof(struct igb_tx_queue_stats) / sizeof(u64)))) | ||
105 | #define IGB_GLOBAL_STATS_LEN \ | 108 | #define IGB_GLOBAL_STATS_LEN \ |
106 | sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) | 109 | (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) |
107 | #define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) | 110 | #define IGB_NETDEV_STATS_LEN \ |
111 | (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) | ||
112 | #define IGB_RX_QUEUE_STATS_LEN \ | ||
113 | (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) | ||
114 | #define IGB_TX_QUEUE_STATS_LEN \ | ||
115 | (sizeof(struct igb_tx_queue_stats) / sizeof(u64)) | ||
116 | #define IGB_QUEUE_STATS_LEN \ | ||
117 | ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ | ||
118 | IGB_RX_QUEUE_STATS_LEN) + \ | ||
119 | (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ | ||
120 | IGB_TX_QUEUE_STATS_LEN)) | ||
121 | #define IGB_STATS_LEN \ | ||
122 | (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) | ||
123 | |||
108 | static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { | 124 | static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { |
109 | "Register test (offline)", "Eeprom test (offline)", | 125 | "Register test (offline)", "Eeprom test (offline)", |
110 | "Interrupt test (offline)", "Loopback test (offline)", | 126 | "Interrupt test (offline)", "Loopback test (offline)", |
111 | "Link test (on/offline)" | 127 | "Link test (on/offline)" |
112 | }; | 128 | }; |
113 | #define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN | 129 | #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) |
114 | 130 | ||
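
Annotation: the reworked IGB_STAT/IGB_NETDEV_STAT macros record each statistic's name, size, and byte offset, so the ethtool handler can copy values generically rather than naming every field. A toy, runnable illustration of the offsetof technique (the struct and names here are invented for the demo):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_stats { unsigned long long gprc, gptc; };

struct toy_desc {
        const char *name;
        int size;
        int offset;
};

#define TOY_STAT(n, f) { n, sizeof(((struct toy_stats *)0)->f), \
                         offsetof(struct toy_stats, f) }

static const struct toy_desc toy_table[] = {
        TOY_STAT("rx_packets", gprc),
        TOY_STAT("tx_packets", gptc),
};

int main(void)
{
        struct toy_stats s = { .gprc = 5, .gptc = 7 };
        size_t i;

        for (i = 0; i < sizeof(toy_table) / sizeof(toy_table[0]); i++) {
                unsigned long long v = 0;
                memcpy(&v, (char *)&s + toy_table[i].offset,
                       toy_table[i].size);
                printf("%s = %llu\n", toy_table[i].name, v);
        }
        return 0;
}
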
115 | static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | 131 | static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) |
116 | { | 132 | { |
117 | struct igb_adapter *adapter = netdev_priv(netdev); | 133 | struct igb_adapter *adapter = netdev_priv(netdev); |
118 | struct e1000_hw *hw = &adapter->hw; | 134 | struct e1000_hw *hw = &adapter->hw; |
135 | u32 status; | ||
119 | 136 | ||
120 | if (hw->phy.media_type == e1000_media_type_copper) { | 137 | if (hw->phy.media_type == e1000_media_type_copper) { |
121 | 138 | ||
@@ -150,17 +167,20 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
150 | 167 | ||
151 | ecmd->transceiver = XCVR_INTERNAL; | 168 | ecmd->transceiver = XCVR_INTERNAL; |
152 | 169 | ||
153 | if (rd32(E1000_STATUS) & E1000_STATUS_LU) { | 170 | status = rd32(E1000_STATUS); |
154 | 171 | ||
155 | adapter->hw.mac.ops.get_speed_and_duplex(hw, | 172 | if (status & E1000_STATUS_LU) { |
156 | &adapter->link_speed, | ||
157 | &adapter->link_duplex); | ||
158 | ecmd->speed = adapter->link_speed; | ||
159 | 173 | ||
160 | /* unfortunately FULL_DUPLEX != DUPLEX_FULL | 174 | if ((status & E1000_STATUS_SPEED_1000) || |
161 | * and HALF_DUPLEX != DUPLEX_HALF */ | 175 | hw->phy.media_type != e1000_media_type_copper) |
176 | ecmd->speed = SPEED_1000; | ||
177 | else if (status & E1000_STATUS_SPEED_100) | ||
178 | ecmd->speed = SPEED_100; | ||
179 | else | ||
180 | ecmd->speed = SPEED_10; | ||
162 | 181 | ||
163 | if (adapter->link_duplex == FULL_DUPLEX) | 182 | if ((status & E1000_STATUS_FD) || |
183 | hw->phy.media_type != e1000_media_type_copper) | ||
164 | ecmd->duplex = DUPLEX_FULL; | 184 | ecmd->duplex = DUPLEX_FULL; |
165 | else | 185 | else |
166 | ecmd->duplex = DUPLEX_HALF; | 186 | ecmd->duplex = DUPLEX_HALF; |
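
Annotation: igb_get_settings() now decodes speed and duplex directly from a single STATUS read instead of cached watchdog state, so a stale value can no longer be reported. A runnable model of that decode; the bit values below match the standard e1000 STATUS layout:

#include <stdint.h>
#include <stdio.h>

#define STATUS_FD         0x01  /* full duplex */
#define STATUS_LU         0x02  /* link up */
#define STATUS_SPEED_100  0x40
#define STATUS_SPEED_1000 0x80

static void decode_status(uint32_t status)
{
        if (!(status & STATUS_LU)) {
                printf("link down\n");
                return;
        }
        printf("%d Mb/s, %s duplex\n",
               (status & STATUS_SPEED_1000) ? 1000 :
               (status & STATUS_SPEED_100)  ? 100 : 10,
               (status & STATUS_FD) ? "full" : "half");
}

int main(void)
{
        decode_status(STATUS_LU | STATUS_SPEED_1000 | STATUS_FD);
        return 0;
}
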
@@ -215,6 +235,24 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
215 | return 0; | 235 | return 0; |
216 | } | 236 | } |
217 | 237 | ||
238 | static u32 igb_get_link(struct net_device *netdev) | ||
239 | { | ||
240 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
241 | struct e1000_mac_info *mac = &adapter->hw.mac; | ||
242 | |||
243 | /* | ||
244 | * If the link is not reported up to netdev, interrupts are disabled, | ||
245 | * and so the physical link state may have changed since we last | ||
246 | * looked. Set get_link_status to make sure that the true link | ||
247 | * state is interrogated, rather than pulling a cached and possibly | ||
248 | * stale link state from the driver. | ||
249 | */ | ||
250 | if (!netif_carrier_ok(netdev)) | ||
251 | mac->get_link_status = 1; | ||
252 | |||
253 | return igb_has_link(adapter); | ||
254 | } | ||
255 | |||
218 | static void igb_get_pauseparam(struct net_device *netdev, | 256 | static void igb_get_pauseparam(struct net_device *netdev, |
219 | struct ethtool_pauseparam *pause) | 257 | struct ethtool_pauseparam *pause) |
220 | { | 258 | { |
@@ -251,8 +289,9 @@ static int igb_set_pauseparam(struct net_device *netdev, | |||
251 | if (netif_running(adapter->netdev)) { | 289 | if (netif_running(adapter->netdev)) { |
252 | igb_down(adapter); | 290 | igb_down(adapter); |
253 | igb_up(adapter); | 291 | igb_up(adapter); |
254 | } else | 292 | } else { |
255 | igb_reset(adapter); | 293 | igb_reset(adapter); |
294 | } | ||
256 | } else { | 295 | } else { |
257 | if (pause->rx_pause && pause->tx_pause) | 296 | if (pause->rx_pause && pause->tx_pause) |
258 | hw->fc.requested_mode = e1000_fc_full; | 297 | hw->fc.requested_mode = e1000_fc_full; |
@@ -276,17 +315,20 @@ static int igb_set_pauseparam(struct net_device *netdev, | |||
276 | static u32 igb_get_rx_csum(struct net_device *netdev) | 315 | static u32 igb_get_rx_csum(struct net_device *netdev) |
277 | { | 316 | { |
278 | struct igb_adapter *adapter = netdev_priv(netdev); | 317 | struct igb_adapter *adapter = netdev_priv(netdev); |
279 | return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED); | 318 | return !!(adapter->rx_ring[0]->flags & IGB_RING_FLAG_RX_CSUM); |
280 | } | 319 | } |
281 | 320 | ||
282 | static int igb_set_rx_csum(struct net_device *netdev, u32 data) | 321 | static int igb_set_rx_csum(struct net_device *netdev, u32 data) |
283 | { | 322 | { |
284 | struct igb_adapter *adapter = netdev_priv(netdev); | 323 | struct igb_adapter *adapter = netdev_priv(netdev); |
324 | int i; | ||
285 | 325 | ||
286 | if (data) | 326 | for (i = 0; i < adapter->num_rx_queues; i++) { |
287 | adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED; | 327 | if (data) |
288 | else | 328 | adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM; |
289 | adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED; | 329 | else |
330 | adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM; | ||
331 | } | ||
290 | 332 | ||
291 | return 0; | 333 | return 0; |
292 | } | 334 | } |
@@ -302,7 +344,7 @@ static int igb_set_tx_csum(struct net_device *netdev, u32 data) | |||
302 | 344 | ||
303 | if (data) { | 345 | if (data) { |
304 | netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | 346 | netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); |
305 | if (adapter->hw.mac.type == e1000_82576) | 347 | if (adapter->hw.mac.type >= e1000_82576) |
306 | netdev->features |= NETIF_F_SCTP_CSUM; | 348 | netdev->features |= NETIF_F_SCTP_CSUM; |
307 | } else { | 349 | } else { |
308 | netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 350 | netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
@@ -496,19 +538,10 @@ static void igb_get_regs(struct net_device *netdev, | |||
496 | regs_buff[119] = adapter->stats.scvpc; | 538 | regs_buff[119] = adapter->stats.scvpc; |
497 | regs_buff[120] = adapter->stats.hrmpc; | 539 | regs_buff[120] = adapter->stats.hrmpc; |
498 | 540 | ||
499 | /* These should probably be added to e1000_regs.h instead */ | ||
500 | #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4)) | ||
501 | #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) | ||
502 | #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) | ||
503 | #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) | ||
504 | #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) | ||
505 | #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) | ||
506 | #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) | ||
507 | |||
508 | for (i = 0; i < 4; i++) | 541 | for (i = 0; i < 4; i++) |
509 | regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); | 542 | regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); |
510 | for (i = 0; i < 4; i++) | 543 | for (i = 0; i < 4; i++) |
511 | regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i)); | 544 | regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); |
512 | for (i = 0; i < 4; i++) | 545 | for (i = 0; i < 4; i++) |
513 | regs_buff[129 + i] = rd32(E1000_RDBAL(i)); | 546 | regs_buff[129 + i] = rd32(E1000_RDBAL(i)); |
514 | for (i = 0; i < 4; i++) | 547 | for (i = 0; i < 4; i++) |
@@ -733,17 +766,17 @@ static int igb_set_ringparam(struct net_device *netdev, | |||
733 | struct igb_adapter *adapter = netdev_priv(netdev); | 766 | struct igb_adapter *adapter = netdev_priv(netdev); |
734 | struct igb_ring *temp_ring; | 767 | struct igb_ring *temp_ring; |
735 | int i, err = 0; | 768 | int i, err = 0; |
736 | u32 new_rx_count, new_tx_count; | 769 | u16 new_rx_count, new_tx_count; |
737 | 770 | ||
738 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | 771 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) |
739 | return -EINVAL; | 772 | return -EINVAL; |
740 | 773 | ||
741 | new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD); | 774 | new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); |
742 | new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD); | 775 | new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); |
743 | new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); | 776 | new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); |
744 | 777 | ||
745 | new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD); | 778 | new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); |
746 | new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD); | 779 | new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); |
747 | new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); | 780 | new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); |
748 | 781 | ||
749 | if ((new_tx_count == adapter->tx_ring_count) && | 782 | if ((new_tx_count == adapter->tx_ring_count) && |
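
Annotation: the ring-size handling clamps the request into range before rounding up to the descriptor multiple, and clamping against the maximum first in u32 avoids truncating a huge request when the value is narrowed to u16. A runnable model with stand-in limits in place of IGB_MIN_RXD/IGB_MAX_RXD:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

static uint16_t clamp_ring_count(uint32_t requested)
{
        const uint16_t min_d = 80, max_d = 4096, multiple = 8;
        uint32_t n = requested;

        if (n > max_d)          /* min_t(u32, requested, MAX) first ... */
                n = max_d;
        if (n < min_d)          /* ... then max_t(u16, n, MIN) is safe */
                n = min_d;
        return ALIGN_UP((uint16_t)n, multiple);
}

int main(void)
{
        /* prints 80 4096 104 */
        printf("%u %u %u\n", clamp_ring_count(0),
               clamp_ring_count(1000000), clamp_ring_count(100));
        return 0;
}
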
@@ -757,9 +790,9 @@ static int igb_set_ringparam(struct net_device *netdev, | |||
757 | 790 | ||
758 | if (!netif_running(adapter->netdev)) { | 791 | if (!netif_running(adapter->netdev)) { |
759 | for (i = 0; i < adapter->num_tx_queues; i++) | 792 | for (i = 0; i < adapter->num_tx_queues; i++) |
760 | adapter->tx_ring[i].count = new_tx_count; | 793 | adapter->tx_ring[i]->count = new_tx_count; |
761 | for (i = 0; i < adapter->num_rx_queues; i++) | 794 | for (i = 0; i < adapter->num_rx_queues; i++) |
762 | adapter->rx_ring[i].count = new_rx_count; | 795 | adapter->rx_ring[i]->count = new_rx_count; |
763 | adapter->tx_ring_count = new_tx_count; | 796 | adapter->tx_ring_count = new_tx_count; |
764 | adapter->rx_ring_count = new_rx_count; | 797 | adapter->rx_ring_count = new_rx_count; |
765 | goto clear_reset; | 798 | goto clear_reset; |
@@ -783,12 +816,12 @@ static int igb_set_ringparam(struct net_device *netdev, | |||
783 | * to the tx and rx ring structs. | 816 | * to the tx and rx ring structs. |
784 | */ | 817 | */ |
785 | if (new_tx_count != adapter->tx_ring_count) { | 818 | if (new_tx_count != adapter->tx_ring_count) { |
786 | memcpy(temp_ring, adapter->tx_ring, | ||
787 | adapter->num_tx_queues * sizeof(struct igb_ring)); | ||
788 | |||
789 | for (i = 0; i < adapter->num_tx_queues; i++) { | 819 | for (i = 0; i < adapter->num_tx_queues; i++) { |
820 | memcpy(&temp_ring[i], adapter->tx_ring[i], | ||
821 | sizeof(struct igb_ring)); | ||
822 | |||
790 | temp_ring[i].count = new_tx_count; | 823 | temp_ring[i].count = new_tx_count; |
791 | err = igb_setup_tx_resources(adapter, &temp_ring[i]); | 824 | err = igb_setup_tx_resources(&temp_ring[i]); |
792 | if (err) { | 825 | if (err) { |
793 | while (i) { | 826 | while (i) { |
794 | i--; | 827 | i--; |
@@ -798,22 +831,23 @@ static int igb_set_ringparam(struct net_device *netdev, | |||
798 | } | 831 | } |
799 | } | 832 | } |
800 | 833 | ||
801 | for (i = 0; i < adapter->num_tx_queues; i++) | 834 | for (i = 0; i < adapter->num_tx_queues; i++) { |
802 | igb_free_tx_resources(&adapter->tx_ring[i]); | 835 | igb_free_tx_resources(adapter->tx_ring[i]); |
803 | 836 | ||
804 | memcpy(adapter->tx_ring, temp_ring, | 837 | memcpy(adapter->tx_ring[i], &temp_ring[i], |
805 | adapter->num_tx_queues * sizeof(struct igb_ring)); | 838 | sizeof(struct igb_ring)); |
839 | } | ||
806 | 840 | ||
807 | adapter->tx_ring_count = new_tx_count; | 841 | adapter->tx_ring_count = new_tx_count; |
808 | } | 842 | } |
809 | 843 | ||
810 | if (new_rx_count != adapter->rx_ring->count) { | 844 | if (new_rx_count != adapter->rx_ring_count) { |
811 | memcpy(temp_ring, adapter->rx_ring, | ||
812 | adapter->num_rx_queues * sizeof(struct igb_ring)); | ||
813 | |||
814 | for (i = 0; i < adapter->num_rx_queues; i++) { | 845 | for (i = 0; i < adapter->num_rx_queues; i++) { |
846 | memcpy(&temp_ring[i], adapter->rx_ring[i], | ||
847 | sizeof(struct igb_ring)); | ||
848 | |||
815 | temp_ring[i].count = new_rx_count; | 849 | temp_ring[i].count = new_rx_count; |
816 | err = igb_setup_rx_resources(adapter, &temp_ring[i]); | 850 | err = igb_setup_rx_resources(&temp_ring[i]); |
817 | if (err) { | 851 | if (err) { |
818 | while (i) { | 852 | while (i) { |
819 | i--; | 853 | i--; |
@@ -824,11 +858,12 @@ static int igb_set_ringparam(struct net_device *netdev, | |||
824 | 858 | ||
825 | } | 859 | } |
826 | 860 | ||
827 | for (i = 0; i < adapter->num_rx_queues; i++) | 861 | for (i = 0; i < adapter->num_rx_queues; i++) { |
828 | igb_free_rx_resources(&adapter->rx_ring[i]); | 862 | igb_free_rx_resources(adapter->rx_ring[i]); |
829 | 863 | ||
830 | memcpy(adapter->rx_ring, temp_ring, | 864 | memcpy(adapter->rx_ring[i], &temp_ring[i], |
831 | adapter->num_rx_queues * sizeof(struct igb_ring)); | 865 | sizeof(struct igb_ring)); |
866 | } | ||
832 | 867 | ||
833 | adapter->rx_ring_count = new_rx_count; | 868 | adapter->rx_ring_count = new_rx_count; |
834 | } | 869 | } |
@@ -867,6 +902,49 @@ struct igb_reg_test { | |||
867 | #define TABLE64_TEST_LO 5 | 902 | #define TABLE64_TEST_LO 5 |
868 | #define TABLE64_TEST_HI 6 | 903 | #define TABLE64_TEST_HI 6 |
869 | 904 | ||
905 | /* 82580 reg test */ | ||
906 | static struct igb_reg_test reg_test_82580[] = { | ||
907 | { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, | ||
908 | { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, | ||
909 | { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, | ||
910 | { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, | ||
911 | { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, | ||
912 | { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, | ||
913 | { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, | ||
914 | { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, | ||
915 | { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, | ||
916 | { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, | ||
917 | /* RDH is read-only for 82580, only test RDT. */ | ||
918 | { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, | ||
919 | { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, | ||
920 | { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, | ||
921 | { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, | ||
922 | { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, | ||
923 | { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, | ||
924 | { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, | ||
925 | { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, | ||
926 | { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, | ||
927 | { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, | ||
928 | { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, | ||
929 | { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, | ||
930 | { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, | ||
931 | { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, | ||
932 | { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, | ||
933 | { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, | ||
934 | { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, | ||
935 | { E1000_RA, 0, 16, TABLE64_TEST_LO, | ||
936 | 0xFFFFFFFF, 0xFFFFFFFF }, | ||
937 | { E1000_RA, 0, 16, TABLE64_TEST_HI, | ||
938 | 0x83FFFFFF, 0xFFFFFFFF }, | ||
939 | { E1000_RA2, 0, 8, TABLE64_TEST_LO, | ||
940 | 0xFFFFFFFF, 0xFFFFFFFF }, | ||
941 | { E1000_RA2, 0, 8, TABLE64_TEST_HI, | ||
942 | 0x83FFFFFF, 0xFFFFFFFF }, | ||
943 | { E1000_MTA, 0, 128, TABLE32_TEST, | ||
944 | 0xFFFFFFFF, 0xFFFFFFFF }, | ||
945 | { 0, 0, 0, 0 } | ||
946 | }; | ||
947 | |||
870 | /* 82576 reg test */ | 948 | /* 82576 reg test */ |
871 | static struct igb_reg_test reg_test_82576[] = { | 949 | static struct igb_reg_test reg_test_82576[] = { |
872 | { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, | 950 | { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, |
@@ -944,7 +1022,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, | |||
944 | { | 1022 | { |
945 | struct e1000_hw *hw = &adapter->hw; | 1023 | struct e1000_hw *hw = &adapter->hw; |
946 | u32 pat, val; | 1024 | u32 pat, val; |
947 | u32 _test[] = | 1025 | static const u32 _test[] = |
948 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; | 1026 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; |
949 | for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { | 1027 | for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { |
950 | wr32(reg, (_test[pat] & write)); | 1028 | wr32(reg, (_test[pat] & write)); |
@@ -957,6 +1035,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, | |||
957 | return 1; | 1035 | return 1; |
958 | } | 1036 | } |
959 | } | 1037 | } |
1038 | |||
960 | return 0; | 1039 | return 0; |
961 | } | 1040 | } |
962 | 1041 | ||
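
Annotation: each reg_test table row pairs a register with a compare mask and a write mask; the test writes each pattern through the write mask and checks the masked read-back, so bits the hardware legitimately forces (for example the 128-byte alignment bits of RDBAL) are excluded from the comparison. A runnable miniature of that logic; a plain variable stands in for the MMIO register, and the comparison mirrors my reading of reg_pattern_test(), whose body is only partly shown above:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;

static int pattern_test(uint32_t mask, uint32_t write)
{
        static const uint32_t pats[] =
                { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF };
        size_t i;

        for (i = 0; i < sizeof(pats) / sizeof(pats[0]); i++) {
                fake_reg = pats[i] & write;     /* wr32(reg, pat & write) */
                if ((fake_reg & mask) != (pats[i] & write & mask))
                        return 1;               /* masked read-back mismatch */
        }
        return 0;
}

int main(void)
{
        /* an RDBAL-style row: write everything, ignore the alignment bits */
        printf("%s\n", pattern_test(0xFFFFFF80, 0xFFFFFFFF) ?
               "FAIL" : "PASS");
        return 0;
}
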
@@ -974,6 +1053,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, | |||
974 | *data = reg; | 1053 | *data = reg; |
975 | return 1; | 1054 | return 1; |
976 | } | 1055 | } |
1056 | |||
977 | return 0; | 1057 | return 0; |
978 | } | 1058 | } |
979 | 1059 | ||
@@ -996,14 +1076,18 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) | |||
996 | u32 value, before, after; | 1076 | u32 value, before, after; |
997 | u32 i, toggle; | 1077 | u32 i, toggle; |
998 | 1078 | ||
999 | toggle = 0x7FFFF3FF; | ||
1000 | |||
1001 | switch (adapter->hw.mac.type) { | 1079 | switch (adapter->hw.mac.type) { |
1080 | case e1000_82580: | ||
1081 | test = reg_test_82580; | ||
1082 | toggle = 0x7FEFF3FF; | ||
1083 | break; | ||
1002 | case e1000_82576: | 1084 | case e1000_82576: |
1003 | test = reg_test_82576; | 1085 | test = reg_test_82576; |
1086 | toggle = 0x7FFFF3FF; | ||
1004 | break; | 1087 | break; |
1005 | default: | 1088 | default: |
1006 | test = reg_test_82575; | 1089 | test = reg_test_82575; |
1090 | toggle = 0x7FFFF3FF; | ||
1007 | break; | 1091 | break; |
1008 | } | 1092 | } |
1009 | 1093 | ||
@@ -1081,8 +1165,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) | |||
1081 | *data = 0; | 1165 | *data = 0; |
1082 | /* Read and add up the contents of the EEPROM */ | 1166 | /* Read and add up the contents of the EEPROM */ |
1083 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { | 1167 | for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { |
1084 | if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) | 1168 | if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) { |
1085 | < 0) { | ||
1086 | *data = 1; | 1169 | *data = 1; |
1087 | break; | 1170 | break; |
1088 | } | 1171 | } |
@@ -1098,8 +1181,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) | |||
1098 | 1181 | ||
1099 | static irqreturn_t igb_test_intr(int irq, void *data) | 1182 | static irqreturn_t igb_test_intr(int irq, void *data) |
1100 | { | 1183 | { |
1101 | struct net_device *netdev = (struct net_device *) data; | 1184 | struct igb_adapter *adapter = (struct igb_adapter *) data; |
1102 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
1103 | struct e1000_hw *hw = &adapter->hw; | 1185 | struct e1000_hw *hw = &adapter->hw; |
1104 | 1186 | ||
1105 | adapter->test_icr |= rd32(E1000_ICR); | 1187 | adapter->test_icr |= rd32(E1000_ICR); |
@@ -1117,38 +1199,45 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) | |||
1117 | *data = 0; | 1199 | *data = 0; |
1118 | 1200 | ||
1119 | /* Hook up test interrupt handler just for this test */ | 1201 | /* Hook up test interrupt handler just for this test */ |
1120 | if (adapter->msix_entries) | 1202 | if (adapter->msix_entries) { |
1121 | /* NOTE: we don't test MSI-X interrupts here, yet */ | 1203 | if (request_irq(adapter->msix_entries[0].vector, |
1122 | return 0; | 1204 | igb_test_intr, 0, netdev->name, adapter)) { |
1123 | 1205 | *data = 1; | |
1124 | if (adapter->flags & IGB_FLAG_HAS_MSI) { | 1206 | return -1; |
1207 | } | ||
1208 | } else if (adapter->flags & IGB_FLAG_HAS_MSI) { | ||
1125 | shared_int = false; | 1209 | shared_int = false; |
1126 | if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { | 1210 | if (request_irq(irq, |
1211 | igb_test_intr, 0, netdev->name, adapter)) { | ||
1127 | *data = 1; | 1212 | *data = 1; |
1128 | return -1; | 1213 | return -1; |
1129 | } | 1214 | } |
1130 | } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED, | 1215 | } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, |
1131 | netdev->name, netdev)) { | 1216 | netdev->name, adapter)) { |
1132 | shared_int = false; | 1217 | shared_int = false; |
1133 | } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, | 1218 | } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, |
1134 | netdev->name, netdev)) { | 1219 | netdev->name, adapter)) { |
1135 | *data = 1; | 1220 | *data = 1; |
1136 | return -1; | 1221 | return -1; |
1137 | } | 1222 | } |
1138 | dev_info(&adapter->pdev->dev, "testing %s interrupt\n", | 1223 | dev_info(&adapter->pdev->dev, "testing %s interrupt\n", |
1139 | (shared_int ? "shared" : "unshared")); | 1224 | (shared_int ? "shared" : "unshared")); |
1225 | |||
1140 | /* Disable all the interrupts */ | 1226 | /* Disable all the interrupts */ |
1141 | wr32(E1000_IMC, 0xFFFFFFFF); | 1227 | wr32(E1000_IMC, ~0); |
1142 | msleep(10); | 1228 | msleep(10); |
1143 | 1229 | ||
1144 | /* Define all writable bits for ICS */ | 1230 | /* Define all writable bits for ICS */ |
1145 | switch(hw->mac.type) { | 1231 | switch (hw->mac.type) { |
1146 | case e1000_82575: | 1232 | case e1000_82575: |
1147 | ics_mask = 0x37F47EDD; | 1233 | ics_mask = 0x37F47EDD; |
1148 | break; | 1234 | break; |
1149 | case e1000_82576: | 1235 | case e1000_82576: |
1150 | ics_mask = 0x77D4FBFD; | 1236 | ics_mask = 0x77D4FBFD; |
1151 | break; | 1237 | break; |
1238 | case e1000_82580: | ||
1239 | ics_mask = 0x77DCFED5; | ||
1240 | break; | ||
1152 | default: | 1241 | default: |
1153 | ics_mask = 0x7FFFFFFF; | 1242 | ics_mask = 0x7FFFFFFF; |
1154 | break; | 1243 | break; |
@@ -1232,190 +1321,61 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) | |||
1232 | msleep(10); | 1321 | msleep(10); |
1233 | 1322 | ||
1234 | /* Unhook test interrupt handler */ | 1323 | /* Unhook test interrupt handler */ |
1235 | free_irq(irq, netdev); | 1324 | if (adapter->msix_entries) |
1325 | free_irq(adapter->msix_entries[0].vector, adapter); | ||
1326 | else | ||
1327 | free_irq(irq, adapter); | ||
1236 | 1328 | ||
1237 | return *data; | 1329 | return *data; |
1238 | } | 1330 | } |
1239 | 1331 | ||
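
Annotation: the interrupt-test fix above follows from the request_irq()/free_irq() contract: the free must name the same vector and dev_id cookie as the request, and under MSI-X the test vector is msix_entries[0].vector, not pdev->irq. A condensed sketch of the pairing, leaving out the shared-interrupt probing branches:

        int irq = adapter->msix_entries ?
                  adapter->msix_entries[0].vector : adapter->pdev->irq;

        if (request_irq(irq, igb_test_intr, 0, netdev->name, adapter))
                return -1;
        /* ... trigger and observe test interrupts ... */
        free_irq(irq, adapter);         /* same (irq, cookie) pair */
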
1240 | static void igb_free_desc_rings(struct igb_adapter *adapter) | 1332 | static void igb_free_desc_rings(struct igb_adapter *adapter) |
1241 | { | 1333 | { |
1242 | struct igb_ring *tx_ring = &adapter->test_tx_ring; | 1334 | igb_free_tx_resources(&adapter->test_tx_ring); |
1243 | struct igb_ring *rx_ring = &adapter->test_rx_ring; | 1335 | igb_free_rx_resources(&adapter->test_rx_ring); |
1244 | struct pci_dev *pdev = adapter->pdev; | ||
1245 | int i; | ||
1246 | |||
1247 | if (tx_ring->desc && tx_ring->buffer_info) { | ||
1248 | for (i = 0; i < tx_ring->count; i++) { | ||
1249 | struct igb_buffer *buf = &(tx_ring->buffer_info[i]); | ||
1250 | if (buf->dma) | ||
1251 | pci_unmap_single(pdev, buf->dma, buf->length, | ||
1252 | PCI_DMA_TODEVICE); | ||
1253 | if (buf->skb) | ||
1254 | dev_kfree_skb(buf->skb); | ||
1255 | } | ||
1256 | } | ||
1257 | |||
1258 | if (rx_ring->desc && rx_ring->buffer_info) { | ||
1259 | for (i = 0; i < rx_ring->count; i++) { | ||
1260 | struct igb_buffer *buf = &(rx_ring->buffer_info[i]); | ||
1261 | if (buf->dma) | ||
1262 | pci_unmap_single(pdev, buf->dma, | ||
1263 | IGB_RXBUFFER_2048, | ||
1264 | PCI_DMA_FROMDEVICE); | ||
1265 | if (buf->skb) | ||
1266 | dev_kfree_skb(buf->skb); | ||
1267 | } | ||
1268 | } | ||
1269 | |||
1270 | if (tx_ring->desc) { | ||
1271 | pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, | ||
1272 | tx_ring->dma); | ||
1273 | tx_ring->desc = NULL; | ||
1274 | } | ||
1275 | if (rx_ring->desc) { | ||
1276 | pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, | ||
1277 | rx_ring->dma); | ||
1278 | rx_ring->desc = NULL; | ||
1279 | } | ||
1280 | |||
1281 | kfree(tx_ring->buffer_info); | ||
1282 | tx_ring->buffer_info = NULL; | ||
1283 | kfree(rx_ring->buffer_info); | ||
1284 | rx_ring->buffer_info = NULL; | ||
1285 | |||
1286 | return; | ||
1287 | } | 1336 | } |
1288 | 1337 | ||
1289 | static int igb_setup_desc_rings(struct igb_adapter *adapter) | 1338 | static int igb_setup_desc_rings(struct igb_adapter *adapter) |
1290 | { | 1339 | { |
1291 | struct e1000_hw *hw = &adapter->hw; | ||
1292 | struct igb_ring *tx_ring = &adapter->test_tx_ring; | 1340 | struct igb_ring *tx_ring = &adapter->test_tx_ring; |
1293 | struct igb_ring *rx_ring = &adapter->test_rx_ring; | 1341 | struct igb_ring *rx_ring = &adapter->test_rx_ring; |
1294 | struct pci_dev *pdev = adapter->pdev; | 1342 | struct e1000_hw *hw = &adapter->hw; |
1295 | struct igb_buffer *buffer_info; | 1343 | int ret_val; |
1296 | u32 rctl; | ||
1297 | int i, ret_val; | ||
1298 | 1344 | ||
1299 | /* Setup Tx descriptor ring and Tx buffers */ | 1345 | /* Setup Tx descriptor ring and Tx buffers */ |
1346 | tx_ring->count = IGB_DEFAULT_TXD; | ||
1347 | tx_ring->pdev = adapter->pdev; | ||
1348 | tx_ring->netdev = adapter->netdev; | ||
1349 | tx_ring->reg_idx = adapter->vfs_allocated_count; | ||
1300 | 1350 | ||
1301 | if (!tx_ring->count) | 1351 | if (igb_setup_tx_resources(tx_ring)) { |
1302 | tx_ring->count = IGB_DEFAULT_TXD; | ||
1303 | |||
1304 | tx_ring->buffer_info = kcalloc(tx_ring->count, | ||
1305 | sizeof(struct igb_buffer), | ||
1306 | GFP_KERNEL); | ||
1307 | if (!tx_ring->buffer_info) { | ||
1308 | ret_val = 1; | 1352 | ret_val = 1; |
1309 | goto err_nomem; | 1353 | goto err_nomem; |
1310 | } | 1354 | } |
1311 | 1355 | ||
1312 | tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); | 1356 | igb_setup_tctl(adapter); |
1313 | tx_ring->size = ALIGN(tx_ring->size, 4096); | 1357 | igb_configure_tx_ring(adapter, tx_ring); |
1314 | tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, | ||
1315 | &tx_ring->dma); | ||
1316 | if (!tx_ring->desc) { | ||
1317 | ret_val = 2; | ||
1318 | goto err_nomem; | ||
1319 | } | ||
1320 | tx_ring->next_to_use = tx_ring->next_to_clean = 0; | ||
1321 | |||
1322 | wr32(E1000_TDBAL(0), | ||
1323 | ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); | ||
1324 | wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32)); | ||
1325 | wr32(E1000_TDLEN(0), | ||
1326 | tx_ring->count * sizeof(union e1000_adv_tx_desc)); | ||
1327 | wr32(E1000_TDH(0), 0); | ||
1328 | wr32(E1000_TDT(0), 0); | ||
1329 | wr32(E1000_TCTL, | ||
1330 | E1000_TCTL_PSP | E1000_TCTL_EN | | ||
1331 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | | ||
1332 | E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); | ||
1333 | |||
1334 | for (i = 0; i < tx_ring->count; i++) { | ||
1335 | union e1000_adv_tx_desc *tx_desc; | ||
1336 | struct sk_buff *skb; | ||
1337 | unsigned int size = 1024; | ||
1338 | |||
1339 | tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); | ||
1340 | skb = alloc_skb(size, GFP_KERNEL); | ||
1341 | if (!skb) { | ||
1342 | ret_val = 3; | ||
1343 | goto err_nomem; | ||
1344 | } | ||
1345 | skb_put(skb, size); | ||
1346 | buffer_info = &tx_ring->buffer_info[i]; | ||
1347 | buffer_info->skb = skb; | ||
1348 | buffer_info->length = skb->len; | ||
1349 | buffer_info->dma = pci_map_single(pdev, skb->data, skb->len, | ||
1350 | PCI_DMA_TODEVICE); | ||
1351 | tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | ||
1352 | tx_desc->read.olinfo_status = cpu_to_le32(skb->len) << | ||
1353 | E1000_ADVTXD_PAYLEN_SHIFT; | ||
1354 | tx_desc->read.cmd_type_len = cpu_to_le32(skb->len); | ||
1355 | tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP | | ||
1356 | E1000_TXD_CMD_IFCS | | ||
1357 | E1000_TXD_CMD_RS | | ||
1358 | E1000_ADVTXD_DTYP_DATA | | ||
1359 | E1000_ADVTXD_DCMD_DEXT); | ||
1360 | } | ||
1361 | 1358 | ||
1362 | /* Setup Rx descriptor ring and Rx buffers */ | 1359 | /* Setup Rx descriptor ring and Rx buffers */ |
1363 | 1360 | rx_ring->count = IGB_DEFAULT_RXD; | |
1364 | if (!rx_ring->count) | 1361 | rx_ring->pdev = adapter->pdev; |
1365 | rx_ring->count = IGB_DEFAULT_RXD; | 1362 | rx_ring->netdev = adapter->netdev; |
1366 | 1363 | rx_ring->rx_buffer_len = IGB_RXBUFFER_2048; | |
1367 | rx_ring->buffer_info = kcalloc(rx_ring->count, | 1364 | rx_ring->reg_idx = adapter->vfs_allocated_count; |
1368 | sizeof(struct igb_buffer), | 1365 | |
1369 | GFP_KERNEL); | 1366 | if (igb_setup_rx_resources(rx_ring)) { |
1370 | if (!rx_ring->buffer_info) { | 1367 | ret_val = 3; |
1371 | ret_val = 4; | ||
1372 | goto err_nomem; | 1368 | goto err_nomem; |
1373 | } | 1369 | } |
1374 | 1370 | ||
1375 | rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); | 1371 | /* set the default queue to queue 0 of PF */ |
1376 | rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, | 1372 | wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); |
1377 | &rx_ring->dma); | ||
1378 | if (!rx_ring->desc) { | ||
1379 | ret_val = 5; | ||
1380 | goto err_nomem; | ||
1381 | } | ||
1382 | rx_ring->next_to_use = rx_ring->next_to_clean = 0; | ||
1383 | 1373 | ||
1384 | rctl = rd32(E1000_RCTL); | 1374 | /* enable receive ring */ |
1385 | wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); | 1375 | igb_setup_rctl(adapter); |
1386 | wr32(E1000_RDBAL(0), | 1376 | igb_configure_rx_ring(adapter, rx_ring); |
1387 | ((u64) rx_ring->dma & 0xFFFFFFFF)); | 1377 | |
1388 | wr32(E1000_RDBAH(0), | 1378 | igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring)); |
1389 | ((u64) rx_ring->dma >> 32)); | ||
1390 | wr32(E1000_RDLEN(0), rx_ring->size); | ||
1391 | wr32(E1000_RDH(0), 0); | ||
1392 | wr32(E1000_RDT(0), 0); | ||
1393 | rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); | ||
1394 | rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | | ||
1395 | (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | ||
1396 | wr32(E1000_RCTL, rctl); | ||
1397 | wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF); | ||
1398 | |||
1399 | for (i = 0; i < rx_ring->count; i++) { | ||
1400 | union e1000_adv_rx_desc *rx_desc; | ||
1401 | struct sk_buff *skb; | ||
1402 | |||
1403 | buffer_info = &rx_ring->buffer_info[i]; | ||
1404 | rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); | ||
1405 | skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN, | ||
1406 | GFP_KERNEL); | ||
1407 | if (!skb) { | ||
1408 | ret_val = 6; | ||
1409 | goto err_nomem; | ||
1410 | } | ||
1411 | skb_reserve(skb, NET_IP_ALIGN); | ||
1412 | buffer_info->skb = skb; | ||
1413 | buffer_info->dma = pci_map_single(pdev, skb->data, | ||
1414 | IGB_RXBUFFER_2048, | ||
1415 | PCI_DMA_FROMDEVICE); | ||
1416 | rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); | ||
1417 | memset(skb->data, 0x00, skb->len); | ||
1418 | } | ||
1419 | 1379 | ||
1420 | return 0; | 1380 | return 0; |
1421 | 1381 | ||
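For reference, the register programming that the removed open-coded setup performed by hand follows one fixed recipe: the ring's 64-bit DMA address is split across a low/high 32-bit register pair (RDBAL/RDBAH, or TDBAL/TDBAH on the Tx side), the ring's byte length goes into RDLEN, and head/tail both start at zero. A standalone sketch of that register math (the wr32() stand-in below just prints; the address, ring size, and 16-byte descriptor size are illustrative assumptions):

#include <stdint.h>
#include <stdio.h>

/* printf stand-in for the driver's wr32() register write helper */
static void wr32(const char *reg, uint32_t val)
{
	printf("%-5s <- 0x%08x\n", reg, val);
}

int main(void)
{
	uint64_t dma = 0x0000001234567000ULL; /* invented ring base address */
	uint32_t count = 256;                 /* descriptors in the ring */
	uint32_t desc_size = 16;              /* assumed 16-byte adv. descriptor */

	/* split the 64-bit bus address across the low/high register pair */
	wr32("RDBAL", (uint32_t)(dma & 0xFFFFFFFF));
	wr32("RDBAH", (uint32_t)(dma >> 32));
	wr32("RDLEN", count * desc_size);

	/* head == tail means the hardware sees an empty ring */
	wr32("RDH", 0);
	wr32("RDT", 0);
	return 0;
}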
@@ -1449,6 +1409,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) | |||
1449 | igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); | 1409 | igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); |
1450 | /* autoneg off */ | 1410 | /* autoneg off */ |
1451 | igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); | 1411 | igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); |
1412 | } else if (hw->phy.type == e1000_phy_82580) { | ||
1413 | /* enable MII loopback */ | ||
1414 | igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); | ||
1452 | } | 1415 | } |
1453 | 1416 | ||
1454 | ctrl_reg = rd32(E1000_CTRL); | 1417 | ctrl_reg = rd32(E1000_CTRL); |
@@ -1491,7 +1454,10 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter) | |||
1491 | struct e1000_hw *hw = &adapter->hw; | 1454 | struct e1000_hw *hw = &adapter->hw; |
1492 | u32 reg; | 1455 | u32 reg; |
1493 | 1456 | ||
1494 | if (hw->phy.media_type == e1000_media_type_internal_serdes) { | 1457 | reg = rd32(E1000_CTRL_EXT); |
1458 | |||
1459 | /* use CTRL_EXT to identify link type as SGMII can appear as copper */ | ||
1460 | if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { | ||
1495 | reg = rd32(E1000_RCTL); | 1461 | reg = rd32(E1000_RCTL); |
1496 | reg |= E1000_RCTL_LBM_TCVR; | 1462 | reg |= E1000_RCTL_LBM_TCVR; |
1497 | wr32(E1000_RCTL, reg); | 1463 | wr32(E1000_RCTL, reg); |
@@ -1522,11 +1488,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter) | |||
1522 | wr32(E1000_PCS_LCTL, reg); | 1488 | wr32(E1000_PCS_LCTL, reg); |
1523 | 1489 | ||
1524 | return 0; | 1490 | return 0; |
1525 | } else if (hw->phy.media_type == e1000_media_type_copper) { | ||
1526 | return igb_set_phy_loopback(adapter); | ||
1527 | } | 1491 | } |
1528 | 1492 | ||
1529 | return 7; | 1493 | return igb_set_phy_loopback(adapter); |
1530 | } | 1494 | } |
1531 | 1495 | ||
1532 | static void igb_loopback_cleanup(struct igb_adapter *adapter) | 1496 | static void igb_loopback_cleanup(struct igb_adapter *adapter) |
@@ -1552,35 +1516,99 @@ static void igb_create_lbtest_frame(struct sk_buff *skb, | |||
1552 | unsigned int frame_size) | 1516 | unsigned int frame_size) |
1553 | { | 1517 | { |
1554 | memset(skb->data, 0xFF, frame_size); | 1518 | memset(skb->data, 0xFF, frame_size); |
1555 | frame_size &= ~1; | 1519 | frame_size /= 2; |
1556 | memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); | 1520 | memset(&skb->data[frame_size], 0xAA, frame_size - 1); |
1557 | memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); | 1521 | memset(&skb->data[frame_size + 10], 0xBE, 1); |
1558 | memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); | 1522 | memset(&skb->data[frame_size + 12], 0xAF, 1); |
1559 | } | 1523 | } |
1560 | 1524 | ||
1561 | static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) | 1525 | static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) |
1562 | { | 1526 | { |
1563 | frame_size &= ~1; | 1527 | frame_size /= 2; |
1564 | if (*(skb->data + 3) == 0xFF) | 1528 | if (*(skb->data + 3) == 0xFF) { |
1565 | if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && | 1529 | if ((*(skb->data + frame_size + 10) == 0xBE) && |
1566 | (*(skb->data + frame_size / 2 + 12) == 0xAF)) | 1530 | (*(skb->data + frame_size + 12) == 0xAF)) { |
1567 | return 0; | 1531 | return 0; |
1532 | } | ||
1533 | } | ||
1568 | return 13; | 1534 | return 13; |
1569 | } | 1535 | } |
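The create/check pair above is self-contained enough to exercise outside the driver. A minimal userspace rendition of the same pattern, operating on a flat buffer instead of an skb (the 1024-byte frame size and the error code 13 mirror the code above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* same fill pattern as igb_create_lbtest_frame(), on a plain buffer */
static void create_lbtest_frame(uint8_t *data, unsigned int frame_size)
{
	memset(data, 0xFF, frame_size);
	frame_size /= 2;
	memset(&data[frame_size], 0xAA, frame_size - 1);
	data[frame_size + 10] = 0xBE;
	data[frame_size + 12] = 0xAF;
}

/* returns 0 on a matching frame, 13 (the driver's error code) otherwise */
static int check_lbtest_frame(const uint8_t *data, unsigned int frame_size)
{
	frame_size /= 2;
	if (data[3] == 0xFF &&
	    data[frame_size + 10] == 0xBE &&
	    data[frame_size + 12] == 0xAF)
		return 0;
	return 13;
}

int main(void)
{
	uint8_t frame[1024];

	create_lbtest_frame(frame, sizeof(frame));
	printf("intact frame:  %d\n", check_lbtest_frame(frame, sizeof(frame)));

	frame[512 + 10] = 0x00;	/* corrupt one marker byte */
	printf("corrupt frame: %d\n", check_lbtest_frame(frame, sizeof(frame)));
	return 0;
}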
1570 | 1536 | ||
1537 | static int igb_clean_test_rings(struct igb_ring *rx_ring, | ||
1538 | struct igb_ring *tx_ring, | ||
1539 | unsigned int size) | ||
1540 | { | ||
1541 | union e1000_adv_rx_desc *rx_desc; | ||
1542 | struct igb_buffer *buffer_info; | ||
1543 | int rx_ntc, tx_ntc, count = 0; | ||
1544 | u32 staterr; | ||
1545 | |||
1546 | /* initialize next to clean and descriptor values */ | ||
1547 | rx_ntc = rx_ring->next_to_clean; | ||
1548 | tx_ntc = tx_ring->next_to_clean; | ||
1549 | rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); | ||
1550 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | ||
1551 | |||
1552 | while (staterr & E1000_RXD_STAT_DD) { | ||
1553 | /* check rx buffer */ | ||
1554 | buffer_info = &rx_ring->buffer_info[rx_ntc]; | ||
1555 | |||
1556 | /* unmap rx buffer, will be remapped by alloc_rx_buffers */ | ||
1557 | pci_unmap_single(rx_ring->pdev, | ||
1558 | buffer_info->dma, | ||
1559 | rx_ring->rx_buffer_len, | ||
1560 | PCI_DMA_FROMDEVICE); | ||
1561 | buffer_info->dma = 0; | ||
1562 | |||
1563 | /* verify contents of skb */ | ||
1564 | if (!igb_check_lbtest_frame(buffer_info->skb, size)) | ||
1565 | count++; | ||
1566 | |||
1567 | /* unmap buffer on tx side */ | ||
1568 | buffer_info = &tx_ring->buffer_info[tx_ntc]; | ||
1569 | igb_unmap_and_free_tx_resource(tx_ring, buffer_info); | ||
1570 | |||
1571 | /* increment rx/tx next to clean counters */ | ||
1572 | rx_ntc++; | ||
1573 | if (rx_ntc == rx_ring->count) | ||
1574 | rx_ntc = 0; | ||
1575 | tx_ntc++; | ||
1576 | if (tx_ntc == tx_ring->count) | ||
1577 | tx_ntc = 0; | ||
1578 | |||
1579 | /* fetch next descriptor */ | ||
1580 | rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc); | ||
1581 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | ||
1582 | } | ||
1583 | |||
1584 | /* re-map buffers to ring, store next to clean values */ | ||
1585 | igb_alloc_rx_buffers_adv(rx_ring, count); | ||
1586 | rx_ring->next_to_clean = rx_ntc; | ||
1587 | tx_ring->next_to_clean = tx_ntc; | ||
1588 | |||
1589 | return count; | ||
1590 | } | ||
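The only subtle part of igb_clean_test_rings() is the index bookkeeping: next-to-clean advances one descriptor at a time and resets to zero when it reaches the end of the ring. A toy sketch of that wrap (the ring size and starting index are made up):

#include <stdio.h>

int main(void)
{
	unsigned int count = 8;	/* hypothetical ring size */
	unsigned int ntc = 6;	/* next_to_clean starting position */
	int i;

	/* advance the index the way igb_clean_test_rings() does:
	 * increment, and reset to 0 at the end of the ring */
	for (i = 0; i < 5; i++) {
		printf("cleaning descriptor %u\n", ntc);
		ntc++;
		if (ntc == count)
			ntc = 0;
	}
	return 0;	/* prints 6, 7, 0, 1, 2 */
}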
1591 | |||
1571 | static int igb_run_loopback_test(struct igb_adapter *adapter) | 1592 | static int igb_run_loopback_test(struct igb_adapter *adapter) |
1572 | { | 1593 | { |
1573 | struct e1000_hw *hw = &adapter->hw; | ||
1574 | struct igb_ring *tx_ring = &adapter->test_tx_ring; | 1594 | struct igb_ring *tx_ring = &adapter->test_tx_ring; |
1575 | struct igb_ring *rx_ring = &adapter->test_rx_ring; | 1595 | struct igb_ring *rx_ring = &adapter->test_rx_ring; |
1576 | struct pci_dev *pdev = adapter->pdev; | 1596 | int i, j, lc, good_cnt, ret_val = 0; |
1577 | int i, j, k, l, lc, good_cnt; | 1597 | unsigned int size = 1024; |
1578 | int ret_val = 0; | 1598 | netdev_tx_t tx_ret_val; |
1579 | unsigned long time; | 1599 | struct sk_buff *skb; |
1600 | |||
1601 | /* allocate test skb */ | ||
1602 | skb = alloc_skb(size, GFP_KERNEL); | ||
1603 | if (!skb) | ||
1604 | return 11; | ||
1580 | 1605 | ||
1581 | wr32(E1000_RDT(0), rx_ring->count - 1); | 1606 | /* place data into test skb */ |
1607 | igb_create_lbtest_frame(skb, size); | ||
1608 | skb_put(skb, size); | ||
1582 | 1609 | ||
1583 | /* Calculate the loop count based on the largest descriptor ring | 1610 | /* |
1611 | * Calculate the loop count based on the largest descriptor ring | ||
1584 | * The idea is to wrap the largest ring a number of times using 64 | 1612 | * The idea is to wrap the largest ring a number of times using 64 |
1585 | * send/receive pairs during each loop | 1613 | * send/receive pairs during each loop |
1586 | */ | 1614 | */ |
@@ -1590,50 +1618,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter) | |||
1590 | else | 1618 | else |
1591 | lc = ((rx_ring->count / 64) * 2) + 1; | 1619 | lc = ((rx_ring->count / 64) * 2) + 1; |
1592 | 1620 | ||
1593 | k = l = 0; | ||
1594 | for (j = 0; j <= lc; j++) { /* loop count loop */ | 1621 | for (j = 0; j <= lc; j++) { /* loop count loop */ |
1595 | for (i = 0; i < 64; i++) { /* send the packets */ | 1622 | /* reset count of good packets */ |
1596 | igb_create_lbtest_frame(tx_ring->buffer_info[k].skb, | ||
1597 | 1024); | ||
1598 | pci_dma_sync_single_for_device(pdev, | ||
1599 | tx_ring->buffer_info[k].dma, | ||
1600 | tx_ring->buffer_info[k].length, | ||
1601 | PCI_DMA_TODEVICE); | ||
1602 | k++; | ||
1603 | if (k == tx_ring->count) | ||
1604 | k = 0; | ||
1605 | } | ||
1606 | wr32(E1000_TDT(0), k); | ||
1607 | msleep(200); | ||
1608 | time = jiffies; /* set the start time for the receive */ | ||
1609 | good_cnt = 0; | 1623 | good_cnt = 0; |
1610 | do { /* receive the sent packets */ | 1624 | |
1611 | pci_dma_sync_single_for_cpu(pdev, | 1625 | /* place 64 packets on the transmit queue*/ |
1612 | rx_ring->buffer_info[l].dma, | 1626 | for (i = 0; i < 64; i++) { |
1613 | IGB_RXBUFFER_2048, | 1627 | skb_get(skb); |
1614 | PCI_DMA_FROMDEVICE); | 1628 | tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring); |
1615 | 1629 | if (tx_ret_val == NETDEV_TX_OK) | |
1616 | ret_val = igb_check_lbtest_frame( | ||
1617 | rx_ring->buffer_info[l].skb, 1024); | ||
1618 | if (!ret_val) | ||
1619 | good_cnt++; | 1630 | good_cnt++; |
1620 | l++; | 1631 | } |
1621 | if (l == rx_ring->count) | 1632 | |
1622 | l = 0; | ||
1623 | /* time + 20 msecs (200 msecs on 2.4) is more than | ||
1624 | * enough time to complete the receives; if it's | ||
1625 | * exceeded, break and error off | ||
1626 | */ | ||
1627 | } while (good_cnt < 64 && jiffies < (time + 20)); | ||
1628 | if (good_cnt != 64) { | 1633 | if (good_cnt != 64) { |
1629 | ret_val = 13; /* ret_val is the same as mis-compare */ | 1634 | ret_val = 12; |
1630 | break; | 1635 | break; |
1631 | } | 1636 | } |
1632 | if (jiffies >= (time + 20)) { | 1637 | |
1633 | ret_val = 14; /* error code for time out error */ | 1638 | /* allow 200 milliseconds for packets to go from tx to rx */ |
1639 | msleep(200); | ||
1640 | |||
1641 | good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); | ||
1642 | if (good_cnt != 64) { | ||
1643 | ret_val = 13; | ||
1634 | break; | 1644 | break; |
1635 | } | 1645 | } |
1636 | } /* end loop count loop */ | 1646 | } /* end loop count loop */ |
1647 | |||
1648 | /* free the original skb */ | ||
1649 | kfree_skb(skb); | ||
1650 | |||
1637 | return ret_val; | 1651 | return ret_val; |
1638 | } | 1652 | } |
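The loop count arithmetic is easiest to see with numbers: for the larger of the two rings, ((count / 64) * 2) + 1 bursts of 64 packets are enough to wrap that ring roughly twice. A quick calculation under assumed ring sizes:

#include <stdio.h>

int main(void)
{
	unsigned int tx_count = 256, rx_count = 512;	/* example sizes */
	unsigned int largest = (rx_count > tx_count) ? rx_count : tx_count;

	/* same formula as igb_run_loopback_test(): enough 64-packet
	 * bursts to wrap the largest descriptor ring about twice */
	int lc = ((largest / 64) * 2) + 1;

	printf("loop count = %d (%d packets total)\n", lc, (lc + 1) * 64);
	return 0;
}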
1639 | 1653 | ||
@@ -1686,8 +1700,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data) | |||
1686 | if (hw->mac.autoneg) | 1700 | if (hw->mac.autoneg) |
1687 | msleep(4000); | 1701 | msleep(4000); |
1688 | 1702 | ||
1689 | if (!(rd32(E1000_STATUS) & | 1703 | if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) |
1690 | E1000_STATUS_LU)) | ||
1691 | *data = 1; | 1704 | *data = 1; |
1692 | } | 1705 | } |
1693 | return *data; | 1706 | return *data; |
@@ -1712,6 +1725,9 @@ static void igb_diag_test(struct net_device *netdev, | |||
1712 | 1725 | ||
1713 | dev_info(&adapter->pdev->dev, "offline testing starting\n"); | 1726 | dev_info(&adapter->pdev->dev, "offline testing starting\n"); |
1714 | 1727 | ||
1728 | /* power up link for link test */ | ||
1729 | igb_power_up_link(adapter); | ||
1730 | |||
1715 | /* Link test performed before hardware reset, so autoneg doesn't | 1731 | /* Link test performed before hardware reset, so autoneg doesn't |
1716 | * interfere with the test result */ | 1732 | * interfere with the test result */ |
1717 | if (igb_link_test(adapter, &data[4])) | 1733 | if (igb_link_test(adapter, &data[4])) |
@@ -1735,6 +1751,8 @@ static void igb_diag_test(struct net_device *netdev, | |||
1735 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1751 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1736 | 1752 | ||
1737 | igb_reset(adapter); | 1753 | igb_reset(adapter); |
1754 | /* power up link for loopback test */ | ||
1755 | igb_power_up_link(adapter); | ||
1738 | if (igb_loopback_test(adapter, &data[3])) | 1756 | if (igb_loopback_test(adapter, &data[3])) |
1739 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1757 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1740 | 1758 | ||
@@ -1753,9 +1771,14 @@ static void igb_diag_test(struct net_device *netdev, | |||
1753 | dev_open(netdev); | 1771 | dev_open(netdev); |
1754 | } else { | 1772 | } else { |
1755 | dev_info(&adapter->pdev->dev, "online testing starting\n"); | 1773 | dev_info(&adapter->pdev->dev, "online testing starting\n"); |
1756 | /* Online tests */ | 1774 | |
1757 | if (igb_link_test(adapter, &data[4])) | 1775 | /* PHY is powered down when interface is down */ |
1758 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1776 | if (!netif_carrier_ok(netdev)) { |
1777 | data[4] = 0; | ||
1778 | } else { | ||
1779 | if (igb_link_test(adapter, &data[4])) | ||
1780 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1781 | } | ||
1759 | 1782 | ||
1760 | /* Online tests aren't run; pass by default */ | 1783 | /* Online tests aren't run; pass by default */ |
1761 | data[0] = 0; | 1784 | data[0] = 0; |
@@ -1791,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, | |||
1791 | retval = 0; | 1814 | retval = 0; |
1792 | break; | 1815 | break; |
1793 | case E1000_DEV_ID_82576_QUAD_COPPER: | 1816 | case E1000_DEV_ID_82576_QUAD_COPPER: |
1817 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: | ||
1794 | /* quad port adapters only support WoL on port A */ | 1818 | /* quad port adapters only support WoL on port A */ |
1795 | if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { | 1819 | if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { |
1796 | wol->supported = 0; | 1820 | wol->supported = 0; |
@@ -1803,7 +1827,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter, | |||
1803 | /* dual port cards only support WoL on port A from now on, | 1827 | /* dual port cards only support WoL on port A from now on, |
1804 | * unless it was enabled in the EEPROM for port B, | 1828 | * unless it was enabled in the EEPROM for port B, |
1805 | * so exclude FUNC_1 ports from having WoL enabled */ | 1829 | * so exclude FUNC_1 ports from having WoL enabled */ |
1806 | if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 && | 1830 | if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) && |
1807 | !adapter->eeprom_wol) { | 1831 | !adapter->eeprom_wol) { |
1808 | wol->supported = 0; | 1832 | wol->supported = 0; |
1809 | break; | 1833 | break; |
@@ -1820,7 +1844,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1820 | struct igb_adapter *adapter = netdev_priv(netdev); | 1844 | struct igb_adapter *adapter = netdev_priv(netdev); |
1821 | 1845 | ||
1822 | wol->supported = WAKE_UCAST | WAKE_MCAST | | 1846 | wol->supported = WAKE_UCAST | WAKE_MCAST | |
1823 | WAKE_BCAST | WAKE_MAGIC; | 1847 | WAKE_BCAST | WAKE_MAGIC | |
1848 | WAKE_PHY; | ||
1824 | wol->wolopts = 0; | 1849 | wol->wolopts = 0; |
1825 | 1850 | ||
1826 | /* this function will set ->supported = 0 and return 1 if wol is not | 1851 | /* this function will set ->supported = 0 and return 1 if wol is not |
@@ -1843,15 +1868,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1843 | wol->wolopts |= WAKE_BCAST; | 1868 | wol->wolopts |= WAKE_BCAST; |
1844 | if (adapter->wol & E1000_WUFC_MAG) | 1869 | if (adapter->wol & E1000_WUFC_MAG) |
1845 | wol->wolopts |= WAKE_MAGIC; | 1870 | wol->wolopts |= WAKE_MAGIC; |
1846 | 1871 | if (adapter->wol & E1000_WUFC_LNKC) | |
1847 | return; | 1872 | wol->wolopts |= WAKE_PHY; |
1848 | } | 1873 | } |
1849 | 1874 | ||
1850 | static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | 1875 | static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) |
1851 | { | 1876 | { |
1852 | struct igb_adapter *adapter = netdev_priv(netdev); | 1877 | struct igb_adapter *adapter = netdev_priv(netdev); |
1853 | 1878 | ||
1854 | if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) | 1879 | if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) |
1855 | return -EOPNOTSUPP; | 1880 | return -EOPNOTSUPP; |
1856 | 1881 | ||
1857 | if (igb_wol_exclusion(adapter, wol) || | 1882 | if (igb_wol_exclusion(adapter, wol) || |
@@ -1869,7 +1894,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1869 | adapter->wol |= E1000_WUFC_BC; | 1894 | adapter->wol |= E1000_WUFC_BC; |
1870 | if (wol->wolopts & WAKE_MAGIC) | 1895 | if (wol->wolopts & WAKE_MAGIC) |
1871 | adapter->wol |= E1000_WUFC_MAG; | 1896 | adapter->wol |= E1000_WUFC_MAG; |
1872 | 1897 | if (wol->wolopts & WAKE_PHY) | |
1898 | adapter->wol |= E1000_WUFC_LNKC; | ||
1873 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | 1899 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); |
1874 | 1900 | ||
1875 | return 0; | 1901 | return 0; |
@@ -1882,12 +1908,19 @@ static int igb_phys_id(struct net_device *netdev, u32 data) | |||
1882 | { | 1908 | { |
1883 | struct igb_adapter *adapter = netdev_priv(netdev); | 1909 | struct igb_adapter *adapter = netdev_priv(netdev); |
1884 | struct e1000_hw *hw = &adapter->hw; | 1910 | struct e1000_hw *hw = &adapter->hw; |
1911 | unsigned long timeout; | ||
1885 | 1912 | ||
1886 | if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) | 1913 | timeout = data * 1000; |
1887 | data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); | 1914 | |
1915 | /* | ||
1916 | * msleep_interruptible only accepts an unsigned int, so we are limited | ||
1917 | * in how long a duration we can wait | ||
1918 | */ | ||
1919 | if (!timeout || timeout > UINT_MAX) | ||
1920 | timeout = UINT_MAX; | ||
1888 | 1921 | ||
1889 | igb_blink_led(hw); | 1922 | igb_blink_led(hw); |
1890 | msleep_interruptible(data * 1000); | 1923 | msleep_interruptible(timeout); |
1891 | 1924 | ||
1892 | igb_led_off(hw); | 1925 | igb_led_off(hw); |
1893 | clear_bit(IGB_LED_ON, &adapter->led_status); | 1926 | clear_bit(IGB_LED_ON, &adapter->led_status); |
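The new timeout handling converts seconds to milliseconds and clamps to what msleep_interruptible() can accept. A compact sketch of the same clamp (blink_ms() is a hypothetical helper; mapping data == 0 to the longest blink mirrors the old MAX_SCHEDULE_TIMEOUT fallback):

#include <stdio.h>
#include <limits.h>

/* mirrors the clamp above; assumes a 64-bit unsigned long as on x86-64 */
static unsigned long blink_ms(unsigned long data)
{
	unsigned long timeout = data * 1000;	/* seconds -> milliseconds */

	/* msleep_interruptible() takes an unsigned int, so clamp; data == 0
	 * falls back to the longest blink, like the old code did */
	if (!timeout || timeout > UINT_MAX)
		timeout = UINT_MAX;
	return timeout;
}

int main(void)
{
	printf("%lu\n", blink_ms(2));		/* 2000 ms */
	printf("%lu\n", blink_ms(0));		/* clamped to UINT_MAX */
	printf("%lu\n", blink_ms(5000000));	/* clamped to UINT_MAX */
	return 0;
}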
@@ -1900,7 +1933,6 @@ static int igb_set_coalesce(struct net_device *netdev, | |||
1900 | struct ethtool_coalesce *ec) | 1933 | struct ethtool_coalesce *ec) |
1901 | { | 1934 | { |
1902 | struct igb_adapter *adapter = netdev_priv(netdev); | 1935 | struct igb_adapter *adapter = netdev_priv(netdev); |
1903 | struct e1000_hw *hw = &adapter->hw; | ||
1904 | int i; | 1936 | int i; |
1905 | 1937 | ||
1906 | if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || | 1938 | if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || |
@@ -1909,17 +1941,39 @@ static int igb_set_coalesce(struct net_device *netdev, | |||
1909 | (ec->rx_coalesce_usecs == 2)) | 1941 | (ec->rx_coalesce_usecs == 2)) |
1910 | return -EINVAL; | 1942 | return -EINVAL; |
1911 | 1943 | ||
1944 | if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || | ||
1945 | ((ec->tx_coalesce_usecs > 3) && | ||
1946 | (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || | ||
1947 | (ec->tx_coalesce_usecs == 2)) | ||
1948 | return -EINVAL; | ||
1949 | |||
1950 | if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) | ||
1951 | return -EINVAL; | ||
1952 | |||
1912 | /* convert to rate of IRQs per second */ | 1953 | /* convert to rate of IRQs per second */ |
1913 | if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { | 1954 | if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) |
1914 | adapter->itr_setting = ec->rx_coalesce_usecs; | 1955 | adapter->rx_itr_setting = ec->rx_coalesce_usecs; |
1915 | adapter->itr = IGB_START_ITR; | 1956 | else |
1916 | } else { | 1957 | adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; |
1917 | adapter->itr_setting = ec->rx_coalesce_usecs << 2; | ||
1918 | adapter->itr = adapter->itr_setting; | ||
1919 | } | ||
1920 | 1958 | ||
1921 | for (i = 0; i < adapter->num_rx_queues; i++) | 1959 | /* convert to rate of IRQs per second */ |
1922 | wr32(adapter->rx_ring[i].itr_register, adapter->itr); | 1960 | if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) |
1961 | adapter->tx_itr_setting = adapter->rx_itr_setting; | ||
1962 | else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) | ||
1963 | adapter->tx_itr_setting = ec->tx_coalesce_usecs; | ||
1964 | else | ||
1965 | adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; | ||
1966 | |||
1967 | for (i = 0; i < adapter->num_q_vectors; i++) { | ||
1968 | struct igb_q_vector *q_vector = adapter->q_vector[i]; | ||
1969 | if (q_vector->rx_ring) | ||
1970 | q_vector->itr_val = adapter->rx_itr_setting; | ||
1971 | else | ||
1972 | q_vector->itr_val = adapter->tx_itr_setting; | ||
1973 | if (q_vector->itr_val && q_vector->itr_val <= 3) | ||
1974 | q_vector->itr_val = IGB_START_ITR; | ||
1975 | q_vector->set_itr = 1; | ||
1976 | } | ||
1923 | 1977 | ||
1924 | return 0; | 1978 | return 0; |
1925 | } | 1979 | } |
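The coalescing conversion is symmetric: values 1-3 pass through as special mode selectors, anything larger is shifted left by two on the way into the setting and right by two when reported back. A round-trip sketch (the encode/decode function names are hypothetical, not driver API):

#include <stdio.h>

/* values 1..3 select special modes; anything larger is a microsecond
 * count stored shifted, matching the <<2 / >>2 pairing above */
static unsigned int itr_encode(unsigned int usecs)
{
	return (usecs && usecs <= 3) ? usecs : usecs << 2;
}

static unsigned int itr_decode(unsigned int setting)
{
	return (setting <= 3) ? setting : setting >> 2;
}

int main(void)
{
	unsigned int usecs = 100;
	unsigned int setting = itr_encode(usecs);

	printf("ethtool asks for %u us -> setting %u -> reported %u us\n",
	       usecs, setting, itr_decode(setting));
	return 0;
}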
@@ -1929,15 +1983,21 @@ static int igb_get_coalesce(struct net_device *netdev, | |||
1929 | { | 1983 | { |
1930 | struct igb_adapter *adapter = netdev_priv(netdev); | 1984 | struct igb_adapter *adapter = netdev_priv(netdev); |
1931 | 1985 | ||
1932 | if (adapter->itr_setting <= 3) | 1986 | if (adapter->rx_itr_setting <= 3) |
1933 | ec->rx_coalesce_usecs = adapter->itr_setting; | 1987 | ec->rx_coalesce_usecs = adapter->rx_itr_setting; |
1934 | else | 1988 | else |
1935 | ec->rx_coalesce_usecs = adapter->itr_setting >> 2; | 1989 | ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; |
1990 | |||
1991 | if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { | ||
1992 | if (adapter->tx_itr_setting <= 3) | ||
1993 | ec->tx_coalesce_usecs = adapter->tx_itr_setting; | ||
1994 | else | ||
1995 | ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; | ||
1996 | } | ||
1936 | 1997 | ||
1937 | return 0; | 1998 | return 0; |
1938 | } | 1999 | } |
1939 | 2000 | ||
1940 | |||
1941 | static int igb_nway_reset(struct net_device *netdev) | 2001 | static int igb_nway_reset(struct net_device *netdev) |
1942 | { | 2002 | { |
1943 | struct igb_adapter *adapter = netdev_priv(netdev); | 2003 | struct igb_adapter *adapter = netdev_priv(netdev); |
@@ -1962,31 +2022,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev, | |||
1962 | struct ethtool_stats *stats, u64 *data) | 2022 | struct ethtool_stats *stats, u64 *data) |
1963 | { | 2023 | { |
1964 | struct igb_adapter *adapter = netdev_priv(netdev); | 2024 | struct igb_adapter *adapter = netdev_priv(netdev); |
2025 | struct net_device_stats *net_stats = &netdev->stats; | ||
1965 | u64 *queue_stat; | 2026 | u64 *queue_stat; |
1966 | int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64); | 2027 | int i, j, k; |
1967 | int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64); | 2028 | char *p; |
1968 | int j; | ||
1969 | int i; | ||
1970 | 2029 | ||
1971 | igb_update_stats(adapter); | 2030 | igb_update_stats(adapter); |
2031 | |||
1972 | for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { | 2032 | for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { |
1973 | char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset; | 2033 | p = (char *)adapter + igb_gstrings_stats[i].stat_offset; |
1974 | data[i] = (igb_gstrings_stats[i].sizeof_stat == | 2034 | data[i] = (igb_gstrings_stats[i].sizeof_stat == |
1975 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | 2035 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; |
1976 | } | 2036 | } |
2037 | for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { | ||
2038 | p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; | ||
2039 | data[i] = (igb_gstrings_net_stats[j].sizeof_stat == | ||
2040 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | ||
2041 | } | ||
1977 | for (j = 0; j < adapter->num_tx_queues; j++) { | 2042 | for (j = 0; j < adapter->num_tx_queues; j++) { |
1978 | int k; | 2043 | queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats; |
1979 | queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; | 2044 | for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) |
1980 | for (k = 0; k < stat_count_tx; k++) | 2045 | data[i] = queue_stat[k]; |
1981 | data[i + k] = queue_stat[k]; | ||
1982 | i += k; | ||
1983 | } | 2046 | } |
1984 | for (j = 0; j < adapter->num_rx_queues; j++) { | 2047 | for (j = 0; j < adapter->num_rx_queues; j++) { |
1985 | int k; | 2048 | queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats; |
1986 | queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; | 2049 | for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) |
1987 | for (k = 0; k < stat_count_rx; k++) | 2050 | data[i] = queue_stat[k]; |
1988 | data[i + k] = queue_stat[k]; | ||
1989 | i += k; | ||
1990 | } | 2051 | } |
1991 | } | 2052 | } |
1992 | 2053 | ||
@@ -2007,11 +2068,18 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
2007 | ETH_GSTRING_LEN); | 2068 | ETH_GSTRING_LEN); |
2008 | p += ETH_GSTRING_LEN; | 2069 | p += ETH_GSTRING_LEN; |
2009 | } | 2070 | } |
2071 | for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) { | ||
2072 | memcpy(p, igb_gstrings_net_stats[i].stat_string, | ||
2073 | ETH_GSTRING_LEN); | ||
2074 | p += ETH_GSTRING_LEN; | ||
2075 | } | ||
2010 | for (i = 0; i < adapter->num_tx_queues; i++) { | 2076 | for (i = 0; i < adapter->num_tx_queues; i++) { |
2011 | sprintf(p, "tx_queue_%u_packets", i); | 2077 | sprintf(p, "tx_queue_%u_packets", i); |
2012 | p += ETH_GSTRING_LEN; | 2078 | p += ETH_GSTRING_LEN; |
2013 | sprintf(p, "tx_queue_%u_bytes", i); | 2079 | sprintf(p, "tx_queue_%u_bytes", i); |
2014 | p += ETH_GSTRING_LEN; | 2080 | p += ETH_GSTRING_LEN; |
2081 | sprintf(p, "tx_queue_%u_restart", i); | ||
2082 | p += ETH_GSTRING_LEN; | ||
2015 | } | 2083 | } |
2016 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2084 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2017 | sprintf(p, "rx_queue_%u_packets", i); | 2085 | sprintf(p, "rx_queue_%u_packets", i); |
@@ -2020,6 +2088,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
2020 | p += ETH_GSTRING_LEN; | 2088 | p += ETH_GSTRING_LEN; |
2021 | sprintf(p, "rx_queue_%u_drops", i); | 2089 | sprintf(p, "rx_queue_%u_drops", i); |
2022 | p += ETH_GSTRING_LEN; | 2090 | p += ETH_GSTRING_LEN; |
2091 | sprintf(p, "rx_queue_%u_csum_err", i); | ||
2092 | p += ETH_GSTRING_LEN; | ||
2093 | sprintf(p, "rx_queue_%u_alloc_failed", i); | ||
2094 | p += ETH_GSTRING_LEN; | ||
2023 | } | 2095 | } |
2024 | /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ | 2096 | /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ |
2025 | break; | 2097 | break; |
@@ -2037,7 +2109,7 @@ static const struct ethtool_ops igb_ethtool_ops = { | |||
2037 | .get_msglevel = igb_get_msglevel, | 2109 | .get_msglevel = igb_get_msglevel, |
2038 | .set_msglevel = igb_set_msglevel, | 2110 | .set_msglevel = igb_set_msglevel, |
2039 | .nway_reset = igb_nway_reset, | 2111 | .nway_reset = igb_nway_reset, |
2040 | .get_link = ethtool_op_get_link, | 2112 | .get_link = igb_get_link, |
2041 | .get_eeprom_len = igb_get_eeprom_len, | 2113 | .get_eeprom_len = igb_get_eeprom_len, |
2042 | .get_eeprom = igb_get_eeprom, | 2114 | .get_eeprom = igb_get_eeprom, |
2043 | .set_eeprom = igb_set_eeprom, | 2115 | .set_eeprom = igb_set_eeprom, |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 714c3a4a44ef..c9baa2aa98cd 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/pagemap.h> | 32 | #include <linux/pagemap.h> |
33 | #include <linux/netdevice.h> | 33 | #include <linux/netdevice.h> |
34 | #include <linux/ipv6.h> | 34 | #include <linux/ipv6.h> |
35 | #include <linux/slab.h> | ||
35 | #include <net/checksum.h> | 36 | #include <net/checksum.h> |
36 | #include <net/ip6_checksum.h> | 37 | #include <net/ip6_checksum.h> |
37 | #include <linux/net_tstamp.h> | 38 | #include <linux/net_tstamp.h> |
@@ -49,7 +50,7 @@ | |||
49 | #endif | 50 | #endif |
50 | #include "igb.h" | 51 | #include "igb.h" |
51 | 52 | ||
52 | #define DRV_VERSION "1.3.16-k2" | 53 | #define DRV_VERSION "2.1.0-k2" |
53 | char igb_driver_name[] = "igb"; | 54 | char igb_driver_name[] = "igb"; |
54 | char igb_driver_version[] = DRV_VERSION; | 55 | char igb_driver_version[] = DRV_VERSION; |
55 | static const char igb_driver_string[] = | 56 | static const char igb_driver_string[] = |
@@ -60,12 +61,19 @@ static const struct e1000_info *igb_info_tbl[] = { | |||
60 | [board_82575] = &e1000_82575_info, | 61 | [board_82575] = &e1000_82575_info, |
61 | }; | 62 | }; |
62 | 63 | ||
63 | static struct pci_device_id igb_pci_tbl[] = { | 64 | static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { |
65 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, | ||
66 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, | ||
67 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, | ||
68 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, | ||
69 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, | ||
64 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, | 70 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, |
65 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, | 71 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, |
72 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, | ||
66 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, | 73 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, |
67 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, | 74 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, |
68 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, | 75 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, |
76 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, | ||
69 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, | 77 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, |
70 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, | 78 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, |
71 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, | 79 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, |
@@ -81,6 +89,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *); | |||
81 | static int igb_setup_all_rx_resources(struct igb_adapter *); | 89 | static int igb_setup_all_rx_resources(struct igb_adapter *); |
82 | static void igb_free_all_tx_resources(struct igb_adapter *); | 90 | static void igb_free_all_tx_resources(struct igb_adapter *); |
83 | static void igb_free_all_rx_resources(struct igb_adapter *); | 91 | static void igb_free_all_rx_resources(struct igb_adapter *); |
92 | static void igb_setup_mrqc(struct igb_adapter *); | ||
84 | void igb_update_stats(struct igb_adapter *); | 93 | void igb_update_stats(struct igb_adapter *); |
85 | static int igb_probe(struct pci_dev *, const struct pci_device_id *); | 94 | static int igb_probe(struct pci_dev *, const struct pci_device_id *); |
86 | static void __devexit igb_remove(struct pci_dev *pdev); | 95 | static void __devexit igb_remove(struct pci_dev *pdev); |
@@ -89,7 +98,6 @@ static int igb_open(struct net_device *); | |||
89 | static int igb_close(struct net_device *); | 98 | static int igb_close(struct net_device *); |
90 | static void igb_configure_tx(struct igb_adapter *); | 99 | static void igb_configure_tx(struct igb_adapter *); |
91 | static void igb_configure_rx(struct igb_adapter *); | 100 | static void igb_configure_rx(struct igb_adapter *); |
92 | static void igb_setup_rctl(struct igb_adapter *); | ||
93 | static void igb_clean_all_tx_rings(struct igb_adapter *); | 101 | static void igb_clean_all_tx_rings(struct igb_adapter *); |
94 | static void igb_clean_all_rx_rings(struct igb_adapter *); | 102 | static void igb_clean_all_rx_rings(struct igb_adapter *); |
95 | static void igb_clean_tx_ring(struct igb_ring *); | 103 | static void igb_clean_tx_ring(struct igb_ring *); |
@@ -98,28 +106,22 @@ static void igb_set_rx_mode(struct net_device *); | |||
98 | static void igb_update_phy_info(unsigned long); | 106 | static void igb_update_phy_info(unsigned long); |
99 | static void igb_watchdog(unsigned long); | 107 | static void igb_watchdog(unsigned long); |
100 | static void igb_watchdog_task(struct work_struct *); | 108 | static void igb_watchdog_task(struct work_struct *); |
101 | static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, | 109 | static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *); |
102 | struct net_device *, | ||
103 | struct igb_ring *); | ||
104 | static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, | ||
105 | struct net_device *); | ||
106 | static struct net_device_stats *igb_get_stats(struct net_device *); | 110 | static struct net_device_stats *igb_get_stats(struct net_device *); |
107 | static int igb_change_mtu(struct net_device *, int); | 111 | static int igb_change_mtu(struct net_device *, int); |
108 | static int igb_set_mac(struct net_device *, void *); | 112 | static int igb_set_mac(struct net_device *, void *); |
113 | static void igb_set_uta(struct igb_adapter *adapter); | ||
109 | static irqreturn_t igb_intr(int irq, void *); | 114 | static irqreturn_t igb_intr(int irq, void *); |
110 | static irqreturn_t igb_intr_msi(int irq, void *); | 115 | static irqreturn_t igb_intr_msi(int irq, void *); |
111 | static irqreturn_t igb_msix_other(int irq, void *); | 116 | static irqreturn_t igb_msix_other(int irq, void *); |
112 | static irqreturn_t igb_msix_rx(int irq, void *); | 117 | static irqreturn_t igb_msix_ring(int irq, void *); |
113 | static irqreturn_t igb_msix_tx(int irq, void *); | ||
114 | #ifdef CONFIG_IGB_DCA | 118 | #ifdef CONFIG_IGB_DCA |
115 | static void igb_update_rx_dca(struct igb_ring *); | 119 | static void igb_update_dca(struct igb_q_vector *); |
116 | static void igb_update_tx_dca(struct igb_ring *); | ||
117 | static void igb_setup_dca(struct igb_adapter *); | 120 | static void igb_setup_dca(struct igb_adapter *); |
118 | #endif /* CONFIG_IGB_DCA */ | 121 | #endif /* CONFIG_IGB_DCA */ |
119 | static bool igb_clean_tx_irq(struct igb_ring *); | 122 | static bool igb_clean_tx_irq(struct igb_q_vector *); |
120 | static int igb_poll(struct napi_struct *, int); | 123 | static int igb_poll(struct napi_struct *, int); |
121 | static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); | 124 | static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int); |
122 | static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); | ||
123 | static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); | 125 | static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); |
124 | static void igb_tx_timeout(struct net_device *); | 126 | static void igb_tx_timeout(struct net_device *); |
125 | static void igb_reset_task(struct work_struct *); | 127 | static void igb_reset_task(struct work_struct *); |
@@ -127,56 +129,18 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *); | |||
127 | static void igb_vlan_rx_add_vid(struct net_device *, u16); | 129 | static void igb_vlan_rx_add_vid(struct net_device *, u16); |
128 | static void igb_vlan_rx_kill_vid(struct net_device *, u16); | 130 | static void igb_vlan_rx_kill_vid(struct net_device *, u16); |
129 | static void igb_restore_vlan(struct igb_adapter *); | 131 | static void igb_restore_vlan(struct igb_adapter *); |
132 | static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); | ||
130 | static void igb_ping_all_vfs(struct igb_adapter *); | 133 | static void igb_ping_all_vfs(struct igb_adapter *); |
131 | static void igb_msg_task(struct igb_adapter *); | 134 | static void igb_msg_task(struct igb_adapter *); |
132 | static int igb_rcv_msg_from_vf(struct igb_adapter *, u32); | ||
133 | static inline void igb_set_rah_pool(struct e1000_hw *, int , int); | ||
134 | static void igb_vmm_control(struct igb_adapter *); | 135 | static void igb_vmm_control(struct igb_adapter *); |
135 | static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); | 136 | static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); |
136 | static void igb_restore_vf_multicasts(struct igb_adapter *adapter); | 137 | static void igb_restore_vf_multicasts(struct igb_adapter *adapter); |
137 | 138 | static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); | |
138 | static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn) | 139 | static int igb_ndo_set_vf_vlan(struct net_device *netdev, |
139 | { | 140 | int vf, u16 vlan, u8 qos); |
140 | u32 reg_data; | 141 | static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); |
141 | 142 | static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, | |
142 | reg_data = rd32(E1000_VMOLR(vfn)); | 143 | struct ifla_vf_info *ivi); |
143 | reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */ | ||
144 | E1000_VMOLR_ROPE | /* Accept packets matched in UTA */ | ||
145 | E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */ | ||
146 | E1000_VMOLR_AUPE | /* Accept untagged packets */ | ||
147 | E1000_VMOLR_STRVLAN; /* Strip vlan tags */ | ||
148 | wr32(E1000_VMOLR(vfn), reg_data); | ||
149 | } | ||
150 | |||
151 | static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, | ||
152 | int vfn) | ||
153 | { | ||
154 | struct e1000_hw *hw = &adapter->hw; | ||
155 | u32 vmolr; | ||
156 | |||
157 | /* if it isn't the PF check to see if VFs are enabled and | ||
158 | * increase the size to support vlan tags */ | ||
159 | if (vfn < adapter->vfs_allocated_count && | ||
160 | adapter->vf_data[vfn].vlans_enabled) | ||
161 | size += VLAN_TAG_SIZE; | ||
162 | |||
163 | vmolr = rd32(E1000_VMOLR(vfn)); | ||
164 | vmolr &= ~E1000_VMOLR_RLPML_MASK; | ||
165 | vmolr |= size | E1000_VMOLR_LPE; | ||
166 | wr32(E1000_VMOLR(vfn), vmolr); | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry) | ||
172 | { | ||
173 | u32 reg_data; | ||
174 | |||
175 | reg_data = rd32(E1000_RAH(entry)); | ||
176 | reg_data &= ~E1000_RAH_POOL_MASK; | ||
177 | reg_data |= E1000_RAH_POOL_1 << pool; | ||
178 | wr32(E1000_RAH(entry), reg_data); | ||
179 | } | ||
180 | 144 | ||
181 | #ifdef CONFIG_PM | 145 | #ifdef CONFIG_PM |
182 | static int igb_suspend(struct pci_dev *, pm_message_t); | 146 | static int igb_suspend(struct pci_dev *, pm_message_t); |
@@ -228,46 +192,12 @@ static struct pci_driver igb_driver = { | |||
228 | .err_handler = &igb_err_handler | 192 | .err_handler = &igb_err_handler |
229 | }; | 193 | }; |
230 | 194 | ||
231 | static int global_quad_port_a; /* global quad port a indication */ | ||
232 | |||
233 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); | 195 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); |
234 | MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); | 196 | MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); |
235 | MODULE_LICENSE("GPL"); | 197 | MODULE_LICENSE("GPL"); |
236 | MODULE_VERSION(DRV_VERSION); | 198 | MODULE_VERSION(DRV_VERSION); |
237 | 199 | ||
238 | /** | 200 | /** |
239 | * Scale the NIC clock cycle by a large factor so that | ||
240 | * relatively small clock corrections can be added or | ||
241 | * subtracted at each clock tick. The drawbacks of a | ||
242 | * large factor are a) that the clock register overflows | ||
243 | * more quickly (not such a big deal) and b) that the | ||
244 | * increment per tick has to fit into 24 bits. | ||
245 | * | ||
246 | * Note that | ||
247 | * TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * | ||
248 | * IGB_TSYNC_SCALE | ||
249 | * TIMINCA += TIMINCA * adjustment [ppm] / 1e9 | ||
250 | * | ||
251 | * The base scale factor is intentionally a power of two | ||
252 | * so that the division in %struct timecounter can be done with | ||
253 | * a shift. | ||
254 | */ | ||
255 | #define IGB_TSYNC_SHIFT (19) | ||
256 | #define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT) | ||
257 | |||
258 | /** | ||
259 | * The duration of one clock cycle of the NIC. | ||
260 | * | ||
261 | * @todo This hard-coded value is part of the specification and might change | ||
262 | * in future hardware revisions. Add revision check. | ||
263 | */ | ||
264 | #define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16 | ||
265 | |||
266 | #if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24) | ||
267 | # error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA | ||
268 | #endif | ||
269 | |||
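Plugging the removed constants into the formula shows why the compile-time check matters: 16 ns * 2^19 = 2^23, which just fits the 24-bit TIMINCA field. A quick sketch of the arithmetic (note the comment above divides by 1e9, so its "ppm" adjustment is effectively parts per billion; it is treated as such here):

#include <stdio.h>

int main(void)
{
	/* constants from the removed comment block above */
	const unsigned int shift = 19;		/* IGB_TSYNC_SHIFT */
	const unsigned int scale = 1u << shift;	/* IGB_TSYNC_SCALE */
	const unsigned int cycle_ns = 16;	/* ns per NIC clock tick */

	unsigned int timinca = cycle_ns * scale;	/* 16 << 19 == 1 << 23 */
	long long adj = 100000;	/* 100 ppm, written as parts per billion
				 * to match the /1e9 in the formula */

	timinca += (unsigned int)(timinca * adj / 1000000000LL);

	printf("TIMINCA = %u, fits in 24 bits: %s\n",
	       timinca, timinca < (1u << 24) ? "yes" : "no");
	return 0;
}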
270 | /** | ||
271 | * igb_read_clock - read raw cycle counter (to be used by time counter) | 201 | * igb_read_clock - read raw cycle counter (to be used by time counter) |
272 | */ | 202 | */ |
273 | static cycle_t igb_read_clock(const struct cyclecounter *tc) | 203 | static cycle_t igb_read_clock(const struct cyclecounter *tc) |
@@ -275,11 +205,21 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc) | |||
275 | struct igb_adapter *adapter = | 205 | struct igb_adapter *adapter = |
276 | container_of(tc, struct igb_adapter, cycles); | 206 | container_of(tc, struct igb_adapter, cycles); |
277 | struct e1000_hw *hw = &adapter->hw; | 207 | struct e1000_hw *hw = &adapter->hw; |
278 | u64 stamp; | 208 | u64 stamp = 0; |
209 | int shift = 0; | ||
279 | 210 | ||
280 | stamp = rd32(E1000_SYSTIML); | 211 | /* |
281 | stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL; | 212 | * The timestamp latches on lowest register read. For the 82580 |
213 | * the lowest register is SYSTIMR instead of SYSTIML. However, we never | ||
214 | * adjusted TIMINCA, so SYSTIMR will just read as all 0s and can be ignored. | ||
215 | */ | ||
216 | if (hw->mac.type == e1000_82580) { | ||
217 | stamp = rd32(E1000_SYSTIMR) >> 8; | ||
218 | shift = IGB_82580_TSYNC_SHIFT; | ||
219 | } | ||
282 | 220 | ||
221 | stamp |= (u64)rd32(E1000_SYSTIML) << shift; | ||
222 | stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32); | ||
283 | return stamp; | 223 | return stamp; |
284 | } | 224 | } |
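The register reads can be mimicked with plain variables to see how the three words combine. In this sketch the 82580 path is assumed, IGB_82580_TSYNC_SHIFT is taken to be 24, and the register contents are invented:

#include <stdint.h>
#include <stdio.h>

/* fake register file standing in for rd32() of SYSTIMR/SYSTIML/SYSTIMH */
static uint32_t systimr = 0x12000000;	/* sub-ns residue, top 24 bits used */
static uint32_t systiml = 0x89ABCDEF;
static uint32_t systimh = 0x00000042;

int main(void)
{
	uint64_t stamp = 0;
	int shift = 0;
	int is_82580 = 1;	/* assume the three-register 82580 layout */

	/* the lowest register latches the timestamp, so it is read first;
	 * on 82580 that is SYSTIMR, and SYSTIML/SYSTIMH shift up */
	if (is_82580) {
		stamp = systimr >> 8;
		shift = 24;	/* IGB_82580_TSYNC_SHIFT, assumed here */
	}

	stamp |= (uint64_t)systiml << shift;
	stamp |= (uint64_t)systimh << (shift + 32);

	printf("stamp = 0x%016llx\n", (unsigned long long)stamp);
	return 0;
}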
285 | 225 | ||
@@ -320,17 +260,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter, | |||
320 | #endif | 260 | #endif |
321 | 261 | ||
322 | /** | 262 | /** |
323 | * igb_desc_unused - calculate the number of unused descriptors | ||
324 | **/ | ||
325 | static int igb_desc_unused(struct igb_ring *ring) | ||
326 | { | ||
327 | if (ring->next_to_clean > ring->next_to_use) | ||
328 | return ring->next_to_clean - ring->next_to_use - 1; | ||
329 | |||
330 | return ring->count + ring->next_to_clean - ring->next_to_use - 1; | ||
331 | } | ||
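This helper moves elsewhere as part of the patch, but its arithmetic is easy to sanity-check on its own: one slot is always kept empty so that head == tail unambiguously means an empty ring. A standalone version with two worked cases:

#include <stdio.h>

/* same arithmetic as the igb_desc_unused() helper removed above */
static int desc_unused(int count, int next_to_use, int next_to_clean)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	printf("%d\n", desc_unused(256, 0, 0));		/* empty ring: 255 */
	printf("%d\n", desc_unused(256, 250, 10));	/* wrapped: 15 */
	return 0;
}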
332 | |||
333 | /** | ||
334 | * igb_init_module - Driver Registration Routine | 263 | * igb_init_module - Driver Registration Routine |
335 | * | 264 | * |
336 | * igb_init_module is the first routine called when the driver is | 265 | * igb_init_module is the first routine called when the driver is |
@@ -344,12 +273,9 @@ static int __init igb_init_module(void) | |||
344 | 273 | ||
345 | printk(KERN_INFO "%s\n", igb_copyright); | 274 | printk(KERN_INFO "%s\n", igb_copyright); |
346 | 275 | ||
347 | global_quad_port_a = 0; | ||
348 | |||
349 | #ifdef CONFIG_IGB_DCA | 276 | #ifdef CONFIG_IGB_DCA |
350 | dca_register_notify(&dca_notifier); | 277 | dca_register_notify(&dca_notifier); |
351 | #endif | 278 | #endif |
352 | |||
353 | ret = pci_register_driver(&igb_driver); | 279 | ret = pci_register_driver(&igb_driver); |
354 | return ret; | 280 | return ret; |
355 | } | 281 | } |
@@ -382,8 +308,8 @@ module_exit(igb_exit_module); | |||
382 | **/ | 308 | **/ |
383 | static void igb_cache_ring_register(struct igb_adapter *adapter) | 309 | static void igb_cache_ring_register(struct igb_adapter *adapter) |
384 | { | 310 | { |
385 | int i; | 311 | int i = 0, j = 0; |
386 | unsigned int rbase_offset = adapter->vfs_allocated_count; | 312 | u32 rbase_offset = adapter->vfs_allocated_count; |
387 | 313 | ||
388 | switch (adapter->hw.mac.type) { | 314 | switch (adapter->hw.mac.type) { |
389 | case e1000_82576: | 315 | case e1000_82576: |
@@ -392,23 +318,41 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) | |||
392 | * In order to avoid collision we start at the first free queue | 318 | * In order to avoid collision we start at the first free queue |
393 | * and continue consuming queues in the same sequence | 319 | * and continue consuming queues in the same sequence |
394 | */ | 320 | */ |
395 | for (i = 0; i < adapter->num_rx_queues; i++) | 321 | if (adapter->vfs_allocated_count) { |
396 | adapter->rx_ring[i].reg_idx = rbase_offset + | 322 | for (; i < adapter->rss_queues; i++) |
397 | Q_IDX_82576(i); | 323 | adapter->rx_ring[i]->reg_idx = rbase_offset + |
398 | for (i = 0; i < adapter->num_tx_queues; i++) | 324 | Q_IDX_82576(i); |
399 | adapter->tx_ring[i].reg_idx = rbase_offset + | 325 | for (; j < adapter->rss_queues; j++) |
400 | Q_IDX_82576(i); | 326 | adapter->tx_ring[j]->reg_idx = rbase_offset + |
401 | break; | 327 | Q_IDX_82576(j); |
328 | } | ||
402 | case e1000_82575: | 329 | case e1000_82575: |
330 | case e1000_82580: | ||
403 | default: | 331 | default: |
404 | for (i = 0; i < adapter->num_rx_queues; i++) | 332 | for (; i < adapter->num_rx_queues; i++) |
405 | adapter->rx_ring[i].reg_idx = i; | 333 | adapter->rx_ring[i]->reg_idx = rbase_offset + i; |
406 | for (i = 0; i < adapter->num_tx_queues; i++) | 334 | for (; j < adapter->num_tx_queues; j++) |
407 | adapter->tx_ring[i].reg_idx = i; | 335 | adapter->tx_ring[j]->reg_idx = rbase_offset + j; |
408 | break; | 336 | break; |
409 | } | 337 | } |
410 | } | 338 | } |
411 | 339 | ||
340 | static void igb_free_queues(struct igb_adapter *adapter) | ||
341 | { | ||
342 | int i; | ||
343 | |||
344 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
345 | kfree(adapter->tx_ring[i]); | ||
346 | adapter->tx_ring[i] = NULL; | ||
347 | } | ||
348 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
349 | kfree(adapter->rx_ring[i]); | ||
350 | adapter->rx_ring[i] = NULL; | ||
351 | } | ||
352 | adapter->num_rx_queues = 0; | ||
353 | adapter->num_tx_queues = 0; | ||
354 | } | ||
355 | |||
412 | /** | 356 | /** |
413 | * igb_alloc_queues - Allocate memory for all rings | 357 | * igb_alloc_queues - Allocate memory for all rings |
414 | * @adapter: board private structure to initialize | 358 | * @adapter: board private structure to initialize |
@@ -418,64 +362,63 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) | |||
418 | **/ | 362 | **/ |
419 | static int igb_alloc_queues(struct igb_adapter *adapter) | 363 | static int igb_alloc_queues(struct igb_adapter *adapter) |
420 | { | 364 | { |
365 | struct igb_ring *ring; | ||
421 | int i; | 366 | int i; |
422 | 367 | ||
423 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, | ||
424 | sizeof(struct igb_ring), GFP_KERNEL); | ||
425 | if (!adapter->tx_ring) | ||
426 | return -ENOMEM; | ||
427 | |||
428 | adapter->rx_ring = kcalloc(adapter->num_rx_queues, | ||
429 | sizeof(struct igb_ring), GFP_KERNEL); | ||
430 | if (!adapter->rx_ring) { | ||
431 | kfree(adapter->tx_ring); | ||
432 | return -ENOMEM; | ||
433 | } | ||
434 | |||
435 | adapter->rx_ring->buddy = adapter->tx_ring; | ||
436 | |||
437 | for (i = 0; i < adapter->num_tx_queues; i++) { | 368 | for (i = 0; i < adapter->num_tx_queues; i++) { |
438 | struct igb_ring *ring = &(adapter->tx_ring[i]); | 369 | ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); |
370 | if (!ring) | ||
371 | goto err; | ||
439 | ring->count = adapter->tx_ring_count; | 372 | ring->count = adapter->tx_ring_count; |
440 | ring->adapter = adapter; | ||
441 | ring->queue_index = i; | 373 | ring->queue_index = i; |
374 | ring->pdev = adapter->pdev; | ||
375 | ring->netdev = adapter->netdev; | ||
376 | /* For 82575, context index must be unique per ring. */ | ||
377 | if (adapter->hw.mac.type == e1000_82575) | ||
378 | ring->flags = IGB_RING_FLAG_TX_CTX_IDX; | ||
379 | adapter->tx_ring[i] = ring; | ||
442 | } | 380 | } |
381 | |||
443 | for (i = 0; i < adapter->num_rx_queues; i++) { | 382 | for (i = 0; i < adapter->num_rx_queues; i++) { |
444 | struct igb_ring *ring = &(adapter->rx_ring[i]); | 383 | ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); |
384 | if (!ring) | ||
385 | goto err; | ||
445 | ring->count = adapter->rx_ring_count; | 386 | ring->count = adapter->rx_ring_count; |
446 | ring->adapter = adapter; | ||
447 | ring->queue_index = i; | 387 | ring->queue_index = i; |
448 | ring->itr_register = E1000_ITR; | 388 | ring->pdev = adapter->pdev; |
449 | 389 | ring->netdev = adapter->netdev; | |
450 | /* set a default napi handler for each rx_ring */ | 390 | ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
451 | netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64); | 391 | ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */ |
392 | /* set flag indicating ring supports SCTP checksum offload */ | ||
393 | if (adapter->hw.mac.type >= e1000_82576) | ||
394 | ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM; | ||
395 | adapter->rx_ring[i] = ring; | ||
452 | } | 396 | } |
453 | 397 | ||
454 | igb_cache_ring_register(adapter); | 398 | igb_cache_ring_register(adapter); |
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static void igb_free_queues(struct igb_adapter *adapter) | ||
459 | { | ||
460 | int i; | ||
461 | 399 | ||
462 | for (i = 0; i < adapter->num_rx_queues; i++) | 400 | return 0; |
463 | netif_napi_del(&adapter->rx_ring[i].napi); | ||
464 | 401 | ||
465 | adapter->num_rx_queues = 0; | 402 | err: |
466 | adapter->num_tx_queues = 0; | 403 | igb_free_queues(adapter); |
467 | 404 | ||
468 | kfree(adapter->tx_ring); | 405 | return -ENOMEM; |
469 | kfree(adapter->rx_ring); | ||
470 | } | 406 | } |
471 | 407 | ||
472 | #define IGB_N0_QUEUE -1 | 408 | #define IGB_N0_QUEUE -1 |
473 | static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, | 409 | static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) |
474 | int tx_queue, int msix_vector) | ||
475 | { | 410 | { |
476 | u32 msixbm = 0; | 411 | u32 msixbm = 0; |
412 | struct igb_adapter *adapter = q_vector->adapter; | ||
477 | struct e1000_hw *hw = &adapter->hw; | 413 | struct e1000_hw *hw = &adapter->hw; |
478 | u32 ivar, index; | 414 | u32 ivar, index; |
415 | int rx_queue = IGB_N0_QUEUE; | ||
416 | int tx_queue = IGB_N0_QUEUE; | ||
417 | |||
418 | if (q_vector->rx_ring) | ||
419 | rx_queue = q_vector->rx_ring->reg_idx; | ||
420 | if (q_vector->tx_ring) | ||
421 | tx_queue = q_vector->tx_ring->reg_idx; | ||
479 | 422 | ||
480 | switch (hw->mac.type) { | 423 | switch (hw->mac.type) { |
481 | case e1000_82575: | 424 | case e1000_82575: |
@@ -483,16 +426,14 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, | |||
483 | bitmask for the EICR/EIMS/EIMC registers. To assign one | 426 | bitmask for the EICR/EIMS/EIMC registers. To assign one |
484 | or more queues to a vector, we write the appropriate bits | 427 | or more queues to a vector, we write the appropriate bits |
485 | into the MSIXBM register for that vector. */ | 428 | into the MSIXBM register for that vector. */ |
486 | if (rx_queue > IGB_N0_QUEUE) { | 429 | if (rx_queue > IGB_N0_QUEUE) |
487 | msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; | 430 | msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; |
488 | adapter->rx_ring[rx_queue].eims_value = msixbm; | 431 | if (tx_queue > IGB_N0_QUEUE) |
489 | } | ||
490 | if (tx_queue > IGB_N0_QUEUE) { | ||
491 | msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; | 432 | msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; |
492 | adapter->tx_ring[tx_queue].eims_value = | 433 | if (!adapter->msix_entries && msix_vector == 0) |
493 | E1000_EICR_TX_QUEUE0 << tx_queue; | 434 | msixbm |= E1000_EIMS_OTHER; |
494 | } | ||
495 | array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); | 435 | array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); |
436 | q_vector->eims_value = msixbm; | ||
496 | break; | 437 | break; |
497 | case e1000_82576: | 438 | case e1000_82576: |
498 | /* 82576 uses a table-based method for assigning vectors. | 439 | /* 82576 uses a table-based method for assigning vectors. |
@@ -500,7 +441,40 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, | |||
500 | a vector number along with a "valid" bit. Sadly, the layout | 441 | a vector number along with a "valid" bit. Sadly, the layout |
501 | of the table is somewhat counterintuitive. */ | 442 | of the table is somewhat counterintuitive. */ |
502 | if (rx_queue > IGB_N0_QUEUE) { | 443 | if (rx_queue > IGB_N0_QUEUE) { |
503 | index = (rx_queue >> 1) + adapter->vfs_allocated_count; | 444 | index = (rx_queue & 0x7); |
445 | ivar = array_rd32(E1000_IVAR0, index); | ||
446 | if (rx_queue < 8) { | ||
447 | /* vector goes into low byte of register */ | ||
448 | ivar = ivar & 0xFFFFFF00; | ||
449 | ivar |= msix_vector | E1000_IVAR_VALID; | ||
450 | } else { | ||
451 | /* vector goes into third byte of register */ | ||
452 | ivar = ivar & 0xFF00FFFF; | ||
453 | ivar |= (msix_vector | E1000_IVAR_VALID) << 16; | ||
454 | } | ||
455 | array_wr32(E1000_IVAR0, index, ivar); | ||
456 | } | ||
457 | if (tx_queue > IGB_N0_QUEUE) { | ||
458 | index = (tx_queue & 0x7); | ||
459 | ivar = array_rd32(E1000_IVAR0, index); | ||
460 | if (tx_queue < 8) { | ||
461 | /* vector goes into second byte of register */ | ||
462 | ivar = ivar & 0xFFFF00FF; | ||
463 | ivar |= (msix_vector | E1000_IVAR_VALID) << 8; | ||
464 | } else { | ||
465 | /* vector goes into high byte of register */ | ||
466 | ivar = ivar & 0x00FFFFFF; | ||
467 | ivar |= (msix_vector | E1000_IVAR_VALID) << 24; | ||
468 | } | ||
469 | array_wr32(E1000_IVAR0, index, ivar); | ||
470 | } | ||
471 | q_vector->eims_value = 1 << msix_vector; | ||
472 | break; | ||
473 | case e1000_82580: | ||
474 | /* 82580 uses the same table-based approach as 82576 but has fewer | ||
475 | entries; as a result, vectors carry over for queues greater than 4. */ | ||
476 | if (rx_queue > IGB_N0_QUEUE) { | ||
477 | index = (rx_queue >> 1); | ||
504 | ivar = array_rd32(E1000_IVAR0, index); | 478 | ivar = array_rd32(E1000_IVAR0, index); |
505 | if (rx_queue & 0x1) { | 479 | if (rx_queue & 0x1) { |
506 | /* vector goes into third byte of register */ | 480 | /* vector goes into third byte of register */ |
@@ -511,11 +485,10 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, | |||
511 | ivar = ivar & 0xFFFFFF00; | 485 | ivar = ivar & 0xFFFFFF00; |
512 | ivar |= msix_vector | E1000_IVAR_VALID; | 486 | ivar |= msix_vector | E1000_IVAR_VALID; |
513 | } | 487 | } |
514 | adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector; | ||
515 | array_wr32(E1000_IVAR0, index, ivar); | 488 | array_wr32(E1000_IVAR0, index, ivar); |
516 | } | 489 | } |
517 | if (tx_queue > IGB_N0_QUEUE) { | 490 | if (tx_queue > IGB_N0_QUEUE) { |
518 | index = (tx_queue >> 1) + adapter->vfs_allocated_count; | 491 | index = (tx_queue >> 1); |
519 | ivar = array_rd32(E1000_IVAR0, index); | 492 | ivar = array_rd32(E1000_IVAR0, index); |
520 | if (tx_queue & 0x1) { | 493 | if (tx_queue & 0x1) { |
521 | /* vector goes into high byte of register */ | 494 | /* vector goes into high byte of register */ |
@@ -526,14 +499,20 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, | |||
526 | ivar = ivar & 0xFFFF00FF; | 499 | ivar = ivar & 0xFFFF00FF; |
527 | ivar |= (msix_vector | E1000_IVAR_VALID) << 8; | 500 | ivar |= (msix_vector | E1000_IVAR_VALID) << 8; |
528 | } | 501 | } |
529 | adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector; | ||
530 | array_wr32(E1000_IVAR0, index, ivar); | 502 | array_wr32(E1000_IVAR0, index, ivar); |
531 | } | 503 | } |
504 | q_vector->eims_value = 1 << msix_vector; | ||
532 | break; | 505 | break; |
533 | default: | 506 | default: |
534 | BUG(); | 507 | BUG(); |
535 | break; | 508 | break; |
536 | } | 509 | } |
510 | |||
511 | /* add q_vector eims value to global eims_enable_mask */ | ||
512 | adapter->eims_enable_mask |= q_vector->eims_value; | ||
513 | |||
514 | /* configure q_vector to set itr on first interrupt */ | ||
515 | q_vector->set_itr = 1; | ||
537 | } | 516 | } |
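For reference, the 82576 IVAR packing implemented above can be condensed into a standalone sketch. The helper names ivar_set_byte(), map_rx_queue() and map_tx_queue() are hypothetical, plain array accesses stand in for array_rd32()/array_wr32() on E1000_IVAR0, and IVAR_VALID is assumed to equal E1000_IVAR_VALID (0x80):

#include <stdint.h>

#define IVAR_VALID 0x80 /* assumed value of E1000_IVAR_VALID */

/* Each 32-bit IVAR register holds four entries, one per byte:
 * byte 0 = rx queue q, byte 1 = tx queue q,
 * byte 2 = rx queue q+8, byte 3 = tx queue q+8, where q = register index. */
static void ivar_set_byte(uint32_t *ivar, int byte, uint8_t vector)
{
        *ivar &= ~(0xFFu << (byte * 8));              /* clear the old entry */
        *ivar |= (uint32_t)(vector | IVAR_VALID) << (byte * 8);
}

static void map_rx_queue(uint32_t ivar[8], int q, uint8_t vector)
{
        ivar_set_byte(&ivar[q & 0x7], (q < 8) ? 0 : 2, vector);
}

static void map_tx_queue(uint32_t ivar[8], int q, uint8_t vector)
{
        ivar_set_byte(&ivar[q & 0x7], (q < 8) ? 1 : 3, vector);
}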
538 | 517 | ||
539 | /** | 518 | /** |
@@ -549,43 +528,10 @@ static void igb_configure_msix(struct igb_adapter *adapter) | |||
549 | struct e1000_hw *hw = &adapter->hw; | 528 | struct e1000_hw *hw = &adapter->hw; |
550 | 529 | ||
551 | adapter->eims_enable_mask = 0; | 530 | adapter->eims_enable_mask = 0; |
552 | if (hw->mac.type == e1000_82576) | ||
553 | /* Turn on MSI-X capability first, or our settings | ||
554 | * won't stick. And it will take days to debug. */ | ||
555 | wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | | ||
556 | E1000_GPIE_PBA | E1000_GPIE_EIAME | | ||
557 | E1000_GPIE_NSICR); | ||
558 | |||
559 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
560 | struct igb_ring *tx_ring = &adapter->tx_ring[i]; | ||
561 | igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++); | ||
562 | adapter->eims_enable_mask |= tx_ring->eims_value; | ||
563 | if (tx_ring->itr_val) | ||
564 | writel(tx_ring->itr_val, | ||
565 | hw->hw_addr + tx_ring->itr_register); | ||
566 | else | ||
567 | writel(1, hw->hw_addr + tx_ring->itr_register); | ||
568 | } | ||
569 | |||
570 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
571 | struct igb_ring *rx_ring = &adapter->rx_ring[i]; | ||
572 | rx_ring->buddy = NULL; | ||
573 | igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++); | ||
574 | adapter->eims_enable_mask |= rx_ring->eims_value; | ||
575 | if (rx_ring->itr_val) | ||
576 | writel(rx_ring->itr_val, | ||
577 | hw->hw_addr + rx_ring->itr_register); | ||
578 | else | ||
579 | writel(1, hw->hw_addr + rx_ring->itr_register); | ||
580 | } | ||
581 | |||
582 | 531 | ||
583 | /* set vector for other causes, i.e. link changes */ | 532 | /* set vector for other causes, i.e. link changes */ |
584 | switch (hw->mac.type) { | 533 | switch (hw->mac.type) { |
585 | case e1000_82575: | 534 | case e1000_82575: |
586 | array_wr32(E1000_MSIXBM(0), vector++, | ||
587 | E1000_EIMS_OTHER); | ||
588 | |||
589 | tmp = rd32(E1000_CTRL_EXT); | 535 | tmp = rd32(E1000_CTRL_EXT); |
590 | /* enable MSI-X PBA support*/ | 536 | /* enable MSI-X PBA support*/ |
591 | tmp |= E1000_CTRL_EXT_PBA_CLR; | 537 | tmp |= E1000_CTRL_EXT_PBA_CLR; |
@@ -595,22 +541,38 @@ static void igb_configure_msix(struct igb_adapter *adapter) | |||
595 | tmp |= E1000_CTRL_EXT_IRCA; | 541 | tmp |= E1000_CTRL_EXT_IRCA; |
596 | 542 | ||
597 | wr32(E1000_CTRL_EXT, tmp); | 543 | wr32(E1000_CTRL_EXT, tmp); |
598 | adapter->eims_enable_mask |= E1000_EIMS_OTHER; | 544 | |
545 | /* enable msix_other interrupt */ | ||
546 | array_wr32(E1000_MSIXBM(0), vector++, | ||
547 | E1000_EIMS_OTHER); | ||
599 | adapter->eims_other = E1000_EIMS_OTHER; | 548 | adapter->eims_other = E1000_EIMS_OTHER; |
600 | 549 | ||
601 | break; | 550 | break; |
602 | 551 | ||
603 | case e1000_82576: | 552 | case e1000_82576: |
553 | case e1000_82580: | ||
554 | /* Turn on MSI-X capability first, or our settings | ||
555 | * won't stick. And it will take days to debug. */ | ||
556 | wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | | ||
557 | E1000_GPIE_PBA | E1000_GPIE_EIAME | | ||
558 | E1000_GPIE_NSICR); | ||
559 | |||
560 | /* enable msix_other interrupt */ | ||
561 | adapter->eims_other = 1 << vector; | ||
604 | tmp = (vector++ | E1000_IVAR_VALID) << 8; | 562 | tmp = (vector++ | E1000_IVAR_VALID) << 8; |
605 | wr32(E1000_IVAR_MISC, tmp); | ||
606 | 563 | ||
607 | adapter->eims_enable_mask = (1 << (vector)) - 1; | 564 | wr32(E1000_IVAR_MISC, tmp); |
608 | adapter->eims_other = 1 << (vector - 1); | ||
609 | break; | 565 | break; |
610 | default: | 566 | default: |
611 | /* do nothing, since nothing else supports MSI-X */ | 567 | /* do nothing, since nothing else supports MSI-X */ |
612 | break; | 568 | break; |
613 | } /* switch (hw->mac.type) */ | 569 | } /* switch (hw->mac.type) */ |
570 | |||
571 | adapter->eims_enable_mask |= adapter->eims_other; | ||
572 | |||
573 | for (i = 0; i < adapter->num_q_vectors; i++) | ||
574 | igb_assign_vector(adapter->q_vector[i], vector++); | ||
575 | |||
614 | wrfl(); | 576 | wrfl(); |
615 | } | 577 | } |
616 | 578 | ||
@@ -623,43 +585,40 @@ static void igb_configure_msix(struct igb_adapter *adapter) | |||
623 | static int igb_request_msix(struct igb_adapter *adapter) | 585 | static int igb_request_msix(struct igb_adapter *adapter) |
624 | { | 586 | { |
625 | struct net_device *netdev = adapter->netdev; | 587 | struct net_device *netdev = adapter->netdev; |
588 | struct e1000_hw *hw = &adapter->hw; | ||
626 | int i, err = 0, vector = 0; | 589 | int i, err = 0, vector = 0; |
627 | 590 | ||
628 | vector = 0; | 591 | err = request_irq(adapter->msix_entries[vector].vector, |
629 | 592 | igb_msix_other, 0, netdev->name, adapter); | |
630 | for (i = 0; i < adapter->num_tx_queues; i++) { | 593 | if (err) |
631 | struct igb_ring *ring = &(adapter->tx_ring[i]); | 594 | goto out; |
632 | sprintf(ring->name, "%s-tx-%d", netdev->name, i); | 595 | vector++; |
633 | err = request_irq(adapter->msix_entries[vector].vector, | 596 | |
634 | &igb_msix_tx, 0, ring->name, | 597 | for (i = 0; i < adapter->num_q_vectors; i++) { |
635 | &(adapter->tx_ring[i])); | 598 | struct igb_q_vector *q_vector = adapter->q_vector[i]; |
636 | if (err) | 599 | |
637 | goto out; | 600 | q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); |
638 | ring->itr_register = E1000_EITR(0) + (vector << 2); | 601 | |
639 | ring->itr_val = 976; /* ~4000 ints/sec */ | 602 | if (q_vector->rx_ring && q_vector->tx_ring) |
640 | vector++; | 603 | sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, |
641 | } | 604 | q_vector->rx_ring->queue_index); |
642 | for (i = 0; i < adapter->num_rx_queues; i++) { | 605 | else if (q_vector->tx_ring) |
643 | struct igb_ring *ring = &(adapter->rx_ring[i]); | 606 | sprintf(q_vector->name, "%s-tx-%u", netdev->name, |
644 | if (strlen(netdev->name) < (IFNAMSIZ - 5)) | 607 | q_vector->tx_ring->queue_index); |
645 | sprintf(ring->name, "%s-rx-%d", netdev->name, i); | 608 | else if (q_vector->rx_ring) |
609 | sprintf(q_vector->name, "%s-rx-%u", netdev->name, | ||
610 | q_vector->rx_ring->queue_index); | ||
646 | else | 611 | else |
647 | memcpy(ring->name, netdev->name, IFNAMSIZ); | 612 | sprintf(q_vector->name, "%s-unused", netdev->name); |
613 | |||
648 | err = request_irq(adapter->msix_entries[vector].vector, | 614 | err = request_irq(adapter->msix_entries[vector].vector, |
649 | &igb_msix_rx, 0, ring->name, | 615 | igb_msix_ring, 0, q_vector->name, |
650 | &(adapter->rx_ring[i])); | 616 | q_vector); |
651 | if (err) | 617 | if (err) |
652 | goto out; | 618 | goto out; |
653 | ring->itr_register = E1000_EITR(0) + (vector << 2); | ||
654 | ring->itr_val = adapter->itr; | ||
655 | vector++; | 619 | vector++; |
656 | } | 620 | } |
657 | 621 | ||
658 | err = request_irq(adapter->msix_entries[vector].vector, | ||
659 | &igb_msix_other, 0, netdev->name, netdev); | ||
660 | if (err) | ||
661 | goto out; | ||
662 | |||
663 | igb_configure_msix(adapter); | 622 | igb_configure_msix(adapter); |
664 | return 0; | 623 | return 0; |
665 | out: | 624 | out: |
@@ -672,11 +631,46 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter) | |||
672 | pci_disable_msix(adapter->pdev); | 631 | pci_disable_msix(adapter->pdev); |
673 | kfree(adapter->msix_entries); | 632 | kfree(adapter->msix_entries); |
674 | adapter->msix_entries = NULL; | 633 | adapter->msix_entries = NULL; |
675 | } else if (adapter->flags & IGB_FLAG_HAS_MSI) | 634 | } else if (adapter->flags & IGB_FLAG_HAS_MSI) { |
676 | pci_disable_msi(adapter->pdev); | 635 | pci_disable_msi(adapter->pdev); |
677 | return; | 636 | } |
678 | } | 637 | } |
679 | 638 | ||
639 | /** | ||
640 | * igb_free_q_vectors - Free memory allocated for interrupt vectors | ||
641 | * @adapter: board private structure to initialize | ||
642 | * | ||
643 | * This function frees the memory allocated to the q_vectors. In addition, if | ||
644 | * NAPI is enabled, it will delete any references to the NAPI struct prior | ||
645 | * to freeing the q_vector. | ||
646 | **/ | ||
647 | static void igb_free_q_vectors(struct igb_adapter *adapter) | ||
648 | { | ||
649 | int v_idx; | ||
650 | |||
651 | for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { | ||
652 | struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
653 | adapter->q_vector[v_idx] = NULL; | ||
654 | if (!q_vector) | ||
655 | continue; | ||
656 | netif_napi_del(&q_vector->napi); | ||
657 | kfree(q_vector); | ||
658 | } | ||
659 | adapter->num_q_vectors = 0; | ||
660 | } | ||
661 | |||
662 | /** | ||
663 | * igb_clear_interrupt_scheme - reset the device to a state of no interrupts | ||
664 | * | ||
665 | * This function resets the device so that it has no rx queues, tx queues, or | ||
666 | * MSI-X interrupts allocated. | ||
667 | */ | ||
668 | static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) | ||
669 | { | ||
670 | igb_free_queues(adapter); | ||
671 | igb_free_q_vectors(adapter); | ||
672 | igb_reset_interrupt_capability(adapter); | ||
673 | } | ||
680 | 674 | ||
681 | /** | 675 | /** |
682 | * igb_set_interrupt_capability - set MSI or MSI-X if supported | 676 | * igb_set_interrupt_capability - set MSI or MSI-X if supported |
@@ -690,11 +684,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) | |||
690 | int numvecs, i; | 684 | int numvecs, i; |
691 | 685 | ||
692 | /* Number of supported queues. */ | 686 | /* Number of supported queues. */ |
693 | /* Having more queues than CPUs doesn't make sense. */ | 687 | adapter->num_rx_queues = adapter->rss_queues; |
694 | adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); | 688 | adapter->num_tx_queues = adapter->rss_queues; |
695 | adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus()); | 689 | |
690 | /* start with one vector for every rx queue */ | ||
691 | numvecs = adapter->num_rx_queues; | ||
692 | |||
693 | /* if tx handler is separate add 1 for every tx queue */ | ||
694 | if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) | ||
695 | numvecs += adapter->num_tx_queues; | ||
696 | 696 | ||
697 | numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1; | 697 | /* store the number of vectors reserved for queues */ |
698 | adapter->num_q_vectors = numvecs; | ||
699 | |||
700 | /* add 1 vector for link status interrupts */ | ||
701 | numvecs++; | ||
698 | adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), | 702 | adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), |
699 | GFP_KERNEL); | 703 | GFP_KERNEL); |
700 | if (!adapter->msix_entries) | 704 | if (!adapter->msix_entries) |
@@ -728,8 +732,12 @@ msi_only: | |||
728 | dev_info(&adapter->pdev->dev, "IOV Disabled\n"); | 732 | dev_info(&adapter->pdev->dev, "IOV Disabled\n"); |
729 | } | 733 | } |
730 | #endif | 734 | #endif |
735 | adapter->vfs_allocated_count = 0; | ||
736 | adapter->rss_queues = 1; | ||
737 | adapter->flags |= IGB_FLAG_QUEUE_PAIRS; | ||
731 | adapter->num_rx_queues = 1; | 738 | adapter->num_rx_queues = 1; |
732 | adapter->num_tx_queues = 1; | 739 | adapter->num_tx_queues = 1; |
740 | adapter->num_q_vectors = 1; | ||
733 | if (!pci_enable_msi(adapter->pdev)) | 741 | if (!pci_enable_msi(adapter->pdev)) |
734 | adapter->flags |= IGB_FLAG_HAS_MSI; | 742 | adapter->flags |= IGB_FLAG_HAS_MSI; |
735 | out: | 743 | out: |
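The vector budget computed above reduces to a small formula; a sketch under the assumption that IGB_FLAG_QUEUE_PAIRS means one shared vector per rx/tx pair (igb_numvecs() is a hypothetical helper, not part of the patch):

#include <stdbool.h>

static int igb_numvecs(int rss_queues, bool queue_pairs)
{
        int numvecs = rss_queues;       /* one vector per rx queue */

        if (!queue_pairs)
                numvecs += rss_queues;  /* separate vectors for tx queues */

        return numvecs + 1;             /* plus one for link status/other */
}
/* e.g. igb_numvecs(4, false) == 9 and igb_numvecs(8, true) == 9 */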
@@ -739,6 +747,133 @@ out: | |||
739 | } | 747 | } |
740 | 748 | ||
741 | /** | 749 | /** |
750 | * igb_alloc_q_vectors - Allocate memory for interrupt vectors | ||
751 | * @adapter: board private structure to initialize | ||
752 | * | ||
753 | * We allocate one q_vector per queue interrupt. If allocation fails we | ||
754 | * return -ENOMEM. | ||
755 | **/ | ||
756 | static int igb_alloc_q_vectors(struct igb_adapter *adapter) | ||
757 | { | ||
758 | struct igb_q_vector *q_vector; | ||
759 | struct e1000_hw *hw = &adapter->hw; | ||
760 | int v_idx; | ||
761 | |||
762 | for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { | ||
763 | q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL); | ||
764 | if (!q_vector) | ||
765 | goto err_out; | ||
766 | q_vector->adapter = adapter; | ||
767 | q_vector->itr_register = hw->hw_addr + E1000_EITR(0); | ||
768 | q_vector->itr_val = IGB_START_ITR; | ||
769 | netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); | ||
770 | adapter->q_vector[v_idx] = q_vector; | ||
771 | } | ||
772 | return 0; | ||
773 | |||
774 | err_out: | ||
775 | igb_free_q_vectors(adapter); | ||
776 | return -ENOMEM; | ||
777 | } | ||
778 | |||
779 | static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, | ||
780 | int ring_idx, int v_idx) | ||
781 | { | ||
782 | struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
783 | |||
784 | q_vector->rx_ring = adapter->rx_ring[ring_idx]; | ||
785 | q_vector->rx_ring->q_vector = q_vector; | ||
786 | q_vector->itr_val = adapter->rx_itr_setting; | ||
787 | if (q_vector->itr_val && q_vector->itr_val <= 3) | ||
788 | q_vector->itr_val = IGB_START_ITR; | ||
789 | } | ||
790 | |||
791 | static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, | ||
792 | int ring_idx, int v_idx) | ||
793 | { | ||
794 | struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
795 | |||
796 | q_vector->tx_ring = adapter->tx_ring[ring_idx]; | ||
797 | q_vector->tx_ring->q_vector = q_vector; | ||
798 | q_vector->itr_val = adapter->tx_itr_setting; | ||
799 | if (q_vector->itr_val && q_vector->itr_val <= 3) | ||
800 | q_vector->itr_val = IGB_START_ITR; | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * igb_map_ring_to_vector - maps allocated queues to vectors | ||
805 | * | ||
806 | * This function maps the recently allocated queues to vectors. | ||
807 | **/ | ||
808 | static int igb_map_ring_to_vector(struct igb_adapter *adapter) | ||
809 | { | ||
810 | int i; | ||
811 | int v_idx = 0; | ||
812 | |||
813 | if ((adapter->num_q_vectors < adapter->num_rx_queues) || | ||
814 | (adapter->num_q_vectors < adapter->num_tx_queues)) | ||
815 | return -ENOMEM; | ||
816 | |||
817 | if (adapter->num_q_vectors >= | ||
818 | (adapter->num_rx_queues + adapter->num_tx_queues)) { | ||
819 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
820 | igb_map_rx_ring_to_vector(adapter, i, v_idx++); | ||
821 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
822 | igb_map_tx_ring_to_vector(adapter, i, v_idx++); | ||
823 | } else { | ||
824 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
825 | if (i < adapter->num_tx_queues) | ||
826 | igb_map_tx_ring_to_vector(adapter, i, v_idx); | ||
827 | igb_map_rx_ring_to_vector(adapter, i, v_idx++); | ||
828 | } | ||
829 | for (; i < adapter->num_tx_queues; i++) | ||
830 | igb_map_tx_ring_to_vector(adapter, i, v_idx++); | ||
831 | } | ||
832 | return 0; | ||
833 | } | ||
834 | |||
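A worked example of the mapping logic above, using assumed queue counts:

/* 4 rx + 4 tx queues, 8 q_vectors (enough for one ring each):
 *   v0..v3 <- rx0..rx3, v4..v7 <- tx0..tx3
 * 4 rx + 4 tx queues, 4 q_vectors (paired mode):
 *   v0 <- tx0+rx0, v1 <- tx1+rx1, v2 <- tx2+rx2, v3 <- tx3+rx3 */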
835 | /** | ||
836 | * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors | ||
837 | * | ||
838 | * This function initializes the interrupts and allocates all of the queues. | ||
839 | **/ | ||
840 | static int igb_init_interrupt_scheme(struct igb_adapter *adapter) | ||
841 | { | ||
842 | struct pci_dev *pdev = adapter->pdev; | ||
843 | int err; | ||
844 | |||
845 | igb_set_interrupt_capability(adapter); | ||
846 | |||
847 | err = igb_alloc_q_vectors(adapter); | ||
848 | if (err) { | ||
849 | dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); | ||
850 | goto err_alloc_q_vectors; | ||
851 | } | ||
852 | |||
853 | err = igb_alloc_queues(adapter); | ||
854 | if (err) { | ||
855 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); | ||
856 | goto err_alloc_queues; | ||
857 | } | ||
858 | |||
859 | err = igb_map_ring_to_vector(adapter); | ||
860 | if (err) { | ||
861 | dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n"); | ||
862 | goto err_map_queues; | ||
863 | } | ||
864 | |||
865 | |||
866 | return 0; | ||
867 | err_map_queues: | ||
868 | igb_free_queues(adapter); | ||
869 | err_alloc_queues: | ||
870 | igb_free_q_vectors(adapter); | ||
871 | err_alloc_q_vectors: | ||
872 | igb_reset_interrupt_capability(adapter); | ||
873 | return err; | ||
874 | } | ||
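For orientation, the setup path above and the teardown path introduced earlier mirror each other; a sketch of the pairing:

/* igb_init_interrupt_scheme()          igb_clear_interrupt_scheme()
 *   igb_set_interrupt_capability() <->   igb_reset_interrupt_capability()
 *   igb_alloc_q_vectors()          <->   igb_free_q_vectors()
 *   igb_alloc_queues()             <->   igb_free_queues()
 *   igb_map_ring_to_vector()             (mapping is pointers only)
 * The error labels above unwind in exactly this reverse order. */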
875 | |||
876 | /** | ||
742 | * igb_request_irq - initialize interrupts | 877 | * igb_request_irq - initialize interrupts |
743 | * | 878 | * |
744 | * Attempts to configure interrupts using the best available | 879 | * Attempts to configure interrupts using the best available |
@@ -747,7 +882,7 @@ out: | |||
747 | static int igb_request_irq(struct igb_adapter *adapter) | 882 | static int igb_request_irq(struct igb_adapter *adapter) |
748 | { | 883 | { |
749 | struct net_device *netdev = adapter->netdev; | 884 | struct net_device *netdev = adapter->netdev; |
750 | struct e1000_hw *hw = &adapter->hw; | 885 | struct pci_dev *pdev = adapter->pdev; |
751 | int err = 0; | 886 | int err = 0; |
752 | 887 | ||
753 | if (adapter->msix_entries) { | 888 | if (adapter->msix_entries) { |
@@ -755,39 +890,46 @@ static int igb_request_irq(struct igb_adapter *adapter) | |||
755 | if (!err) | 890 | if (!err) |
756 | goto request_done; | 891 | goto request_done; |
757 | /* fall back to MSI */ | 892 | /* fall back to MSI */ |
758 | igb_reset_interrupt_capability(adapter); | 893 | igb_clear_interrupt_scheme(adapter); |
759 | if (!pci_enable_msi(adapter->pdev)) | 894 | if (!pci_enable_msi(adapter->pdev)) |
760 | adapter->flags |= IGB_FLAG_HAS_MSI; | 895 | adapter->flags |= IGB_FLAG_HAS_MSI; |
761 | igb_free_all_tx_resources(adapter); | 896 | igb_free_all_tx_resources(adapter); |
762 | igb_free_all_rx_resources(adapter); | 897 | igb_free_all_rx_resources(adapter); |
898 | adapter->num_tx_queues = 1; | ||
763 | adapter->num_rx_queues = 1; | 899 | adapter->num_rx_queues = 1; |
764 | igb_alloc_queues(adapter); | 900 | adapter->num_q_vectors = 1; |
765 | } else { | 901 | err = igb_alloc_q_vectors(adapter); |
766 | switch (hw->mac.type) { | 902 | if (err) { |
767 | case e1000_82575: | 903 | dev_err(&pdev->dev, |
768 | wr32(E1000_MSIXBM(0), | 904 | "Unable to allocate memory for vectors\n"); |
769 | (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER)); | 905 | goto request_done; |
770 | break; | ||
771 | case e1000_82576: | ||
772 | wr32(E1000_IVAR0, E1000_IVAR_VALID); | ||
773 | break; | ||
774 | default: | ||
775 | break; | ||
776 | } | 906 | } |
907 | err = igb_alloc_queues(adapter); | ||
908 | if (err) { | ||
909 | dev_err(&pdev->dev, | ||
910 | "Unable to allocate memory for queues\n"); | ||
911 | igb_free_q_vectors(adapter); | ||
912 | goto request_done; | ||
913 | } | ||
914 | igb_setup_all_tx_resources(adapter); | ||
915 | igb_setup_all_rx_resources(adapter); | ||
916 | } else { | ||
917 | igb_assign_vector(adapter->q_vector[0], 0); | ||
777 | } | 918 | } |
778 | 919 | ||
779 | if (adapter->flags & IGB_FLAG_HAS_MSI) { | 920 | if (adapter->flags & IGB_FLAG_HAS_MSI) { |
780 | err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, | 921 | err = request_irq(adapter->pdev->irq, igb_intr_msi, 0, |
781 | netdev->name, netdev); | 922 | netdev->name, adapter); |
782 | if (!err) | 923 | if (!err) |
783 | goto request_done; | 924 | goto request_done; |
925 | |||
784 | /* fall back to legacy interrupts */ | 926 | /* fall back to legacy interrupts */ |
785 | igb_reset_interrupt_capability(adapter); | 927 | igb_reset_interrupt_capability(adapter); |
786 | adapter->flags &= ~IGB_FLAG_HAS_MSI; | 928 | adapter->flags &= ~IGB_FLAG_HAS_MSI; |
787 | } | 929 | } |
788 | 930 | ||
789 | err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, | 931 | err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED, |
790 | netdev->name, netdev); | 932 | netdev->name, adapter); |
791 | 933 | ||
792 | if (err) | 934 | if (err) |
793 | dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", | 935 | dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", |
@@ -799,23 +941,19 @@ request_done: | |||
799 | 941 | ||
800 | static void igb_free_irq(struct igb_adapter *adapter) | 942 | static void igb_free_irq(struct igb_adapter *adapter) |
801 | { | 943 | { |
802 | struct net_device *netdev = adapter->netdev; | ||
803 | |||
804 | if (adapter->msix_entries) { | 944 | if (adapter->msix_entries) { |
805 | int vector = 0, i; | 945 | int vector = 0, i; |
806 | 946 | ||
807 | for (i = 0; i < adapter->num_tx_queues; i++) | 947 | free_irq(adapter->msix_entries[vector++].vector, adapter); |
808 | free_irq(adapter->msix_entries[vector++].vector, | ||
809 | &(adapter->tx_ring[i])); | ||
810 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
811 | free_irq(adapter->msix_entries[vector++].vector, | ||
812 | &(adapter->rx_ring[i])); | ||
813 | 948 | ||
814 | free_irq(adapter->msix_entries[vector++].vector, netdev); | 949 | for (i = 0; i < adapter->num_q_vectors; i++) { |
815 | return; | 950 | struct igb_q_vector *q_vector = adapter->q_vector[i]; |
951 | free_irq(adapter->msix_entries[vector++].vector, | ||
952 | q_vector); | ||
953 | } | ||
954 | } else { | ||
955 | free_irq(adapter->pdev->irq, adapter); | ||
816 | } | 956 | } |
817 | |||
818 | free_irq(adapter->pdev->irq, netdev); | ||
819 | } | 957 | } |
820 | 958 | ||
821 | /** | 959 | /** |
@@ -826,6 +964,11 @@ static void igb_irq_disable(struct igb_adapter *adapter) | |||
826 | { | 964 | { |
827 | struct e1000_hw *hw = &adapter->hw; | 965 | struct e1000_hw *hw = &adapter->hw; |
828 | 966 | ||
967 | /* | ||
968 | * we need to be careful when disabling interrupts. The VFs are also | ||
969 | * mapped into these registers, so clearing the bits can cause | ||
970 | * issues on the VF drivers; we therefore only clear the bits we set | ||
971 | */ | ||
829 | if (adapter->msix_entries) { | 972 | if (adapter->msix_entries) { |
830 | u32 regval = rd32(E1000_EIAM); | 973 | u32 regval = rd32(E1000_EIAM); |
831 | wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); | 974 | wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); |
@@ -849,41 +992,47 @@ static void igb_irq_enable(struct igb_adapter *adapter) | |||
849 | struct e1000_hw *hw = &adapter->hw; | 992 | struct e1000_hw *hw = &adapter->hw; |
850 | 993 | ||
851 | if (adapter->msix_entries) { | 994 | if (adapter->msix_entries) { |
995 | u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC; | ||
852 | u32 regval = rd32(E1000_EIAC); | 996 | u32 regval = rd32(E1000_EIAC); |
853 | wr32(E1000_EIAC, regval | adapter->eims_enable_mask); | 997 | wr32(E1000_EIAC, regval | adapter->eims_enable_mask); |
854 | regval = rd32(E1000_EIAM); | 998 | regval = rd32(E1000_EIAM); |
855 | wr32(E1000_EIAM, regval | adapter->eims_enable_mask); | 999 | wr32(E1000_EIAM, regval | adapter->eims_enable_mask); |
856 | wr32(E1000_EIMS, adapter->eims_enable_mask); | 1000 | wr32(E1000_EIMS, adapter->eims_enable_mask); |
857 | if (adapter->vfs_allocated_count) | 1001 | if (adapter->vfs_allocated_count) { |
858 | wr32(E1000_MBVFIMR, 0xFF); | 1002 | wr32(E1000_MBVFIMR, 0xFF); |
859 | wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB | | 1003 | ims |= E1000_IMS_VMMB; |
860 | E1000_IMS_DOUTSYNC)); | 1004 | } |
1005 | if (adapter->hw.mac.type == e1000_82580) | ||
1006 | ims |= E1000_IMS_DRSTA; | ||
1007 | |||
1008 | wr32(E1000_IMS, ims); | ||
861 | } else { | 1009 | } else { |
862 | wr32(E1000_IMS, IMS_ENABLE_MASK); | 1010 | wr32(E1000_IMS, IMS_ENABLE_MASK | |
863 | wr32(E1000_IAM, IMS_ENABLE_MASK); | 1011 | E1000_IMS_DRSTA); |
1012 | wr32(E1000_IAM, IMS_ENABLE_MASK | | ||
1013 | E1000_IMS_DRSTA); | ||
864 | } | 1014 | } |
865 | } | 1015 | } |
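In summary, the branches above program two interrupt levels; a sketch of the resulting masks, matching the code as written:

/* MSI-X: EIAC/EIAM/EIMS <- eims_enable_mask (per-vector auto-clear/mask/set)
 *        IMS  <- LSC | DOUTSYNC
 *                | VMMB  (only when VFs are allocated)
 *                | DRSTA (only on 82580)
 * MSI/legacy: IMS and IAM <- IMS_ENABLE_MASK | DRSTA */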
866 | 1016 | ||
867 | static void igb_update_mng_vlan(struct igb_adapter *adapter) | 1017 | static void igb_update_mng_vlan(struct igb_adapter *adapter) |
868 | { | 1018 | { |
869 | struct net_device *netdev = adapter->netdev; | 1019 | struct e1000_hw *hw = &adapter->hw; |
870 | u16 vid = adapter->hw.mng_cookie.vlan_id; | 1020 | u16 vid = adapter->hw.mng_cookie.vlan_id; |
871 | u16 old_vid = adapter->mng_vlan_id; | 1021 | u16 old_vid = adapter->mng_vlan_id; |
872 | if (adapter->vlgrp) { | ||
873 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { | ||
874 | if (adapter->hw.mng_cookie.status & | ||
875 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { | ||
876 | igb_vlan_rx_add_vid(netdev, vid); | ||
877 | adapter->mng_vlan_id = vid; | ||
878 | } else | ||
879 | adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; | ||
880 | 1022 | ||
881 | if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && | 1023 | if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { |
882 | (vid != old_vid) && | 1024 | /* add VID to filter table */ |
883 | !vlan_group_get_device(adapter->vlgrp, old_vid)) | 1025 | igb_vfta_set(hw, vid, true); |
884 | igb_vlan_rx_kill_vid(netdev, old_vid); | 1026 | adapter->mng_vlan_id = vid; |
885 | } else | 1027 | } else { |
886 | adapter->mng_vlan_id = vid; | 1028 | adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; |
1029 | } | ||
1030 | |||
1031 | if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && | ||
1032 | (vid != old_vid) && | ||
1033 | !vlan_group_get_device(adapter->vlgrp, old_vid)) { | ||
1034 | /* remove VID from filter table */ | ||
1035 | igb_vfta_set(hw, old_vid, false); | ||
887 | } | 1036 | } |
888 | } | 1037 | } |
889 | 1038 | ||
@@ -907,7 +1056,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter) | |||
907 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | 1056 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); |
908 | } | 1057 | } |
909 | 1058 | ||
910 | |||
911 | /** | 1059 | /** |
912 | * igb_get_hw_control - get control of the h/w from f/w | 1060 | * igb_get_hw_control - get control of the h/w from f/w |
913 | * @adapter: address of board private structure | 1061 | * @adapter: address of board private structure |
@@ -942,8 +1090,11 @@ static void igb_configure(struct igb_adapter *adapter) | |||
942 | 1090 | ||
943 | igb_restore_vlan(adapter); | 1091 | igb_restore_vlan(adapter); |
944 | 1092 | ||
945 | igb_configure_tx(adapter); | 1093 | igb_setup_tctl(adapter); |
1094 | igb_setup_mrqc(adapter); | ||
946 | igb_setup_rctl(adapter); | 1095 | igb_setup_rctl(adapter); |
1096 | |||
1097 | igb_configure_tx(adapter); | ||
947 | igb_configure_rx(adapter); | 1098 | igb_configure_rx(adapter); |
948 | 1099 | ||
949 | igb_rx_fifo_flush_82575(&adapter->hw); | 1100 | igb_rx_fifo_flush_82575(&adapter->hw); |
@@ -952,20 +1103,39 @@ static void igb_configure(struct igb_adapter *adapter) | |||
952 | * at least 1 descriptor unused to make sure | 1103 | * at least 1 descriptor unused to make sure |
953 | * next_to_use != next_to_clean */ | 1104 | * next_to_use != next_to_clean */ |
954 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1105 | for (i = 0; i < adapter->num_rx_queues; i++) { |
955 | struct igb_ring *ring = &adapter->rx_ring[i]; | 1106 | struct igb_ring *ring = adapter->rx_ring[i]; |
956 | igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); | 1107 | igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); |
957 | } | 1108 | } |
1109 | } | ||
958 | 1110 | ||
959 | 1111 | /** | |
960 | adapter->tx_queue_len = netdev->tx_queue_len; | 1112 | * igb_power_up_link - Power up the phy/serdes link |
1113 | * @adapter: address of board private structure | ||
1114 | **/ | ||
1115 | void igb_power_up_link(struct igb_adapter *adapter) | ||
1116 | { | ||
1117 | if (adapter->hw.phy.media_type == e1000_media_type_copper) | ||
1118 | igb_power_up_phy_copper(&adapter->hw); | ||
1119 | else | ||
1120 | igb_power_up_serdes_link_82575(&adapter->hw); | ||
961 | } | 1121 | } |
962 | 1122 | ||
1123 | /** | ||
1124 | * igb_power_down_link - Power down the phy/serdes link | ||
1125 | * @adapter: address of board private structure | ||
1126 | */ | ||
1127 | static void igb_power_down_link(struct igb_adapter *adapter) | ||
1128 | { | ||
1129 | if (adapter->hw.phy.media_type == e1000_media_type_copper) | ||
1130 | igb_power_down_phy_copper_82575(&adapter->hw); | ||
1131 | else | ||
1132 | igb_shutdown_serdes_link_82575(&adapter->hw); | ||
1133 | } | ||
963 | 1134 | ||
964 | /** | 1135 | /** |
965 | * igb_up - Open the interface and prepare it to handle traffic | 1136 | * igb_up - Open the interface and prepare it to handle traffic |
966 | * @adapter: board private structure | 1137 | * @adapter: board private structure |
967 | **/ | 1138 | **/ |
968 | |||
969 | int igb_up(struct igb_adapter *adapter) | 1139 | int igb_up(struct igb_adapter *adapter) |
970 | { | 1140 | { |
971 | struct e1000_hw *hw = &adapter->hw; | 1141 | struct e1000_hw *hw = &adapter->hw; |
@@ -976,30 +1146,39 @@ int igb_up(struct igb_adapter *adapter) | |||
976 | 1146 | ||
977 | clear_bit(__IGB_DOWN, &adapter->state); | 1147 | clear_bit(__IGB_DOWN, &adapter->state); |
978 | 1148 | ||
979 | for (i = 0; i < adapter->num_rx_queues; i++) | 1149 | for (i = 0; i < adapter->num_q_vectors; i++) { |
980 | napi_enable(&adapter->rx_ring[i].napi); | 1150 | struct igb_q_vector *q_vector = adapter->q_vector[i]; |
1151 | napi_enable(&q_vector->napi); | ||
1152 | } | ||
981 | if (adapter->msix_entries) | 1153 | if (adapter->msix_entries) |
982 | igb_configure_msix(adapter); | 1154 | igb_configure_msix(adapter); |
983 | 1155 | else | |
984 | igb_vmm_control(adapter); | 1156 | igb_assign_vector(adapter->q_vector[0], 0); |
985 | igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); | ||
986 | igb_set_vmolr(hw, adapter->vfs_allocated_count); | ||
987 | 1157 | ||
988 | /* Clear any pending interrupts. */ | 1158 | /* Clear any pending interrupts. */ |
989 | rd32(E1000_ICR); | 1159 | rd32(E1000_ICR); |
990 | igb_irq_enable(adapter); | 1160 | igb_irq_enable(adapter); |
991 | 1161 | ||
1162 | /* notify VFs that reset has been completed */ | ||
1163 | if (adapter->vfs_allocated_count) { | ||
1164 | u32 reg_data = rd32(E1000_CTRL_EXT); | ||
1165 | reg_data |= E1000_CTRL_EXT_PFRSTD; | ||
1166 | wr32(E1000_CTRL_EXT, reg_data); | ||
1167 | } | ||
1168 | |||
992 | netif_tx_start_all_queues(adapter->netdev); | 1169 | netif_tx_start_all_queues(adapter->netdev); |
993 | 1170 | ||
994 | /* Fire a link change interrupt to start the watchdog. */ | 1171 | /* start the watchdog. */ |
995 | wr32(E1000_ICS, E1000_ICS_LSC); | 1172 | hw->mac.get_link_status = 1; |
1173 | schedule_work(&adapter->watchdog_task); | ||
1174 | |||
996 | return 0; | 1175 | return 0; |
997 | } | 1176 | } |
998 | 1177 | ||
999 | void igb_down(struct igb_adapter *adapter) | 1178 | void igb_down(struct igb_adapter *adapter) |
1000 | { | 1179 | { |
1001 | struct e1000_hw *hw = &adapter->hw; | ||
1002 | struct net_device *netdev = adapter->netdev; | 1180 | struct net_device *netdev = adapter->netdev; |
1181 | struct e1000_hw *hw = &adapter->hw; | ||
1003 | u32 tctl, rctl; | 1182 | u32 tctl, rctl; |
1004 | int i; | 1183 | int i; |
1005 | 1184 | ||
@@ -1022,15 +1201,16 @@ void igb_down(struct igb_adapter *adapter) | |||
1022 | wrfl(); | 1201 | wrfl(); |
1023 | msleep(10); | 1202 | msleep(10); |
1024 | 1203 | ||
1025 | for (i = 0; i < adapter->num_rx_queues; i++) | 1204 | for (i = 0; i < adapter->num_q_vectors; i++) { |
1026 | napi_disable(&adapter->rx_ring[i].napi); | 1205 | struct igb_q_vector *q_vector = adapter->q_vector[i]; |
1206 | napi_disable(&q_vector->napi); | ||
1207 | } | ||
1027 | 1208 | ||
1028 | igb_irq_disable(adapter); | 1209 | igb_irq_disable(adapter); |
1029 | 1210 | ||
1030 | del_timer_sync(&adapter->watchdog_timer); | 1211 | del_timer_sync(&adapter->watchdog_timer); |
1031 | del_timer_sync(&adapter->phy_info_timer); | 1212 | del_timer_sync(&adapter->phy_info_timer); |
1032 | 1213 | ||
1033 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
1034 | netif_carrier_off(netdev); | 1214 | netif_carrier_off(netdev); |
1035 | 1215 | ||
1036 | /* record the stats before reset*/ | 1216 | /* record the stats before reset*/ |
@@ -1062,6 +1242,7 @@ void igb_reinit_locked(struct igb_adapter *adapter) | |||
1062 | 1242 | ||
1063 | void igb_reset(struct igb_adapter *adapter) | 1243 | void igb_reset(struct igb_adapter *adapter) |
1064 | { | 1244 | { |
1245 | struct pci_dev *pdev = adapter->pdev; | ||
1065 | struct e1000_hw *hw = &adapter->hw; | 1246 | struct e1000_hw *hw = &adapter->hw; |
1066 | struct e1000_mac_info *mac = &hw->mac; | 1247 | struct e1000_mac_info *mac = &hw->mac; |
1067 | struct e1000_fc_info *fc = &hw->fc; | 1248 | struct e1000_fc_info *fc = &hw->fc; |
@@ -1072,8 +1253,13 @@ void igb_reset(struct igb_adapter *adapter) | |||
1072 | * To take effect CTRL.RST is required. | 1253 | * To take effect CTRL.RST is required. |
1073 | */ | 1254 | */ |
1074 | switch (mac->type) { | 1255 | switch (mac->type) { |
1256 | case e1000_82580: | ||
1257 | pba = rd32(E1000_RXPBS); | ||
1258 | pba = igb_rxpbs_adjust_82580(pba); | ||
1259 | break; | ||
1075 | case e1000_82576: | 1260 | case e1000_82576: |
1076 | pba = E1000_PBA_64K; | 1261 | pba = rd32(E1000_RXPBS); |
1262 | pba &= E1000_RXPBS_SIZE_MASK_82576; | ||
1077 | break; | 1263 | break; |
1078 | case e1000_82575: | 1264 | case e1000_82575: |
1079 | default: | 1265 | default: |
@@ -1133,13 +1319,8 @@ void igb_reset(struct igb_adapter *adapter) | |||
1133 | hwm = min(((pba << 10) * 9 / 10), | 1319 | hwm = min(((pba << 10) * 9 / 10), |
1134 | ((pba << 10) - 2 * adapter->max_frame_size)); | 1320 | ((pba << 10) - 2 * adapter->max_frame_size)); |
1135 | 1321 | ||
1136 | if (mac->type < e1000_82576) { | 1322 | fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ |
1137 | fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ | 1323 | fc->low_water = fc->high_water - 16; |
1138 | fc->low_water = fc->high_water - 8; | ||
1139 | } else { | ||
1140 | fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ | ||
1141 | fc->low_water = fc->high_water - 16; | ||
1142 | } | ||
1143 | fc->pause_time = 0xFFFF; | 1324 | fc->pause_time = 0xFFFF; |
1144 | fc->send_xon = 1; | 1325 | fc->send_xon = 1; |
1145 | fc->current_mode = fc->requested_mode; | 1326 | fc->current_mode = fc->requested_mode; |
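A worked example of the watermark arithmetic above, assuming pba = 40 (in KB, after the tx carve-out) and a 1522-byte max frame:

/*   pba << 10                     = 40960 bytes of rx packet buffer
 *   (pba << 10) * 9 / 10          = 36864 (90% full)
 *   (pba << 10) - 2 * 1522        = 37916 (room for two max frames)
 *   hwm = min(36864, 37916)       = 36864
 *   fc->high_water = hwm & 0xFFF0 = 36864 (0x9000, 16-byte aligned)
 *   fc->low_water  = 36864 - 16   = 36848 */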
@@ -1148,10 +1329,10 @@ void igb_reset(struct igb_adapter *adapter) | |||
1148 | if (adapter->vfs_allocated_count) { | 1329 | if (adapter->vfs_allocated_count) { |
1149 | int i; | 1330 | int i; |
1150 | for (i = 0 ; i < adapter->vfs_allocated_count; i++) | 1331 | for (i = 0 ; i < adapter->vfs_allocated_count; i++) |
1151 | adapter->vf_data[i].clear_to_send = false; | 1332 | adapter->vf_data[i].flags = 0; |
1152 | 1333 | ||
1153 | /* ping all the active vfs to let them know we are going down */ | 1334 | /* ping all the active vfs to let them know we are going down */ |
1154 | igb_ping_all_vfs(adapter); | 1335 | igb_ping_all_vfs(adapter); |
1155 | 1336 | ||
1156 | /* disable transmits and receives */ | 1337 | /* disable transmits and receives */ |
1157 | wr32(E1000_VFRE, 0); | 1338 | wr32(E1000_VFRE, 0); |
@@ -1159,23 +1340,30 @@ void igb_reset(struct igb_adapter *adapter) | |||
1159 | } | 1340 | } |
1160 | 1341 | ||
1161 | /* Allow time for pending master requests to run */ | 1342 | /* Allow time for pending master requests to run */ |
1162 | adapter->hw.mac.ops.reset_hw(&adapter->hw); | 1343 | hw->mac.ops.reset_hw(hw); |
1163 | wr32(E1000_WUC, 0); | 1344 | wr32(E1000_WUC, 0); |
1164 | 1345 | ||
1165 | if (adapter->hw.mac.ops.init_hw(&adapter->hw)) | 1346 | if (hw->mac.ops.init_hw(hw)) |
1166 | dev_err(&adapter->pdev->dev, "Hardware Error\n"); | 1347 | dev_err(&pdev->dev, "Hardware Error\n"); |
1348 | |||
1349 | if (hw->mac.type == e1000_82580) { | ||
1350 | u32 reg = rd32(E1000_PCIEMISC); | ||
1351 | wr32(E1000_PCIEMISC, | ||
1352 | reg & ~E1000_PCIEMISC_LX_DECISION); | ||
1353 | } | ||
1354 | if (!netif_running(adapter->netdev)) | ||
1355 | igb_power_down_link(adapter); | ||
1167 | 1356 | ||
1168 | igb_update_mng_vlan(adapter); | 1357 | igb_update_mng_vlan(adapter); |
1169 | 1358 | ||
1170 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | 1359 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ |
1171 | wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); | 1360 | wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); |
1172 | 1361 | ||
1173 | igb_reset_adaptive(&adapter->hw); | 1362 | igb_get_phy_info(hw); |
1174 | igb_get_phy_info(&adapter->hw); | ||
1175 | } | 1363 | } |
1176 | 1364 | ||
1177 | static const struct net_device_ops igb_netdev_ops = { | 1365 | static const struct net_device_ops igb_netdev_ops = { |
1178 | .ndo_open = igb_open, | 1366 | .ndo_open = igb_open, |
1179 | .ndo_stop = igb_close, | 1367 | .ndo_stop = igb_close, |
1180 | .ndo_start_xmit = igb_xmit_frame_adv, | 1368 | .ndo_start_xmit = igb_xmit_frame_adv, |
1181 | .ndo_get_stats = igb_get_stats, | 1369 | .ndo_get_stats = igb_get_stats, |
@@ -1189,6 +1377,10 @@ static const struct net_device_ops igb_netdev_ops = { | |||
1189 | .ndo_vlan_rx_register = igb_vlan_rx_register, | 1377 | .ndo_vlan_rx_register = igb_vlan_rx_register, |
1190 | .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, | 1378 | .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, |
1191 | .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, | 1379 | .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, |
1380 | .ndo_set_vf_mac = igb_ndo_set_vf_mac, | ||
1381 | .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, | ||
1382 | .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, | ||
1383 | .ndo_get_vf_config = igb_ndo_get_vf_config, | ||
1192 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1384 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1193 | .ndo_poll_controller = igb_netpoll, | 1385 | .ndo_poll_controller = igb_netpoll, |
1194 | #endif | 1386 | #endif |
@@ -1211,10 +1403,11 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1211 | struct net_device *netdev; | 1403 | struct net_device *netdev; |
1212 | struct igb_adapter *adapter; | 1404 | struct igb_adapter *adapter; |
1213 | struct e1000_hw *hw; | 1405 | struct e1000_hw *hw; |
1406 | u16 eeprom_data = 0; | ||
1407 | static int global_quad_port_a; /* global quad port a indication */ | ||
1214 | const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; | 1408 | const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; |
1215 | unsigned long mmio_start, mmio_len; | 1409 | unsigned long mmio_start, mmio_len; |
1216 | int err, pci_using_dac; | 1410 | int err, pci_using_dac; |
1217 | u16 eeprom_data = 0; | ||
1218 | u16 eeprom_apme_mask = IGB_EEPROM_APME; | 1411 | u16 eeprom_apme_mask = IGB_EEPROM_APME; |
1219 | u32 part_num; | 1412 | u32 part_num; |
1220 | 1413 | ||
@@ -1291,8 +1484,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1291 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | 1484 | hw->subsystem_vendor_id = pdev->subsystem_vendor; |
1292 | hw->subsystem_device_id = pdev->subsystem_device; | 1485 | hw->subsystem_device_id = pdev->subsystem_device; |
1293 | 1486 | ||
1294 | /* setup the private structure */ | ||
1295 | hw->back = adapter; | ||
1296 | /* Copy the default MAC, PHY and NVM function pointers */ | 1487 | /* Copy the default MAC, PHY and NVM function pointers */ |
1297 | memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); | 1488 | memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); |
1298 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); | 1489 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); |
@@ -1302,46 +1493,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1302 | if (err) | 1493 | if (err) |
1303 | goto err_sw_init; | 1494 | goto err_sw_init; |
1304 | 1495 | ||
1305 | #ifdef CONFIG_PCI_IOV | ||
1306 | /* since iov functionality isn't critical to base device function we | ||
1307 | * can accept failure. If it fails we don't allow iov to be enabled */ | ||
1308 | if (hw->mac.type == e1000_82576) { | ||
1309 | /* 82576 supports a maximum of 7 VFs in addition to the PF */ | ||
1310 | unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs; | ||
1311 | int i; | ||
1312 | unsigned char mac_addr[ETH_ALEN]; | ||
1313 | |||
1314 | if (num_vfs) { | ||
1315 | adapter->vf_data = kcalloc(num_vfs, | ||
1316 | sizeof(struct vf_data_storage), | ||
1317 | GFP_KERNEL); | ||
1318 | if (!adapter->vf_data) { | ||
1319 | dev_err(&pdev->dev, | ||
1320 | "Could not allocate VF private data - " | ||
1321 | "IOV enable failed\n"); | ||
1322 | } else { | ||
1323 | err = pci_enable_sriov(pdev, num_vfs); | ||
1324 | if (!err) { | ||
1325 | adapter->vfs_allocated_count = num_vfs; | ||
1326 | dev_info(&pdev->dev, | ||
1327 | "%d vfs allocated\n", | ||
1328 | num_vfs); | ||
1329 | for (i = 0; | ||
1330 | i < adapter->vfs_allocated_count; | ||
1331 | i++) { | ||
1332 | random_ether_addr(mac_addr); | ||
1333 | igb_set_vf_mac(adapter, i, | ||
1334 | mac_addr); | ||
1335 | } | ||
1336 | } else { | ||
1337 | kfree(adapter->vf_data); | ||
1338 | adapter->vf_data = NULL; | ||
1339 | } | ||
1340 | } | ||
1341 | } | ||
1342 | } | ||
1343 | |||
1344 | #endif | ||
1345 | /* setup the private structure */ | 1496 | /* setup the private structure */ |
1346 | err = igb_sw_init(adapter); | 1497 | err = igb_sw_init(adapter); |
1347 | if (err) | 1498 | if (err) |
@@ -1349,18 +1500,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1349 | 1500 | ||
1350 | igb_get_bus_info_pcie(hw); | 1501 | igb_get_bus_info_pcie(hw); |
1351 | 1502 | ||
1352 | /* set flags */ | ||
1353 | switch (hw->mac.type) { | ||
1354 | case e1000_82575: | ||
1355 | adapter->flags |= IGB_FLAG_NEED_CTX_IDX; | ||
1356 | break; | ||
1357 | case e1000_82576: | ||
1358 | default: | ||
1359 | break; | ||
1360 | } | ||
1361 | |||
1362 | hw->phy.autoneg_wait_to_complete = false; | 1503 | hw->phy.autoneg_wait_to_complete = false; |
1363 | hw->mac.adaptive_ifs = true; | ||
1364 | 1504 | ||
1365 | /* Copper options */ | 1505 | /* Copper options */ |
1366 | if (hw->phy.media_type == e1000_media_type_copper) { | 1506 | if (hw->phy.media_type == e1000_media_type_copper) { |
@@ -1382,7 +1522,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1382 | netdev->features |= NETIF_F_IPV6_CSUM; | 1522 | netdev->features |= NETIF_F_IPV6_CSUM; |
1383 | netdev->features |= NETIF_F_TSO; | 1523 | netdev->features |= NETIF_F_TSO; |
1384 | netdev->features |= NETIF_F_TSO6; | 1524 | netdev->features |= NETIF_F_TSO6; |
1385 | |||
1386 | netdev->features |= NETIF_F_GRO; | 1525 | netdev->features |= NETIF_F_GRO; |
1387 | 1526 | ||
1388 | netdev->vlan_features |= NETIF_F_TSO; | 1527 | netdev->vlan_features |= NETIF_F_TSO; |
@@ -1394,10 +1533,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1394 | if (pci_using_dac) | 1533 | if (pci_using_dac) |
1395 | netdev->features |= NETIF_F_HIGHDMA; | 1534 | netdev->features |= NETIF_F_HIGHDMA; |
1396 | 1535 | ||
1397 | if (adapter->hw.mac.type == e1000_82576) | 1536 | if (hw->mac.type >= e1000_82576) |
1398 | netdev->features |= NETIF_F_SCTP_CSUM; | 1537 | netdev->features |= NETIF_F_SCTP_CSUM; |
1399 | 1538 | ||
1400 | adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); | 1539 | adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); |
1401 | 1540 | ||
1402 | /* before reading the NVM, reset the controller to put the device in a | 1541 | /* before reading the NVM, reset the controller to put the device in a |
1403 | * known good starting state */ | 1542 | * known good starting state */ |
@@ -1439,9 +1578,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1439 | hw->fc.requested_mode = e1000_fc_default; | 1578 | hw->fc.requested_mode = e1000_fc_default; |
1440 | hw->fc.current_mode = e1000_fc_default; | 1579 | hw->fc.current_mode = e1000_fc_default; |
1441 | 1580 | ||
1442 | adapter->itr_setting = IGB_DEFAULT_ITR; | ||
1443 | adapter->itr = IGB_START_ITR; | ||
1444 | |||
1445 | igb_validate_mdi_setting(hw); | 1581 | igb_validate_mdi_setting(hw); |
1446 | 1582 | ||
1447 | /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, | 1583 | /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, |
@@ -1450,6 +1586,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1450 | 1586 | ||
1451 | if (hw->bus.func == 0) | 1587 | if (hw->bus.func == 0) |
1452 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | 1588 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); |
1589 | else if (hw->mac.type == e1000_82580) | ||
1590 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + | ||
1591 | NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, | ||
1592 | &eeprom_data); | ||
1453 | else if (hw->bus.func == 1) | 1593 | else if (hw->bus.func == 1) |
1454 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | 1594 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); |
1455 | 1595 | ||
@@ -1472,6 +1612,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1472 | adapter->eeprom_wol = 0; | 1612 | adapter->eeprom_wol = 0; |
1473 | break; | 1613 | break; |
1474 | case E1000_DEV_ID_82576_QUAD_COPPER: | 1614 | case E1000_DEV_ID_82576_QUAD_COPPER: |
1615 | case E1000_DEV_ID_82576_QUAD_COPPER_ET2: | ||
1475 | /* if quad port adapter, disable WoL on all but port A */ | 1616 | /* if quad port adapter, disable WoL on all but port A */ |
1476 | if (global_quad_port_a != 0) | 1617 | if (global_quad_port_a != 0) |
1477 | adapter->eeprom_wol = 0; | 1618 | adapter->eeprom_wol = 0; |
@@ -1508,66 +1649,14 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1508 | dev_info(&pdev->dev, "DCA enabled\n"); | 1649 | dev_info(&pdev->dev, "DCA enabled\n"); |
1509 | igb_setup_dca(adapter); | 1650 | igb_setup_dca(adapter); |
1510 | } | 1651 | } |
1511 | #endif | ||
1512 | |||
1513 | /* | ||
1514 | * Initialize hardware timer: we keep it running just in case | ||
1515 | * that some program needs it later on. | ||
1516 | */ | ||
1517 | memset(&adapter->cycles, 0, sizeof(adapter->cycles)); | ||
1518 | adapter->cycles.read = igb_read_clock; | ||
1519 | adapter->cycles.mask = CLOCKSOURCE_MASK(64); | ||
1520 | adapter->cycles.mult = 1; | ||
1521 | adapter->cycles.shift = IGB_TSYNC_SHIFT; | ||
1522 | wr32(E1000_TIMINCA, | ||
1523 | (1<<24) | | ||
1524 | IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE); | ||
1525 | #if 0 | ||
1526 | /* | ||
1527 | * Avoid rollover while we initialize by resetting the time counter. | ||
1528 | */ | ||
1529 | wr32(E1000_SYSTIML, 0x00000000); | ||
1530 | wr32(E1000_SYSTIMH, 0x00000000); | ||
1531 | #else | ||
1532 | /* | ||
1533 | * Set registers so that rollover occurs soon to test this. | ||
1534 | */ | ||
1535 | wr32(E1000_SYSTIML, 0x00000000); | ||
1536 | wr32(E1000_SYSTIMH, 0xFF800000); | ||
1537 | #endif | ||
1538 | wrfl(); | ||
1539 | timecounter_init(&adapter->clock, | ||
1540 | &adapter->cycles, | ||
1541 | ktime_to_ns(ktime_get_real())); | ||
1542 | |||
1543 | /* | ||
1544 | * Synchronize our NIC clock against system wall clock. NIC | ||
1545 | * time stamp reading requires ~3us per sample, each sample | ||
1546 | * was pretty stable even under load => only require 10 | ||
1547 | * samples for each offset comparison. | ||
1548 | */ | ||
1549 | memset(&adapter->compare, 0, sizeof(adapter->compare)); | ||
1550 | adapter->compare.source = &adapter->clock; | ||
1551 | adapter->compare.target = ktime_get_real; | ||
1552 | adapter->compare.num_samples = 10; | ||
1553 | timecompare_update(&adapter->compare, 0); | ||
1554 | 1652 | ||
1555 | #ifdef DEBUG | ||
1556 | { | ||
1557 | char buffer[160]; | ||
1558 | printk(KERN_DEBUG | ||
1559 | "igb: %s: hw %p initialized timer\n", | ||
1560 | igb_get_time_str(adapter, buffer), | ||
1561 | &adapter->hw); | ||
1562 | } | ||
1563 | #endif | 1653 | #endif |
1564 | |||
1565 | dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); | 1654 | dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); |
1566 | /* print bus type/speed/width info */ | 1655 | /* print bus type/speed/width info */ |
1567 | dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", | 1656 | dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", |
1568 | netdev->name, | 1657 | netdev->name, |
1569 | ((hw->bus.speed == e1000_bus_speed_2500) | 1658 | ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : |
1570 | ? "2.5Gb/s" : "unknown"), | 1659 | "unknown"), |
1571 | ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : | 1660 | ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : |
1572 | (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : | 1661 | (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : |
1573 | (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : | 1662 | (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : |
@@ -1594,15 +1683,14 @@ err_eeprom: | |||
1594 | 1683 | ||
1595 | if (hw->flash_address) | 1684 | if (hw->flash_address) |
1596 | iounmap(hw->flash_address); | 1685 | iounmap(hw->flash_address); |
1597 | |||
1598 | igb_free_queues(adapter); | ||
1599 | err_sw_init: | 1686 | err_sw_init: |
1687 | igb_clear_interrupt_scheme(adapter); | ||
1600 | iounmap(hw->hw_addr); | 1688 | iounmap(hw->hw_addr); |
1601 | err_ioremap: | 1689 | err_ioremap: |
1602 | free_netdev(netdev); | 1690 | free_netdev(netdev); |
1603 | err_alloc_etherdev: | 1691 | err_alloc_etherdev: |
1604 | pci_release_selected_regions(pdev, pci_select_bars(pdev, | 1692 | pci_release_selected_regions(pdev, |
1605 | IORESOURCE_MEM)); | 1693 | pci_select_bars(pdev, IORESOURCE_MEM)); |
1606 | err_pci_reg: | 1694 | err_pci_reg: |
1607 | err_dma: | 1695 | err_dma: |
1608 | pci_disable_device(pdev); | 1696 | pci_disable_device(pdev); |
@@ -1647,12 +1735,7 @@ static void __devexit igb_remove(struct pci_dev *pdev) | |||
1647 | 1735 | ||
1648 | unregister_netdev(netdev); | 1736 | unregister_netdev(netdev); |
1649 | 1737 | ||
1650 | if (!igb_check_reset_block(&adapter->hw)) | 1738 | igb_clear_interrupt_scheme(adapter); |
1651 | igb_reset_phy(&adapter->hw); | ||
1652 | |||
1653 | igb_reset_interrupt_capability(adapter); | ||
1654 | |||
1655 | igb_free_queues(adapter); | ||
1656 | 1739 | ||
1657 | #ifdef CONFIG_PCI_IOV | 1740 | #ifdef CONFIG_PCI_IOV |
1658 | /* reclaim resources allocated to VFs */ | 1741 | /* reclaim resources allocated to VFs */ |
@@ -1668,11 +1751,12 @@ static void __devexit igb_remove(struct pci_dev *pdev) | |||
1668 | dev_info(&pdev->dev, "IOV Disabled\n"); | 1751 | dev_info(&pdev->dev, "IOV Disabled\n"); |
1669 | } | 1752 | } |
1670 | #endif | 1753 | #endif |
1754 | |||
1671 | iounmap(hw->hw_addr); | 1755 | iounmap(hw->hw_addr); |
1672 | if (hw->flash_address) | 1756 | if (hw->flash_address) |
1673 | iounmap(hw->flash_address); | 1757 | iounmap(hw->flash_address); |
1674 | pci_release_selected_regions(pdev, pci_select_bars(pdev, | 1758 | pci_release_selected_regions(pdev, |
1675 | IORESOURCE_MEM)); | 1759 | pci_select_bars(pdev, IORESOURCE_MEM)); |
1676 | 1760 | ||
1677 | free_netdev(netdev); | 1761 | free_netdev(netdev); |
1678 | 1762 | ||
@@ -1682,6 +1766,160 @@ static void __devexit igb_remove(struct pci_dev *pdev) | |||
1682 | } | 1766 | } |
1683 | 1767 | ||
1684 | /** | 1768 | /** |
1769 | * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space | ||
1770 | * @adapter: board private structure to initialize | ||
1771 | * | ||
1772 | * This function initializes the VF-specific data storage and then attempts to | ||
1773 | * allocate the VFs. The reason for ordering it this way is that it is much | ||
1774 | * more expensive time-wise to disable SR-IOV than it is to allocate and free | ||
1775 | * the memory for the VFs. | ||
1776 | **/ | ||
1777 | static void __devinit igb_probe_vfs(struct igb_adapter * adapter) | ||
1778 | { | ||
1779 | #ifdef CONFIG_PCI_IOV | ||
1780 | struct pci_dev *pdev = adapter->pdev; | ||
1781 | |||
1782 | if (adapter->vfs_allocated_count > 7) | ||
1783 | adapter->vfs_allocated_count = 7; | ||
1784 | |||
1785 | if (adapter->vfs_allocated_count) { | ||
1786 | adapter->vf_data = kcalloc(adapter->vfs_allocated_count, | ||
1787 | sizeof(struct vf_data_storage), | ||
1788 | GFP_KERNEL); | ||
1789 | /* if allocation failed then we do not support SR-IOV */ | ||
1790 | if (!adapter->vf_data) { | ||
1791 | adapter->vfs_allocated_count = 0; | ||
1792 | dev_err(&pdev->dev, "Unable to allocate memory for VF " | ||
1793 | "Data Storage\n"); | ||
1794 | } | ||
1795 | } | ||
1796 | |||
1797 | if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) { | ||
1798 | kfree(adapter->vf_data); | ||
1799 | adapter->vf_data = NULL; | ||
1800 | #endif /* CONFIG_PCI_IOV */ | ||
1801 | adapter->vfs_allocated_count = 0; | ||
1802 | #ifdef CONFIG_PCI_IOV | ||
1803 | } else { | ||
1804 | unsigned char mac_addr[ETH_ALEN]; | ||
1805 | int i; | ||
1806 | dev_info(&pdev->dev, "%d vfs allocated\n", | ||
1807 | adapter->vfs_allocated_count); | ||
1808 | for (i = 0; i < adapter->vfs_allocated_count; i++) { | ||
1809 | random_ether_addr(mac_addr); | ||
1810 | igb_set_vf_mac(adapter, i, mac_addr); | ||
1811 | } | ||
1812 | } | ||
1813 | #endif /* CONFIG_PCI_IOV */ | ||
1814 | } | ||
1815 | |||
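Because the CONFIG_PCI_IOV conditionals interleave with the braces above, the flow is easier to see de-preprocessed. An illustrative sketch of the CONFIG_PCI_IOV=y case (error messages omitted; this restates, not replaces, the code above):

if (adapter->vfs_allocated_count > 7)
        adapter->vfs_allocated_count = 7;         /* PF plus at most 7 VFs */

if (adapter->vfs_allocated_count) {
        adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
                                   sizeof(struct vf_data_storage),
                                   GFP_KERNEL);
        if (!adapter->vf_data)
                adapter->vfs_allocated_count = 0; /* no memory: no SR-IOV */
}

if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
        /* enable failed: release storage and fall back to zero VFs */
        kfree(adapter->vf_data);
        adapter->vf_data = NULL;
        adapter->vfs_allocated_count = 0;
} else {
        /* success: assign a random MAC to each VF via igb_set_vf_mac() */
}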
1816 | |||
1817 | /** | ||
1818 | * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp | ||
1819 | * @adapter: board private structure to initialize | ||
1820 | * | ||
1821 | * igb_init_hw_timer initializes the function pointer and values for the hw | ||
1822 | * timer found in hardware. | ||
1823 | **/ | ||
1824 | static void igb_init_hw_timer(struct igb_adapter *adapter) | ||
1825 | { | ||
1826 | struct e1000_hw *hw = &adapter->hw; | ||
1827 | |||
1828 | switch (hw->mac.type) { | ||
1829 | case e1000_82580: | ||
1830 | memset(&adapter->cycles, 0, sizeof(adapter->cycles)); | ||
1831 | adapter->cycles.read = igb_read_clock; | ||
1832 | adapter->cycles.mask = CLOCKSOURCE_MASK(64); | ||
1833 | adapter->cycles.mult = 1; | ||
1834 | /* | ||
1835 | * The 82580 timesync updates the system timer every 8ns by 8ns | ||
1836 | * and the value cannot be shifted. Instead we need to shift | ||
1837 | * the registers to generate a 64bit timer value. As a result | ||
1838 | * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by | ||
1839 | * 24 in order to generate a larger value for synchronization. | ||
1840 | */ | ||
1841 | adapter->cycles.shift = IGB_82580_TSYNC_SHIFT; | ||
1842 | /* disable system timer temporarily by setting bit 31 */ | ||
1843 | wr32(E1000_TSAUXC, 0x80000000); | ||
1844 | wrfl(); | ||
1845 | |||
1846 | /* Set registers so that rollover occurs soon to test this. */ | ||
1847 | wr32(E1000_SYSTIMR, 0x00000000); | ||
1848 | wr32(E1000_SYSTIML, 0x80000000); | ||
1849 | wr32(E1000_SYSTIMH, 0x000000FF); | ||
1850 | wrfl(); | ||
1851 | |||
1852 | /* enable system timer by clearing bit 31 */ | ||
1853 | wr32(E1000_TSAUXC, 0x0); | ||
1854 | wrfl(); | ||
1855 | |||
1856 | timecounter_init(&adapter->clock, | ||
1857 | &adapter->cycles, | ||
1858 | ktime_to_ns(ktime_get_real())); | ||
1859 | /* | ||
1860 | * Synchronize our NIC clock against system wall clock. NIC | ||
1861 | * time stamp reading requires ~3us per sample, each sample | ||
1862 | * was pretty stable even under load => only require 10 | ||
1863 | * samples for each offset comparison. | ||
1864 | */ | ||
1865 | memset(&adapter->compare, 0, sizeof(adapter->compare)); | ||
1866 | adapter->compare.source = &adapter->clock; | ||
1867 | adapter->compare.target = ktime_get_real; | ||
1868 | adapter->compare.num_samples = 10; | ||
1869 | timecompare_update(&adapter->compare, 0); | ||
1870 | break; | ||
1871 | case e1000_82576: | ||
1872 | /* | ||
1873 | * Initialize hardware timer: we keep it running just in case | ||
1874 | * that some program needs it later on. | ||
1875 | */ | ||
1876 | memset(&adapter->cycles, 0, sizeof(adapter->cycles)); | ||
1877 | adapter->cycles.read = igb_read_clock; | ||
1878 | adapter->cycles.mask = CLOCKSOURCE_MASK(64); | ||
1879 | adapter->cycles.mult = 1; | ||
1880 | /* | ||
1881 | * Scale the NIC clock cycle by a large factor so that | ||
1882 | * relatively small clock corrections can be added or | ||
1883 | * subtracted at each clock tick. The drawbacks of a large | ||
1884 | * factor are a) that the clock register overflows more quickly | ||
1885 | * (not such a big deal) and b) that the increment per tick has | ||
1886 | * to fit into 24 bits. As a result we need to use a shift of | ||
1887 | * 19 so we can fit a value of 16 into the TIMINCA register. | ||
1888 | */ | ||
1889 | adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; | ||
1890 | wr32(E1000_TIMINCA, | ||
1891 | (1 << E1000_TIMINCA_16NS_SHIFT) | | ||
1892 | (16 << IGB_82576_TSYNC_SHIFT)); | ||
1893 | |||
1894 | /* Set registers so that rollover occurs soon to test this. */ | ||
1895 | wr32(E1000_SYSTIML, 0x00000000); | ||
1896 | wr32(E1000_SYSTIMH, 0xFF800000); | ||
1897 | wrfl(); | ||
1898 | |||
1899 | timecounter_init(&adapter->clock, | ||
1900 | &adapter->cycles, | ||
1901 | ktime_to_ns(ktime_get_real())); | ||
1902 | /* | ||
1903 | * Synchronize our NIC clock against system wall clock. NIC | ||
1904 | * time stamp reading requires ~3us per sample, each sample | ||
1905 | * was pretty stable even under load => only require 10 | ||
1906 | * samples for each offset comparison. | ||
1907 | */ | ||
1908 | memset(&adapter->compare, 0, sizeof(adapter->compare)); | ||
1909 | adapter->compare.source = &adapter->clock; | ||
1910 | adapter->compare.target = ktime_get_real; | ||
1911 | adapter->compare.num_samples = 10; | ||
1912 | timecompare_update(&adapter->compare, 0); | ||
1913 | break; | ||
1914 | case e1000_82575: | ||
1915 | /* 82575 does not support timesync */ | ||
1916 | default: | ||
1917 | break; | ||
1918 | } | ||
1919 | |||
1920 | } | ||
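The two comments above pin down what the cyclecounter read callback registered here (adapter->cycles.read = igb_read_clock) has to do: on the 82580 the raw SYSTIM registers are shifted up by IGB_82580_TSYNC_SHIFT to widen the counter, while the 82576 path works because 16 << 19 = 0x800000 still fits the 24-bit TIMINCA increment field. A minimal sketch of such a callback, assuming the driver's rd32() accessor and the standard cyclecounter signature; the exact body is not part of this hunk:

	static cycle_t igb_read_clock(const struct cyclecounter *tc)
	{
		struct igb_adapter *adapter =
			container_of(tc, struct igb_adapter, cycles);
		struct e1000_hw *hw = &adapter->hw;
		u64 stamp = 0;
		int shift = 0;

		/* the 82580 latches SYSTIML/H on the SYSTIMR read and needs
		 * the extra shift described in the comment above */
		if (hw->mac.type == e1000_82580) {
			stamp = rd32(E1000_SYSTIMR) >> 8;
			shift = IGB_82580_TSYNC_SHIFT;
		}

		stamp |= (u64)rd32(E1000_SYSTIML) << shift;
		stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
		return stamp;
	}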
1921 | |||
1922 | /** | ||
1685 | * igb_sw_init - Initialize general software structures (struct igb_adapter) | 1923 | * igb_sw_init - Initialize general software structures (struct igb_adapter) |
1686 | * @adapter: board private structure to initialize | 1924 | * @adapter: board private structure to initialize |
1687 | * | 1925 | * |
@@ -1699,20 +1937,37 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) | |||
1699 | 1937 | ||
1700 | adapter->tx_ring_count = IGB_DEFAULT_TXD; | 1938 | adapter->tx_ring_count = IGB_DEFAULT_TXD; |
1701 | adapter->rx_ring_count = IGB_DEFAULT_RXD; | 1939 | adapter->rx_ring_count = IGB_DEFAULT_RXD; |
1702 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; | 1940 | adapter->rx_itr_setting = IGB_DEFAULT_ITR; |
1703 | adapter->rx_ps_hdr_size = 0; /* disable packet split */ | 1941 | adapter->tx_itr_setting = IGB_DEFAULT_ITR; |
1942 | |||
1704 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 1943 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
1705 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | 1944 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; |
1706 | 1945 | ||
1707 | /* This call may decrease the number of queues depending on | 1946 | #ifdef CONFIG_PCI_IOV |
1708 | * interrupt mode. */ | 1947 | if (hw->mac.type == e1000_82576) |
1709 | igb_set_interrupt_capability(adapter); | 1948 | adapter->vfs_allocated_count = max_vfs; |
1949 | |||
1950 | #endif /* CONFIG_PCI_IOV */ | ||
1951 | adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); | ||
1952 | |||
1953 | /* | ||
1954 | * if rss_queues > 4 or vfs are going to be allocated with rss_queues | ||
1955 | * then we should combine the queues into a queue pair in order to | ||
1956 | * conserve interrupts due to limited supply | ||
1957 | */ | ||
1958 | if ((adapter->rss_queues > 4) || | ||
1959 | ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6))) | ||
1960 | adapter->flags |= IGB_FLAG_QUEUE_PAIRS; | ||
1710 | 1961 | ||
1711 | if (igb_alloc_queues(adapter)) { | 1962 | /* This call may decrease the number of queues */ |
1963 | if (igb_init_interrupt_scheme(adapter)) { | ||
1712 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); | 1964 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); |
1713 | return -ENOMEM; | 1965 | return -ENOMEM; |
1714 | } | 1966 | } |
1715 | 1967 | ||
1968 | igb_init_hw_timer(adapter); | ||
1969 | igb_probe_vfs(adapter); | ||
1970 | |||
1716 | /* Explicitly disable IRQ since the NIC can be in any state. */ | 1971 | /* Explicitly disable IRQ since the NIC can be in any state. */ |
1717 | igb_irq_disable(adapter); | 1972 | igb_irq_disable(adapter); |
1718 | 1973 | ||
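The queue-pair heuristic introduced in this hunk is about the MSI-X vector budget. A back-of-the-envelope sketch of what pairing saves (the figures are illustrative assumptions, not driver constants):

	/* with rss_queues = 8 and no VFs: */
	unsigned int unpaired = 2 * 8 + 1;	/* separate Tx and Rx q_vectors plus one misc vector = 17 */
	unsigned int paired   = 8 + 1;		/* Tx/Rx rings share a q_vector = 9 */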
@@ -1755,12 +2010,7 @@ static int igb_open(struct net_device *netdev) | |||
1755 | if (err) | 2010 | if (err) |
1756 | goto err_setup_rx; | 2011 | goto err_setup_rx; |
1757 | 2012 | ||
1758 | /* e1000_power_up_phy(adapter); */ | 2013 | igb_power_up_link(adapter); |
1759 | |||
1760 | adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; | ||
1761 | if ((adapter->hw.mng_cookie.status & | ||
1762 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) | ||
1763 | igb_update_mng_vlan(adapter); | ||
1764 | 2014 | ||
1765 | /* before we allocate an interrupt, we must be ready to handle it. | 2015 | /* before we allocate an interrupt, we must be ready to handle it. |
1766 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | 2016 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt |
@@ -1768,10 +2018,6 @@ static int igb_open(struct net_device *netdev) | |||
1768 | * clean_rx handler before we do so. */ | 2018 | * clean_rx handler before we do so. */ |
1769 | igb_configure(adapter); | 2019 | igb_configure(adapter); |
1770 | 2020 | ||
1771 | igb_vmm_control(adapter); | ||
1772 | igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); | ||
1773 | igb_set_vmolr(hw, adapter->vfs_allocated_count); | ||
1774 | |||
1775 | err = igb_request_irq(adapter); | 2021 | err = igb_request_irq(adapter); |
1776 | if (err) | 2022 | if (err) |
1777 | goto err_req_irq; | 2023 | goto err_req_irq; |
@@ -1779,24 +2025,34 @@ static int igb_open(struct net_device *netdev) | |||
1779 | /* From here on the code is the same as igb_up() */ | 2025 | /* From here on the code is the same as igb_up() */ |
1780 | clear_bit(__IGB_DOWN, &adapter->state); | 2026 | clear_bit(__IGB_DOWN, &adapter->state); |
1781 | 2027 | ||
1782 | for (i = 0; i < adapter->num_rx_queues; i++) | 2028 | for (i = 0; i < adapter->num_q_vectors; i++) { |
1783 | napi_enable(&adapter->rx_ring[i].napi); | 2029 | struct igb_q_vector *q_vector = adapter->q_vector[i]; |
2030 | napi_enable(&q_vector->napi); | ||
2031 | } | ||
1784 | 2032 | ||
1785 | /* Clear any pending interrupts. */ | 2033 | /* Clear any pending interrupts. */ |
1786 | rd32(E1000_ICR); | 2034 | rd32(E1000_ICR); |
1787 | 2035 | ||
1788 | igb_irq_enable(adapter); | 2036 | igb_irq_enable(adapter); |
1789 | 2037 | ||
2038 | /* notify VFs that reset has been completed */ | ||
2039 | if (adapter->vfs_allocated_count) { | ||
2040 | u32 reg_data = rd32(E1000_CTRL_EXT); | ||
2041 | reg_data |= E1000_CTRL_EXT_PFRSTD; | ||
2042 | wr32(E1000_CTRL_EXT, reg_data); | ||
2043 | } | ||
2044 | |||
1790 | netif_tx_start_all_queues(netdev); | 2045 | netif_tx_start_all_queues(netdev); |
1791 | 2046 | ||
1792 | /* Fire a link status change interrupt to start the watchdog. */ | 2047 | /* start the watchdog. */ |
1793 | wr32(E1000_ICS, E1000_ICS_LSC); | 2048 | hw->mac.get_link_status = 1; |
2049 | schedule_work(&adapter->watchdog_task); | ||
1794 | 2050 | ||
1795 | return 0; | 2051 | return 0; |
1796 | 2052 | ||
1797 | err_req_irq: | 2053 | err_req_irq: |
1798 | igb_release_hw_control(adapter); | 2054 | igb_release_hw_control(adapter); |
1799 | /* e1000_power_down_phy(adapter); */ | 2055 | igb_power_down_link(adapter); |
1800 | igb_free_all_rx_resources(adapter); | 2056 | igb_free_all_rx_resources(adapter); |
1801 | err_setup_rx: | 2057 | err_setup_rx: |
1802 | igb_free_all_tx_resources(adapter); | 2058 | igb_free_all_tx_resources(adapter); |
@@ -1829,28 +2085,18 @@ static int igb_close(struct net_device *netdev) | |||
1829 | igb_free_all_tx_resources(adapter); | 2085 | igb_free_all_tx_resources(adapter); |
1830 | igb_free_all_rx_resources(adapter); | 2086 | igb_free_all_rx_resources(adapter); |
1831 | 2087 | ||
1832 | /* kill manageability vlan ID if supported, but not if a vlan with | ||
1833 | * the same ID is registered on the host OS (let 8021q kill it) */ | ||
1834 | if ((adapter->hw.mng_cookie.status & | ||
1835 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | ||
1836 | !(adapter->vlgrp && | ||
1837 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) | ||
1838 | igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | ||
1839 | |||
1840 | return 0; | 2088 | return 0; |
1841 | } | 2089 | } |
1842 | 2090 | ||
1843 | /** | 2091 | /** |
1844 | * igb_setup_tx_resources - allocate Tx resources (Descriptors) | 2092 | * igb_setup_tx_resources - allocate Tx resources (Descriptors) |
1845 | * @adapter: board private structure | ||
1846 | * @tx_ring: tx descriptor ring (for a specific queue) to setup | 2093 | * @tx_ring: tx descriptor ring (for a specific queue) to setup |
1847 | * | 2094 | * |
1848 | * Return 0 on success, negative on failure | 2095 | * Return 0 on success, negative on failure |
1849 | **/ | 2096 | **/ |
1850 | int igb_setup_tx_resources(struct igb_adapter *adapter, | 2097 | int igb_setup_tx_resources(struct igb_ring *tx_ring) |
1851 | struct igb_ring *tx_ring) | ||
1852 | { | 2098 | { |
1853 | struct pci_dev *pdev = adapter->pdev; | 2099 | struct pci_dev *pdev = tx_ring->pdev; |
1854 | int size; | 2100 | int size; |
1855 | 2101 | ||
1856 | size = sizeof(struct igb_buffer) * tx_ring->count; | 2102 | size = sizeof(struct igb_buffer) * tx_ring->count; |
@@ -1863,20 +2109,20 @@ int igb_setup_tx_resources(struct igb_adapter *adapter, | |||
1863 | tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); | 2109 | tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); |
1864 | tx_ring->size = ALIGN(tx_ring->size, 4096); | 2110 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
1865 | 2111 | ||
1866 | tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, | 2112 | tx_ring->desc = pci_alloc_consistent(pdev, |
2113 | tx_ring->size, | ||
1867 | &tx_ring->dma); | 2114 | &tx_ring->dma); |
1868 | 2115 | ||
1869 | if (!tx_ring->desc) | 2116 | if (!tx_ring->desc) |
1870 | goto err; | 2117 | goto err; |
1871 | 2118 | ||
1872 | tx_ring->adapter = adapter; | ||
1873 | tx_ring->next_to_use = 0; | 2119 | tx_ring->next_to_use = 0; |
1874 | tx_ring->next_to_clean = 0; | 2120 | tx_ring->next_to_clean = 0; |
1875 | return 0; | 2121 | return 0; |
1876 | 2122 | ||
1877 | err: | 2123 | err: |
1878 | vfree(tx_ring->buffer_info); | 2124 | vfree(tx_ring->buffer_info); |
1879 | dev_err(&adapter->pdev->dev, | 2125 | dev_err(&pdev->dev, |
1880 | "Unable to allocate memory for the transmit descriptor ring\n"); | 2126 | "Unable to allocate memory for the transmit descriptor ring\n"); |
1881 | return -ENOMEM; | 2127 | return -ENOMEM; |
1882 | } | 2128 | } |
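The allocation above comes out to exactly one page in the default configuration. A worked sizing, assuming the usual 16-byte advanced Tx descriptor and IGB_DEFAULT_TXD = 256:

	size = 256 * sizeof(union e1000_adv_tx_desc);	/* 256 * 16 = 4096 bytes */
	size = ALIGN(size, 4096);			/* already page aligned */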
@@ -1890,71 +2136,38 @@ err: | |||
1890 | **/ | 2136 | **/ |
1891 | static int igb_setup_all_tx_resources(struct igb_adapter *adapter) | 2137 | static int igb_setup_all_tx_resources(struct igb_adapter *adapter) |
1892 | { | 2138 | { |
2139 | struct pci_dev *pdev = adapter->pdev; | ||
1893 | int i, err = 0; | 2140 | int i, err = 0; |
1894 | int r_idx; | ||
1895 | 2141 | ||
1896 | for (i = 0; i < adapter->num_tx_queues; i++) { | 2142 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1897 | err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]); | 2143 | err = igb_setup_tx_resources(adapter->tx_ring[i]); |
1898 | if (err) { | 2144 | if (err) { |
1899 | dev_err(&adapter->pdev->dev, | 2145 | dev_err(&pdev->dev, |
1900 | "Allocation for Tx Queue %u failed\n", i); | 2146 | "Allocation for Tx Queue %u failed\n", i); |
1901 | for (i--; i >= 0; i--) | 2147 | for (i--; i >= 0; i--) |
1902 | igb_free_tx_resources(&adapter->tx_ring[i]); | 2148 | igb_free_tx_resources(adapter->tx_ring[i]); |
1903 | break; | 2149 | break; |
1904 | } | 2150 | } |
1905 | } | 2151 | } |
1906 | 2152 | ||
1907 | for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { | 2153 | for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) { |
1908 | r_idx = i % adapter->num_tx_queues; | 2154 | int r_idx = i % adapter->num_tx_queues; |
1909 | adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; | 2155 | adapter->multi_tx_table[i] = adapter->tx_ring[r_idx]; |
1910 | } | 2156 | } |
1911 | return err; | 2157 | return err; |
1912 | } | 2158 | } |
1913 | 2159 | ||
1914 | /** | 2160 | /** |
1915 | * igb_configure_tx - Configure transmit Unit after Reset | 2161 | * igb_setup_tctl - configure the transmit control registers |
1916 | * @adapter: board private structure | 2162 | * @adapter: Board private structure |
1917 | * | ||
1918 | * Configure the Tx unit of the MAC after a reset. | ||
1919 | **/ | 2163 | **/ |
1920 | static void igb_configure_tx(struct igb_adapter *adapter) | 2164 | void igb_setup_tctl(struct igb_adapter *adapter) |
1921 | { | 2165 | { |
1922 | u64 tdba; | ||
1923 | struct e1000_hw *hw = &adapter->hw; | 2166 | struct e1000_hw *hw = &adapter->hw; |
1924 | u32 tctl; | 2167 | u32 tctl; |
1925 | u32 txdctl, txctrl; | ||
1926 | int i, j; | ||
1927 | |||
1928 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
1929 | struct igb_ring *ring = &adapter->tx_ring[i]; | ||
1930 | j = ring->reg_idx; | ||
1931 | wr32(E1000_TDLEN(j), | ||
1932 | ring->count * sizeof(union e1000_adv_tx_desc)); | ||
1933 | tdba = ring->dma; | ||
1934 | wr32(E1000_TDBAL(j), | ||
1935 | tdba & 0x00000000ffffffffULL); | ||
1936 | wr32(E1000_TDBAH(j), tdba >> 32); | ||
1937 | |||
1938 | ring->head = E1000_TDH(j); | ||
1939 | ring->tail = E1000_TDT(j); | ||
1940 | writel(0, hw->hw_addr + ring->tail); | ||
1941 | writel(0, hw->hw_addr + ring->head); | ||
1942 | txdctl = rd32(E1000_TXDCTL(j)); | ||
1943 | txdctl |= E1000_TXDCTL_QUEUE_ENABLE; | ||
1944 | wr32(E1000_TXDCTL(j), txdctl); | ||
1945 | |||
1946 | /* Turn off Relaxed Ordering on head write-backs. The | ||
1947 | * writebacks MUST be delivered in order or it will | ||
1948 | * completely screw up our bookkeeping. | ||
1949 | */ | ||
1950 | txctrl = rd32(E1000_DCA_TXCTRL(j)); | ||
1951 | txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; | ||
1952 | wr32(E1000_DCA_TXCTRL(j), txctrl); | ||
1953 | } | ||
1954 | 2168 | ||
1955 | /* disable queue 0 to prevent tail bump w/o re-configuration */ | 2169 | /* disable queue 0 which is enabled by default on 82575 and 82576 */ |
1956 | if (adapter->vfs_allocated_count) | 2170 | wr32(E1000_TXDCTL(0), 0); |
1957 | wr32(E1000_TXDCTL(0), 0); | ||
1958 | 2171 | ||
1959 | /* Program the Transmit Control Register */ | 2172 | /* Program the Transmit Control Register */ |
1960 | tctl = rd32(E1000_TCTL); | 2173 | tctl = rd32(E1000_TCTL); |
@@ -1964,9 +2177,6 @@ static void igb_configure_tx(struct igb_adapter *adapter) | |||
1964 | 2177 | ||
1965 | igb_config_collision_dist(hw); | 2178 | igb_config_collision_dist(hw); |
1966 | 2179 | ||
1967 | /* Setup Transmit Descriptor Settings for eop descriptor */ | ||
1968 | adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS; | ||
1969 | |||
1970 | /* Enable transmits */ | 2180 | /* Enable transmits */ |
1971 | tctl |= E1000_TCTL_EN; | 2181 | tctl |= E1000_TCTL_EN; |
1972 | 2182 | ||
@@ -1974,16 +2184,69 @@ static void igb_configure_tx(struct igb_adapter *adapter) | |||
1974 | } | 2184 | } |
1975 | 2185 | ||
1976 | /** | 2186 | /** |
1977 | * igb_setup_rx_resources - allocate Rx resources (Descriptors) | 2187 | * igb_configure_tx_ring - Configure transmit ring after Reset |
1978 | * @adapter: board private structure | 2188 | * @adapter: board private structure |
2189 | * @ring: tx ring to configure | ||
2190 | * | ||
2191 | * Configure a transmit ring after a reset. | ||
2192 | **/ | ||
2193 | void igb_configure_tx_ring(struct igb_adapter *adapter, | ||
2194 | struct igb_ring *ring) | ||
2195 | { | ||
2196 | struct e1000_hw *hw = &adapter->hw; | ||
2197 | u32 txdctl; | ||
2198 | u64 tdba = ring->dma; | ||
2199 | int reg_idx = ring->reg_idx; | ||
2200 | |||
2201 | /* disable the queue */ | ||
2202 | txdctl = rd32(E1000_TXDCTL(reg_idx)); | ||
2203 | wr32(E1000_TXDCTL(reg_idx), | ||
2204 | txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); | ||
2205 | wrfl(); | ||
2206 | mdelay(10); | ||
2207 | |||
2208 | wr32(E1000_TDLEN(reg_idx), | ||
2209 | ring->count * sizeof(union e1000_adv_tx_desc)); | ||
2210 | wr32(E1000_TDBAL(reg_idx), | ||
2211 | tdba & 0x00000000ffffffffULL); | ||
2212 | wr32(E1000_TDBAH(reg_idx), tdba >> 32); | ||
2213 | |||
2214 | ring->head = hw->hw_addr + E1000_TDH(reg_idx); | ||
2215 | ring->tail = hw->hw_addr + E1000_TDT(reg_idx); | ||
2216 | writel(0, ring->head); | ||
2217 | writel(0, ring->tail); | ||
2218 | |||
2219 | txdctl |= IGB_TX_PTHRESH; | ||
2220 | txdctl |= IGB_TX_HTHRESH << 8; | ||
2221 | txdctl |= IGB_TX_WTHRESH << 16; | ||
2222 | |||
2223 | txdctl |= E1000_TXDCTL_QUEUE_ENABLE; | ||
2224 | wr32(E1000_TXDCTL(reg_idx), txdctl); | ||
2225 | } | ||
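The three threshold writes above pack into byte lanes of TXDCTL. A worked packing with illustrative threshold values (the real IGB_TX_PTHRESH/HTHRESH/WTHRESH defines may differ):

	txdctl  = 8;				/* PTHRESH, bits 5:0   */
	txdctl |= 1 << 8;			/* HTHRESH, bits 13:8  */
	txdctl |= 1 << 16;			/* WTHRESH, bits 21:16 */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;	/* 0x00010108 plus the enable bit */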
2226 | |||
2227 | /** | ||
2228 | * igb_configure_tx - Configure transmit Unit after Reset | ||
2229 | * @adapter: board private structure | ||
2230 | * | ||
2231 | * Configure the Tx unit of the MAC after a reset. | ||
2232 | **/ | ||
2233 | static void igb_configure_tx(struct igb_adapter *adapter) | ||
2234 | { | ||
2235 | int i; | ||
2236 | |||
2237 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2238 | igb_configure_tx_ring(adapter, adapter->tx_ring[i]); | ||
2239 | } | ||
2240 | |||
2241 | /** | ||
2242 | * igb_setup_rx_resources - allocate Rx resources (Descriptors) | ||
1979 | * @rx_ring: rx descriptor ring (for a specific queue) to setup | 2243 | * @rx_ring: rx descriptor ring (for a specific queue) to setup |
1980 | * | 2244 | * |
1981 | * Returns 0 on success, negative on failure | 2245 | * Returns 0 on success, negative on failure |
1982 | **/ | 2246 | **/ |
1983 | int igb_setup_rx_resources(struct igb_adapter *adapter, | 2247 | int igb_setup_rx_resources(struct igb_ring *rx_ring) |
1984 | struct igb_ring *rx_ring) | ||
1985 | { | 2248 | { |
1986 | struct pci_dev *pdev = adapter->pdev; | 2249 | struct pci_dev *pdev = rx_ring->pdev; |
1987 | int size, desc_len; | 2250 | int size, desc_len; |
1988 | 2251 | ||
1989 | size = sizeof(struct igb_buffer) * rx_ring->count; | 2252 | size = sizeof(struct igb_buffer) * rx_ring->count; |
@@ -2007,13 +2270,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, | |||
2007 | rx_ring->next_to_clean = 0; | 2270 | rx_ring->next_to_clean = 0; |
2008 | rx_ring->next_to_use = 0; | 2271 | rx_ring->next_to_use = 0; |
2009 | 2272 | ||
2010 | rx_ring->adapter = adapter; | ||
2011 | |||
2012 | return 0; | 2273 | return 0; |
2013 | 2274 | ||
2014 | err: | 2275 | err: |
2015 | vfree(rx_ring->buffer_info); | 2276 | vfree(rx_ring->buffer_info); |
2016 | dev_err(&adapter->pdev->dev, "Unable to allocate memory for " | 2277 | rx_ring->buffer_info = NULL; |
2278 | dev_err(&pdev->dev, "Unable to allocate memory for " | ||
2017 | "the receive descriptor ring\n"); | 2279 | "the receive descriptor ring\n"); |
2018 | return -ENOMEM; | 2280 | return -ENOMEM; |
2019 | } | 2281 | } |
@@ -2027,15 +2289,16 @@ err: | |||
2027 | **/ | 2289 | **/ |
2028 | static int igb_setup_all_rx_resources(struct igb_adapter *adapter) | 2290 | static int igb_setup_all_rx_resources(struct igb_adapter *adapter) |
2029 | { | 2291 | { |
2292 | struct pci_dev *pdev = adapter->pdev; | ||
2030 | int i, err = 0; | 2293 | int i, err = 0; |
2031 | 2294 | ||
2032 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2295 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2033 | err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]); | 2296 | err = igb_setup_rx_resources(adapter->rx_ring[i]); |
2034 | if (err) { | 2297 | if (err) { |
2035 | dev_err(&adapter->pdev->dev, | 2298 | dev_err(&pdev->dev, |
2036 | "Allocation for Rx Queue %u failed\n", i); | 2299 | "Allocation for Rx Queue %u failed\n", i); |
2037 | for (i--; i >= 0; i--) | 2300 | for (i--; i >= 0; i--) |
2038 | igb_free_rx_resources(&adapter->rx_ring[i]); | 2301 | igb_free_rx_resources(adapter->rx_ring[i]); |
2039 | break; | 2302 | break; |
2040 | } | 2303 | } |
2041 | } | 2304 | } |
@@ -2044,15 +2307,122 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter) | |||
2044 | } | 2307 | } |
2045 | 2308 | ||
2046 | /** | 2309 | /** |
2310 | * igb_setup_mrqc - configure the multiple receive queue control registers | ||
2311 | * @adapter: Board private structure | ||
2312 | **/ | ||
2313 | static void igb_setup_mrqc(struct igb_adapter *adapter) | ||
2314 | { | ||
2315 | struct e1000_hw *hw = &adapter->hw; | ||
2316 | u32 mrqc, rxcsum; | ||
2317 | u32 j, num_rx_queues, shift = 0, shift2 = 0; | ||
2318 | union e1000_reta { | ||
2319 | u32 dword; | ||
2320 | u8 bytes[4]; | ||
2321 | } reta; | ||
2322 | static const u8 rsshash[40] = { | ||
2323 | 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, | ||
2324 | 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, | ||
2325 | 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, | ||
2326 | 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; | ||
2327 | |||
2328 | /* Fill out hash function seeds */ | ||
2329 | for (j = 0; j < 10; j++) { | ||
2330 | u32 rsskey = rsshash[(j * 4)]; | ||
2331 | rsskey |= rsshash[(j * 4) + 1] << 8; | ||
2332 | rsskey |= rsshash[(j * 4) + 2] << 16; | ||
2333 | rsskey |= rsshash[(j * 4) + 3] << 24; | ||
2334 | array_wr32(E1000_RSSRK(0), j, rsskey); | ||
2335 | } | ||
2336 | |||
2337 | num_rx_queues = adapter->rss_queues; | ||
2338 | |||
2339 | if (adapter->vfs_allocated_count) { | ||
2340 | /* 82575 and 82576 support 2 RSS queues for VMDq */ | ||
2341 | switch (hw->mac.type) { | ||
2342 | case e1000_82580: | ||
2343 | num_rx_queues = 1; | ||
2344 | shift = 0; | ||
2345 | break; | ||
2346 | case e1000_82576: | ||
2347 | shift = 3; | ||
2348 | num_rx_queues = 2; | ||
2349 | break; | ||
2350 | case e1000_82575: | ||
2351 | shift = 2; | ||
2352 | shift2 = 6; | ||
2353 | default: | ||
2354 | break; | ||
2355 | } | ||
2356 | } else { | ||
2357 | if (hw->mac.type == e1000_82575) | ||
2358 | shift = 6; | ||
2359 | } | ||
2360 | |||
2361 | for (j = 0; j < (32 * 4); j++) { | ||
2362 | reta.bytes[j & 3] = (j % num_rx_queues) << shift; | ||
2363 | if (shift2) | ||
2364 | reta.bytes[j & 3] |= num_rx_queues << shift2; | ||
2365 | if ((j & 3) == 3) | ||
2366 | wr32(E1000_RETA(j >> 2), reta.dword); | ||
2367 | } | ||
2368 | |||
2369 | /* | ||
2370 | * Disable raw packet checksumming so that RSS hash is placed in | ||
2371 | * descriptor on writeback. No need to enable TCP/UDP/IP checksum | ||
2372 | * offloads as they are enabled by default | ||
2373 | */ | ||
2374 | rxcsum = rd32(E1000_RXCSUM); | ||
2375 | rxcsum |= E1000_RXCSUM_PCSD; | ||
2376 | |||
2377 | if (adapter->hw.mac.type >= e1000_82576) | ||
2378 | /* Enable Receive Checksum Offload for SCTP */ | ||
2379 | rxcsum |= E1000_RXCSUM_CRCOFL; | ||
2380 | |||
2381 | /* Don't need to set TUOFL or IPOFL, they default to 1 */ | ||
2382 | wr32(E1000_RXCSUM, rxcsum); | ||
2383 | |||
2384 | /* If VMDq is enabled then we set the appropriate mode for that, else | ||
2385 | * we default to RSS so that an RSS hash is calculated per packet even | ||
2386 | * if we are only using one queue */ | ||
2387 | if (adapter->vfs_allocated_count) { | ||
2388 | if (hw->mac.type > e1000_82575) { | ||
2389 | /* Set the default pool for the PF's first queue */ | ||
2390 | u32 vtctl = rd32(E1000_VT_CTL); | ||
2391 | vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | | ||
2392 | E1000_VT_CTL_DISABLE_DEF_POOL); | ||
2393 | vtctl |= adapter->vfs_allocated_count << | ||
2394 | E1000_VT_CTL_DEFAULT_POOL_SHIFT; | ||
2395 | wr32(E1000_VT_CTL, vtctl); | ||
2396 | } | ||
2397 | if (adapter->rss_queues > 1) | ||
2398 | mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; | ||
2399 | else | ||
2400 | mrqc = E1000_MRQC_ENABLE_VMDQ; | ||
2401 | } else { | ||
2402 | mrqc = E1000_MRQC_ENABLE_RSS_4Q; | ||
2403 | } | ||
2404 | igb_vmm_control(adapter); | ||
2405 | |||
2406 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | | ||
2407 | E1000_MRQC_RSS_FIELD_IPV4_TCP); | ||
2408 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | | ||
2409 | E1000_MRQC_RSS_FIELD_IPV6_TCP); | ||
2410 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP | | ||
2411 | E1000_MRQC_RSS_FIELD_IPV6_UDP); | ||
2412 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | | ||
2413 | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); | ||
2414 | |||
2415 | wr32(E1000_MRQC, mrqc); | ||
2416 | } | ||
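To make the shift arithmetic in the RETA loop concrete, here is a worked fill of the first redirection-table dword for four RSS queues on an 82575 with no VFs (shift = 6, shift2 = 0), as a sketch:

	reta.bytes[0] = (0 % 4) << 6;		/* 0x00 */
	reta.bytes[1] = (1 % 4) << 6;		/* 0x40 */
	reta.bytes[2] = (2 % 4) << 6;		/* 0x80 */
	reta.bytes[3] = (3 % 4) << 6;		/* 0xC0 */
	wr32(E1000_RETA(0), reta.dword);	/* 0xC0804000 on a little-endian host */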
2417 | |||
2418 | /** | ||
2047 | * igb_setup_rctl - configure the receive control registers | 2419 | * igb_setup_rctl - configure the receive control registers |
2048 | * @adapter: Board private structure | 2420 | * @adapter: Board private structure |
2049 | **/ | 2421 | **/ |
2050 | static void igb_setup_rctl(struct igb_adapter *adapter) | 2422 | void igb_setup_rctl(struct igb_adapter *adapter) |
2051 | { | 2423 | { |
2052 | struct e1000_hw *hw = &adapter->hw; | 2424 | struct e1000_hw *hw = &adapter->hw; |
2053 | u32 rctl; | 2425 | u32 rctl; |
2054 | u32 srrctl = 0; | ||
2055 | int i; | ||
2056 | 2426 | ||
2057 | rctl = rd32(E1000_RCTL); | 2427 | rctl = rd32(E1000_RCTL); |
2058 | 2428 | ||
@@ -2069,75 +2439,45 @@ static void igb_setup_rctl(struct igb_adapter *adapter) | |||
2069 | */ | 2439 | */ |
2070 | rctl |= E1000_RCTL_SECRC; | 2440 | rctl |= E1000_RCTL_SECRC; |
2071 | 2441 | ||
2072 | /* | 2442 | /* disable store bad packets and clear size bits. */ |
2073 | * disable store bad packets and clear size bits. | ||
2074 | */ | ||
2075 | rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); | 2443 | rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); |
2076 | 2444 | ||
2077 | /* enable LPE when to prevent packets larger than max_frame_size */ | 2445 | /* enable LPE to prevent packets larger than max_frame_size */ |
2078 | rctl |= E1000_RCTL_LPE; | 2446 | rctl |= E1000_RCTL_LPE; |
2079 | 2447 | ||
2080 | /* Setup buffer sizes */ | 2448 | /* disable queue 0 to prevent tail write w/o re-config */ |
2081 | switch (adapter->rx_buffer_len) { | 2449 | wr32(E1000_RXDCTL(0), 0); |
2082 | case IGB_RXBUFFER_256: | ||
2083 | rctl |= E1000_RCTL_SZ_256; | ||
2084 | break; | ||
2085 | case IGB_RXBUFFER_512: | ||
2086 | rctl |= E1000_RCTL_SZ_512; | ||
2087 | break; | ||
2088 | default: | ||
2089 | srrctl = ALIGN(adapter->rx_buffer_len, 1024) | ||
2090 | >> E1000_SRRCTL_BSIZEPKT_SHIFT; | ||
2091 | break; | ||
2092 | } | ||
2093 | |||
2094 | /* 82575 and greater support packet-split where the protocol | ||
2095 | * header is placed in skb->data and the packet data is | ||
2096 | * placed in pages hanging off of skb_shinfo(skb)->nr_frags. | ||
2097 | * In the case of a non-split, skb->data is linearly filled, | ||
2098 | * followed by the page buffers. Therefore, skb->data is | ||
2099 | * sized to hold the largest protocol header. | ||
2100 | */ | ||
2101 | /* allocations using alloc_page take too long for regular MTU | ||
2102 | * so only enable packet split for jumbo frames */ | ||
2103 | if (adapter->netdev->mtu > ETH_DATA_LEN) { | ||
2104 | adapter->rx_ps_hdr_size = IGB_RXBUFFER_128; | ||
2105 | srrctl |= adapter->rx_ps_hdr_size << | ||
2106 | E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; | ||
2107 | srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; | ||
2108 | } else { | ||
2109 | adapter->rx_ps_hdr_size = 0; | ||
2110 | srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; | ||
2111 | } | ||
2112 | 2450 | ||
2113 | /* Attention!!! For SR-IOV PF driver operations you must enable | 2451 | /* Attention!!! For SR-IOV PF driver operations you must enable |
2114 | * queue drop for all VF and PF queues to prevent head of line blocking | 2452 | * queue drop for all VF and PF queues to prevent head of line blocking |
2115 | * if an un-trusted VF does not provide descriptors to hardware. | 2453 | * if an un-trusted VF does not provide descriptors to hardware. |
2116 | */ | 2454 | */ |
2117 | if (adapter->vfs_allocated_count) { | 2455 | if (adapter->vfs_allocated_count) { |
2118 | u32 vmolr; | ||
2119 | |||
2120 | /* set all queue drop enable bits */ | 2456 | /* set all queue drop enable bits */ |
2121 | wr32(E1000_QDE, ALL_QUEUES); | 2457 | wr32(E1000_QDE, ALL_QUEUES); |
2122 | srrctl |= E1000_SRRCTL_DROP_EN; | 2458 | } |
2123 | 2459 | ||
2124 | /* disable queue 0 to prevent tail write w/o re-config */ | 2460 | wr32(E1000_RCTL, rctl); |
2125 | wr32(E1000_RXDCTL(0), 0); | 2461 | } |
2126 | 2462 | ||
2127 | vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); | 2463 | static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, |
2128 | if (rctl & E1000_RCTL_LPE) | 2464 | int vfn) |
2129 | vmolr |= E1000_VMOLR_LPE; | 2465 | { |
2130 | if (adapter->num_rx_queues > 1) | 2466 | struct e1000_hw *hw = &adapter->hw; |
2131 | vmolr |= E1000_VMOLR_RSSE; | 2467 | u32 vmolr; |
2132 | wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr); | ||
2133 | } | ||
2134 | 2468 | ||
2135 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2469 | /* if it isn't the PF, check to see if VFs are enabled and
2136 | int j = adapter->rx_ring[i].reg_idx; | 2470 | * increase the size to support vlan tags */ |
2137 | wr32(E1000_SRRCTL(j), srrctl); | 2471 | if (vfn < adapter->vfs_allocated_count && |
2138 | } | 2472 | adapter->vf_data[vfn].vlans_enabled) |
2473 | size += VLAN_TAG_SIZE; | ||
2139 | 2474 | ||
2140 | wr32(E1000_RCTL, rctl); | 2475 | vmolr = rd32(E1000_VMOLR(vfn)); |
2476 | vmolr &= ~E1000_VMOLR_RLPML_MASK; | ||
2477 | vmolr |= size | E1000_VMOLR_LPE; | ||
2478 | wr32(E1000_VMOLR(vfn), vmolr); | ||
2479 | |||
2480 | return 0; | ||
2141 | } | 2481 | } |
2142 | 2482 | ||
2143 | /** | 2483 | /** |
@@ -2159,33 +2499,114 @@ static void igb_rlpml_set(struct igb_adapter *adapter) | |||
2159 | * size and set the VMOLR RLPML to the size we need */ | 2499 | * size and set the VMOLR RLPML to the size we need */ |
2160 | if (pf_id) { | 2500 | if (pf_id) { |
2161 | igb_set_vf_rlpml(adapter, max_frame_size, pf_id); | 2501 | igb_set_vf_rlpml(adapter, max_frame_size, pf_id); |
2162 | max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE; | 2502 | max_frame_size = MAX_JUMBO_FRAME_SIZE; |
2163 | } | 2503 | } |
2164 | 2504 | ||
2165 | wr32(E1000_RLPML, max_frame_size); | 2505 | wr32(E1000_RLPML, max_frame_size); |
2166 | } | 2506 | } |
2167 | 2507 | ||
2508 | static inline void igb_set_vmolr(struct igb_adapter *adapter, | ||
2509 | int vfn, bool aupe) | ||
2510 | { | ||
2511 | struct e1000_hw *hw = &adapter->hw; | ||
2512 | u32 vmolr; | ||
2513 | |||
2514 | /* | ||
2515 | * This register exists only on 82576 and newer, so on older | ||
2516 | * hardware we should exit and do nothing | ||
2517 | */ | ||
2518 | if (hw->mac.type < e1000_82576) | ||
2519 | return; | ||
2520 | |||
2521 | vmolr = rd32(E1000_VMOLR(vfn)); | ||
2522 | vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ | ||
2523 | if (aupe) | ||
2524 | vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ | ||
2525 | else | ||
2526 | vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ | ||
2527 | |||
2528 | /* clear all bits that might not be set */ | ||
2529 | vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); | ||
2530 | |||
2531 | if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) | ||
2532 | vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ | ||
2533 | /* | ||
2534 | * for VMDq only allow the VFs and pool 0 to accept broadcast and | ||
2535 | * multicast packets | ||
2536 | */ | ||
2537 | if (vfn <= adapter->vfs_allocated_count) | ||
2538 | vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ | ||
2539 | |||
2540 | wr32(E1000_VMOLR(vfn), vmolr); | ||
2541 | } | ||
2542 | |||
2168 | /** | 2543 | /** |
2169 | * igb_configure_vt_default_pool - Configure VT default pool | 2544 | * igb_configure_rx_ring - Configure a receive ring after Reset |
2170 | * @adapter: board private structure | 2545 | * @adapter: board private structure |
2546 | * @ring: receive ring to be configured | ||
2171 | * | 2547 | * |
2172 | * Configure the default pool | 2548 | * Configure the Rx unit of the MAC after a reset. |
2173 | **/ | 2549 | **/ |
2174 | static void igb_configure_vt_default_pool(struct igb_adapter *adapter) | 2550 | void igb_configure_rx_ring(struct igb_adapter *adapter, |
2551 | struct igb_ring *ring) | ||
2175 | { | 2552 | { |
2176 | struct e1000_hw *hw = &adapter->hw; | 2553 | struct e1000_hw *hw = &adapter->hw; |
2177 | u16 pf_id = adapter->vfs_allocated_count; | 2554 | u64 rdba = ring->dma; |
2178 | u32 vtctl; | 2555 | int reg_idx = ring->reg_idx; |
2556 | u32 srrctl, rxdctl; | ||
2557 | |||
2558 | /* disable the queue */ | ||
2559 | rxdctl = rd32(E1000_RXDCTL(reg_idx)); | ||
2560 | wr32(E1000_RXDCTL(reg_idx), | ||
2561 | rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); | ||
2562 | |||
2563 | /* Set DMA base address registers */ | ||
2564 | wr32(E1000_RDBAL(reg_idx), | ||
2565 | rdba & 0x00000000ffffffffULL); | ||
2566 | wr32(E1000_RDBAH(reg_idx), rdba >> 32); | ||
2567 | wr32(E1000_RDLEN(reg_idx), | ||
2568 | ring->count * sizeof(union e1000_adv_rx_desc)); | ||
2569 | |||
2570 | /* initialize head and tail */ | ||
2571 | ring->head = hw->hw_addr + E1000_RDH(reg_idx); | ||
2572 | ring->tail = hw->hw_addr + E1000_RDT(reg_idx); | ||
2573 | writel(0, ring->head); | ||
2574 | writel(0, ring->tail); | ||
2575 | |||
2576 | /* set descriptor configuration */ | ||
2577 | if (ring->rx_buffer_len < IGB_RXBUFFER_1024) { | ||
2578 | srrctl = ALIGN(ring->rx_buffer_len, 64) << | ||
2579 | E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; | ||
2580 | #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 | ||
2581 | srrctl |= IGB_RXBUFFER_16384 >> | ||
2582 | E1000_SRRCTL_BSIZEPKT_SHIFT; | ||
2583 | #else | ||
2584 | srrctl |= (PAGE_SIZE / 2) >> | ||
2585 | E1000_SRRCTL_BSIZEPKT_SHIFT; | ||
2586 | #endif | ||
2587 | srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; | ||
2588 | } else { | ||
2589 | srrctl = ALIGN(ring->rx_buffer_len, 1024) >> | ||
2590 | E1000_SRRCTL_BSIZEPKT_SHIFT; | ||
2591 | srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; | ||
2592 | } | ||
2593 | /* Only set Drop Enable if we are supporting multiple queues */ | ||
2594 | if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) | ||
2595 | srrctl |= E1000_SRRCTL_DROP_EN; | ||
2179 | 2596 | ||
2180 | /* not in sr-iov mode - do nothing */ | 2597 | wr32(E1000_SRRCTL(reg_idx), srrctl); |
2181 | if (!pf_id) | ||
2182 | return; | ||
2183 | 2598 | ||
2184 | vtctl = rd32(E1000_VT_CTL); | 2599 | /* set filtering for VMDQ pools */ |
2185 | vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | | 2600 | igb_set_vmolr(adapter, reg_idx & 0x7, true); |
2186 | E1000_VT_CTL_DISABLE_DEF_POOL); | 2601 | |
2187 | vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT; | 2602 | /* enable receive descriptor fetching */ |
2188 | wr32(E1000_VT_CTL, vtctl); | 2603 | rxdctl = rd32(E1000_RXDCTL(reg_idx)); |
2604 | rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; | ||
2605 | rxdctl &= 0xFFF00000; | ||
2606 | rxdctl |= IGB_RX_PTHRESH; | ||
2607 | rxdctl |= IGB_RX_HTHRESH << 8; | ||
2608 | rxdctl |= IGB_RX_WTHRESH << 16; | ||
2609 | wr32(E1000_RXDCTL(reg_idx), rxdctl); | ||
2189 | } | 2610 | } |
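A worked sizing for the one-buffer descriptor path above, assuming the SRRCTL packet-buffer field counts 1 KB units (i.e. E1000_SRRCTL_BSIZEPKT_SHIFT is 10):

	/* rx_buffer_len = 2048 */
	srrctl  = ALIGN(2048, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT;	/* 2, meaning 2 KB */
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;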
2190 | 2611 | ||
2191 | /** | 2612 | /** |
@@ -2196,112 +2617,19 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter) | |||
2196 | **/ | 2617 | **/ |
2197 | static void igb_configure_rx(struct igb_adapter *adapter) | 2618 | static void igb_configure_rx(struct igb_adapter *adapter) |
2198 | { | 2619 | { |
2199 | u64 rdba; | ||
2200 | struct e1000_hw *hw = &adapter->hw; | ||
2201 | u32 rctl, rxcsum; | ||
2202 | u32 rxdctl; | ||
2203 | int i; | 2620 | int i; |
2204 | 2621 | ||
2205 | /* disable receives while setting up the descriptors */ | 2622 | /* set UTA to appropriate mode */ |
2206 | rctl = rd32(E1000_RCTL); | 2623 | igb_set_uta(adapter); |
2207 | wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); | ||
2208 | wrfl(); | ||
2209 | mdelay(10); | ||
2210 | 2624 | ||
2211 | if (adapter->itr_setting > 3) | 2625 | /* set the correct pool for the PF default MAC address in entry 0 */ |
2212 | wr32(E1000_ITR, adapter->itr); | 2626 | igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, |
2627 | adapter->vfs_allocated_count); | ||
2213 | 2628 | ||
2214 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 2629 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
2215 | * the Base and Length of the Rx Descriptor Ring */ | 2630 | * the Base and Length of the Rx Descriptor Ring */ |
2216 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2631 | for (i = 0; i < adapter->num_rx_queues; i++) |
2217 | struct igb_ring *ring = &adapter->rx_ring[i]; | 2632 | igb_configure_rx_ring(adapter, adapter->rx_ring[i]); |
2218 | int j = ring->reg_idx; | ||
2219 | rdba = ring->dma; | ||
2220 | wr32(E1000_RDBAL(j), | ||
2221 | rdba & 0x00000000ffffffffULL); | ||
2222 | wr32(E1000_RDBAH(j), rdba >> 32); | ||
2223 | wr32(E1000_RDLEN(j), | ||
2224 | ring->count * sizeof(union e1000_adv_rx_desc)); | ||
2225 | |||
2226 | ring->head = E1000_RDH(j); | ||
2227 | ring->tail = E1000_RDT(j); | ||
2228 | writel(0, hw->hw_addr + ring->tail); | ||
2229 | writel(0, hw->hw_addr + ring->head); | ||
2230 | |||
2231 | rxdctl = rd32(E1000_RXDCTL(j)); | ||
2232 | rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; | ||
2233 | rxdctl &= 0xFFF00000; | ||
2234 | rxdctl |= IGB_RX_PTHRESH; | ||
2235 | rxdctl |= IGB_RX_HTHRESH << 8; | ||
2236 | rxdctl |= IGB_RX_WTHRESH << 16; | ||
2237 | wr32(E1000_RXDCTL(j), rxdctl); | ||
2238 | } | ||
2239 | |||
2240 | if (adapter->num_rx_queues > 1) { | ||
2241 | u32 random[10]; | ||
2242 | u32 mrqc; | ||
2243 | u32 j, shift; | ||
2244 | union e1000_reta { | ||
2245 | u32 dword; | ||
2246 | u8 bytes[4]; | ||
2247 | } reta; | ||
2248 | |||
2249 | get_random_bytes(&random[0], 40); | ||
2250 | |||
2251 | if (hw->mac.type >= e1000_82576) | ||
2252 | shift = 0; | ||
2253 | else | ||
2254 | shift = 6; | ||
2255 | for (j = 0; j < (32 * 4); j++) { | ||
2256 | reta.bytes[j & 3] = | ||
2257 | adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift; | ||
2258 | if ((j & 3) == 3) | ||
2259 | writel(reta.dword, | ||
2260 | hw->hw_addr + E1000_RETA(0) + (j & ~3)); | ||
2261 | } | ||
2262 | if (adapter->vfs_allocated_count) | ||
2263 | mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; | ||
2264 | else | ||
2265 | mrqc = E1000_MRQC_ENABLE_RSS_4Q; | ||
2266 | |||
2267 | /* Fill out hash function seeds */ | ||
2268 | for (j = 0; j < 10; j++) | ||
2269 | array_wr32(E1000_RSSRK(0), j, random[j]); | ||
2270 | |||
2271 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | | ||
2272 | E1000_MRQC_RSS_FIELD_IPV4_TCP); | ||
2273 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | | ||
2274 | E1000_MRQC_RSS_FIELD_IPV6_TCP); | ||
2275 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP | | ||
2276 | E1000_MRQC_RSS_FIELD_IPV6_UDP); | ||
2277 | mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | | ||
2278 | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); | ||
2279 | |||
2280 | wr32(E1000_MRQC, mrqc); | ||
2281 | } else if (adapter->vfs_allocated_count) { | ||
2282 | /* Enable multi-queue for sr-iov */ | ||
2283 | wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ); | ||
2284 | } | ||
2285 | |||
2286 | /* Enable Receive Checksum Offload for TCP and UDP */ | ||
2287 | rxcsum = rd32(E1000_RXCSUM); | ||
2288 | /* Disable raw packet checksumming */ | ||
2289 | rxcsum |= E1000_RXCSUM_PCSD; | ||
2290 | |||
2291 | if (adapter->hw.mac.type == e1000_82576) | ||
2292 | /* Enable Receive Checksum Offload for SCTP */ | ||
2293 | rxcsum |= E1000_RXCSUM_CRCOFL; | ||
2294 | |||
2295 | /* Don't need to set TUOFL or IPOFL, they default to 1 */ | ||
2296 | wr32(E1000_RXCSUM, rxcsum); | ||
2297 | |||
2298 | /* Set the default pool for the PF's first queue */ | ||
2299 | igb_configure_vt_default_pool(adapter); | ||
2300 | |||
2301 | igb_rlpml_set(adapter); | ||
2302 | |||
2303 | /* Enable Receives */ | ||
2304 | wr32(E1000_RCTL, rctl); | ||
2305 | } | 2633 | } |
2306 | 2634 | ||
2307 | /** | 2635 | /** |
@@ -2312,14 +2640,17 @@ static void igb_configure_rx(struct igb_adapter *adapter) | |||
2312 | **/ | 2640 | **/ |
2313 | void igb_free_tx_resources(struct igb_ring *tx_ring) | 2641 | void igb_free_tx_resources(struct igb_ring *tx_ring) |
2314 | { | 2642 | { |
2315 | struct pci_dev *pdev = tx_ring->adapter->pdev; | ||
2316 | |||
2317 | igb_clean_tx_ring(tx_ring); | 2643 | igb_clean_tx_ring(tx_ring); |
2318 | 2644 | ||
2319 | vfree(tx_ring->buffer_info); | 2645 | vfree(tx_ring->buffer_info); |
2320 | tx_ring->buffer_info = NULL; | 2646 | tx_ring->buffer_info = NULL; |
2321 | 2647 | ||
2322 | pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); | 2648 | /* if not set, then don't free */ |
2649 | if (!tx_ring->desc) | ||
2650 | return; | ||
2651 | |||
2652 | pci_free_consistent(tx_ring->pdev, tx_ring->size, | ||
2653 | tx_ring->desc, tx_ring->dma); | ||
2323 | 2654 | ||
2324 | tx_ring->desc = NULL; | 2655 | tx_ring->desc = NULL; |
2325 | } | 2656 | } |
@@ -2335,21 +2666,33 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) | |||
2335 | int i; | 2666 | int i; |
2336 | 2667 | ||
2337 | for (i = 0; i < adapter->num_tx_queues; i++) | 2668 | for (i = 0; i < adapter->num_tx_queues; i++) |
2338 | igb_free_tx_resources(&adapter->tx_ring[i]); | 2669 | igb_free_tx_resources(adapter->tx_ring[i]); |
2339 | } | 2670 | } |
2340 | 2671 | ||
2341 | static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, | 2672 | void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring, |
2342 | struct igb_buffer *buffer_info) | 2673 | struct igb_buffer *buffer_info) |
2343 | { | 2674 | { |
2344 | buffer_info->dma = 0; | 2675 | if (buffer_info->dma) { |
2676 | if (buffer_info->mapped_as_page) | ||
2677 | pci_unmap_page(tx_ring->pdev, | ||
2678 | buffer_info->dma, | ||
2679 | buffer_info->length, | ||
2680 | PCI_DMA_TODEVICE); | ||
2681 | else | ||
2682 | pci_unmap_single(tx_ring->pdev, | ||
2683 | buffer_info->dma, | ||
2684 | buffer_info->length, | ||
2685 | PCI_DMA_TODEVICE); | ||
2686 | buffer_info->dma = 0; | ||
2687 | } | ||
2345 | if (buffer_info->skb) { | 2688 | if (buffer_info->skb) { |
2346 | skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, | ||
2347 | DMA_TO_DEVICE); | ||
2348 | dev_kfree_skb_any(buffer_info->skb); | 2689 | dev_kfree_skb_any(buffer_info->skb); |
2349 | buffer_info->skb = NULL; | 2690 | buffer_info->skb = NULL; |
2350 | } | 2691 | } |
2351 | buffer_info->time_stamp = 0; | 2692 | buffer_info->time_stamp = 0; |
2352 | /* buffer_info must be completely set up in the transmit path */ | 2693 | buffer_info->length = 0; |
2694 | buffer_info->next_to_watch = 0; | ||
2695 | buffer_info->mapped_as_page = false; | ||
2353 | } | 2696 | } |
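The mapped_as_page flag added above exists so teardown can mirror the mapping call exactly: a pci_map_page() mapping must be undone with pci_unmap_page(), a pci_map_single() mapping with pci_unmap_single(). A sketch of the mapping side for the linear part of an skb (assumed for illustration; the real transmit path is outside this hunk):

	buffer_info->length = skb_headlen(skb);
	buffer_info->dma = pci_map_single(tx_ring->pdev, skb->data,
					  buffer_info->length,
					  PCI_DMA_TODEVICE);
	buffer_info->mapped_as_page = false;	/* frag mappings set this true */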
2354 | 2697 | ||
2355 | /** | 2698 | /** |
@@ -2358,7 +2701,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, | |||
2358 | **/ | 2701 | **/ |
2359 | static void igb_clean_tx_ring(struct igb_ring *tx_ring) | 2702 | static void igb_clean_tx_ring(struct igb_ring *tx_ring) |
2360 | { | 2703 | { |
2361 | struct igb_adapter *adapter = tx_ring->adapter; | ||
2362 | struct igb_buffer *buffer_info; | 2704 | struct igb_buffer *buffer_info; |
2363 | unsigned long size; | 2705 | unsigned long size; |
2364 | unsigned int i; | 2706 | unsigned int i; |
@@ -2369,21 +2711,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) | |||
2369 | 2711 | ||
2370 | for (i = 0; i < tx_ring->count; i++) { | 2712 | for (i = 0; i < tx_ring->count; i++) { |
2371 | buffer_info = &tx_ring->buffer_info[i]; | 2713 | buffer_info = &tx_ring->buffer_info[i]; |
2372 | igb_unmap_and_free_tx_resource(adapter, buffer_info); | 2714 | igb_unmap_and_free_tx_resource(tx_ring, buffer_info); |
2373 | } | 2715 | } |
2374 | 2716 | ||
2375 | size = sizeof(struct igb_buffer) * tx_ring->count; | 2717 | size = sizeof(struct igb_buffer) * tx_ring->count; |
2376 | memset(tx_ring->buffer_info, 0, size); | 2718 | memset(tx_ring->buffer_info, 0, size); |
2377 | 2719 | ||
2378 | /* Zero out the descriptor ring */ | 2720 | /* Zero out the descriptor ring */ |
2379 | |||
2380 | memset(tx_ring->desc, 0, tx_ring->size); | 2721 | memset(tx_ring->desc, 0, tx_ring->size); |
2381 | 2722 | ||
2382 | tx_ring->next_to_use = 0; | 2723 | tx_ring->next_to_use = 0; |
2383 | tx_ring->next_to_clean = 0; | 2724 | tx_ring->next_to_clean = 0; |
2384 | |||
2385 | writel(0, adapter->hw.hw_addr + tx_ring->head); | ||
2386 | writel(0, adapter->hw.hw_addr + tx_ring->tail); | ||
2387 | } | 2725 | } |
2388 | 2726 | ||
2389 | /** | 2727 | /** |
@@ -2395,7 +2733,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) | |||
2395 | int i; | 2733 | int i; |
2396 | 2734 | ||
2397 | for (i = 0; i < adapter->num_tx_queues; i++) | 2735 | for (i = 0; i < adapter->num_tx_queues; i++) |
2398 | igb_clean_tx_ring(&adapter->tx_ring[i]); | 2736 | igb_clean_tx_ring(adapter->tx_ring[i]); |
2399 | } | 2737 | } |
2400 | 2738 | ||
2401 | /** | 2739 | /** |
@@ -2406,14 +2744,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) | |||
2406 | **/ | 2744 | **/ |
2407 | void igb_free_rx_resources(struct igb_ring *rx_ring) | 2745 | void igb_free_rx_resources(struct igb_ring *rx_ring) |
2408 | { | 2746 | { |
2409 | struct pci_dev *pdev = rx_ring->adapter->pdev; | ||
2410 | |||
2411 | igb_clean_rx_ring(rx_ring); | 2747 | igb_clean_rx_ring(rx_ring); |
2412 | 2748 | ||
2413 | vfree(rx_ring->buffer_info); | 2749 | vfree(rx_ring->buffer_info); |
2414 | rx_ring->buffer_info = NULL; | 2750 | rx_ring->buffer_info = NULL; |
2415 | 2751 | ||
2416 | pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); | 2752 | /* if not set, then don't free */ |
2753 | if (!rx_ring->desc) | ||
2754 | return; | ||
2755 | |||
2756 | pci_free_consistent(rx_ring->pdev, rx_ring->size, | ||
2757 | rx_ring->desc, rx_ring->dma); | ||
2417 | 2758 | ||
2418 | rx_ring->desc = NULL; | 2759 | rx_ring->desc = NULL; |
2419 | } | 2760 | } |
@@ -2429,7 +2770,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) | |||
2429 | int i; | 2770 | int i; |
2430 | 2771 | ||
2431 | for (i = 0; i < adapter->num_rx_queues; i++) | 2772 | for (i = 0; i < adapter->num_rx_queues; i++) |
2432 | igb_free_rx_resources(&adapter->rx_ring[i]); | 2773 | igb_free_rx_resources(adapter->rx_ring[i]); |
2433 | } | 2774 | } |
2434 | 2775 | ||
2435 | /** | 2776 | /** |
@@ -2438,26 +2779,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) | |||
2438 | **/ | 2779 | **/ |
2439 | static void igb_clean_rx_ring(struct igb_ring *rx_ring) | 2780 | static void igb_clean_rx_ring(struct igb_ring *rx_ring) |
2440 | { | 2781 | { |
2441 | struct igb_adapter *adapter = rx_ring->adapter; | ||
2442 | struct igb_buffer *buffer_info; | 2782 | struct igb_buffer *buffer_info; |
2443 | struct pci_dev *pdev = adapter->pdev; | ||
2444 | unsigned long size; | 2783 | unsigned long size; |
2445 | unsigned int i; | 2784 | unsigned int i; |
2446 | 2785 | ||
2447 | if (!rx_ring->buffer_info) | 2786 | if (!rx_ring->buffer_info) |
2448 | return; | 2787 | return; |
2788 | |||
2449 | /* Free all the Rx ring sk_buffs */ | 2789 | /* Free all the Rx ring sk_buffs */ |
2450 | for (i = 0; i < rx_ring->count; i++) { | 2790 | for (i = 0; i < rx_ring->count; i++) { |
2451 | buffer_info = &rx_ring->buffer_info[i]; | 2791 | buffer_info = &rx_ring->buffer_info[i]; |
2452 | if (buffer_info->dma) { | 2792 | if (buffer_info->dma) { |
2453 | if (adapter->rx_ps_hdr_size) | 2793 | pci_unmap_single(rx_ring->pdev, |
2454 | pci_unmap_single(pdev, buffer_info->dma, | 2794 | buffer_info->dma, |
2455 | adapter->rx_ps_hdr_size, | 2795 | rx_ring->rx_buffer_len, |
2456 | PCI_DMA_FROMDEVICE); | 2796 | PCI_DMA_FROMDEVICE); |
2457 | else | ||
2458 | pci_unmap_single(pdev, buffer_info->dma, | ||
2459 | adapter->rx_buffer_len, | ||
2460 | PCI_DMA_FROMDEVICE); | ||
2461 | buffer_info->dma = 0; | 2797 | buffer_info->dma = 0; |
2462 | } | 2798 | } |
2463 | 2799 | ||
@@ -2465,14 +2801,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) | |||
2465 | dev_kfree_skb(buffer_info->skb); | 2801 | dev_kfree_skb(buffer_info->skb); |
2466 | buffer_info->skb = NULL; | 2802 | buffer_info->skb = NULL; |
2467 | } | 2803 | } |
2804 | if (buffer_info->page_dma) { | ||
2805 | pci_unmap_page(rx_ring->pdev, | ||
2806 | buffer_info->page_dma, | ||
2807 | PAGE_SIZE / 2, | ||
2808 | PCI_DMA_FROMDEVICE); | ||
2809 | buffer_info->page_dma = 0; | ||
2810 | } | ||
2468 | if (buffer_info->page) { | 2811 | if (buffer_info->page) { |
2469 | if (buffer_info->page_dma) | ||
2470 | pci_unmap_page(pdev, buffer_info->page_dma, | ||
2471 | PAGE_SIZE / 2, | ||
2472 | PCI_DMA_FROMDEVICE); | ||
2473 | put_page(buffer_info->page); | 2812 | put_page(buffer_info->page); |
2474 | buffer_info->page = NULL; | 2813 | buffer_info->page = NULL; |
2475 | buffer_info->page_dma = 0; | ||
2476 | buffer_info->page_offset = 0; | 2814 | buffer_info->page_offset = 0; |
2477 | } | 2815 | } |
2478 | } | 2816 | } |
@@ -2485,9 +2823,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) | |||
2485 | 2823 | ||
2486 | rx_ring->next_to_clean = 0; | 2824 | rx_ring->next_to_clean = 0; |
2487 | rx_ring->next_to_use = 0; | 2825 | rx_ring->next_to_use = 0; |
2488 | |||
2489 | writel(0, adapter->hw.hw_addr + rx_ring->head); | ||
2490 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | ||
2491 | } | 2826 | } |
2492 | 2827 | ||
2493 | /** | 2828 | /** |
@@ -2499,7 +2834,7 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter) | |||
2499 | int i; | 2834 | int i; |
2500 | 2835 | ||
2501 | for (i = 0; i < adapter->num_rx_queues; i++) | 2836 | for (i = 0; i < adapter->num_rx_queues; i++) |
2502 | igb_clean_rx_ring(&adapter->rx_ring[i]); | 2837 | igb_clean_rx_ring(adapter->rx_ring[i]); |
2503 | } | 2838 | } |
2504 | 2839 | ||
2505 | /** | 2840 | /** |
@@ -2521,61 +2856,83 @@ static int igb_set_mac(struct net_device *netdev, void *p) | |||
2521 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 2856 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
2522 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); | 2857 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); |
2523 | 2858 | ||
2524 | igb_rar_set(hw, hw->mac.addr, 0); | 2859 | /* set the correct pool for the new PF MAC address in entry 0 */ |
2525 | igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); | 2860 | igb_rar_set_qsel(adapter, hw->mac.addr, 0, |
2861 | adapter->vfs_allocated_count); | ||
2526 | 2862 | ||
2527 | return 0; | 2863 | return 0; |
2528 | } | 2864 | } |
2529 | 2865 | ||
2530 | /** | 2866 | /** |
2531 | * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set | 2867 | * igb_write_mc_addr_list - write multicast addresses to MTA |
2532 | * @netdev: network interface device structure | 2868 | * @netdev: network interface device structure |
2533 | * | 2869 | * |
2534 | * The set_rx_mode entry point is called whenever the unicast or multicast | 2870 | * Writes multicast address list to the MTA hash table. |
2535 | * address lists or the network interface flags are updated. This routine is | 2871 | * Returns: -ENOMEM on failure |
2536 | * responsible for configuring the hardware for proper unicast, multicast, | 2872 | * 0 on no addresses written |
2537 | * promiscuous mode, and all-multi behavior. | 2873 | * X on writing X addresses to MTA |
2538 | **/ | 2874 | **/ |
2539 | static void igb_set_rx_mode(struct net_device *netdev) | 2875 | static int igb_write_mc_addr_list(struct net_device *netdev) |
2540 | { | 2876 | { |
2541 | struct igb_adapter *adapter = netdev_priv(netdev); | 2877 | struct igb_adapter *adapter = netdev_priv(netdev); |
2542 | struct e1000_hw *hw = &adapter->hw; | 2878 | struct e1000_hw *hw = &adapter->hw; |
2543 | unsigned int rar_entries = hw->mac.rar_entry_count - | 2879 | struct dev_mc_list *mc_ptr; |
2544 | (adapter->vfs_allocated_count + 1); | 2880 | u8 *mta_list; |
2545 | struct dev_mc_list *mc_ptr = netdev->mc_list; | ||
2546 | u8 *mta_list = NULL; | ||
2547 | u32 rctl; | ||
2548 | int i; | 2881 | int i; |
2549 | 2882 | ||
2550 | /* Check for Promiscuous and All Multicast modes */ | 2883 | if (netdev_mc_empty(netdev)) { |
2551 | rctl = rd32(E1000_RCTL); | 2884 | /* nothing to program, so clear mc list */ |
2885 | igb_update_mc_addr_list(hw, NULL, 0); | ||
2886 | igb_restore_vf_multicasts(adapter); | ||
2887 | return 0; | ||
2888 | } | ||
2552 | 2889 | ||
2553 | if (netdev->flags & IFF_PROMISC) { | 2890 | mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); |
2554 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | 2891 | if (!mta_list) |
2555 | rctl &= ~E1000_RCTL_VFE; | 2892 | return -ENOMEM; |
2556 | } else { | ||
2557 | if (netdev->flags & IFF_ALLMULTI) | ||
2558 | rctl |= E1000_RCTL_MPE; | ||
2559 | else | ||
2560 | rctl &= ~E1000_RCTL_MPE; | ||
2561 | 2893 | ||
2562 | if (netdev->uc.count > rar_entries) | 2894 | /* The shared function expects a packed array of only addresses. */ |
2563 | rctl |= E1000_RCTL_UPE; | 2895 | i = 0; |
2564 | else | 2896 | netdev_for_each_mc_addr(mc_ptr, netdev) |
2565 | rctl &= ~E1000_RCTL_UPE; | 2897 | memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); |
2566 | rctl |= E1000_RCTL_VFE; | 2898 | |
2567 | } | 2899 | igb_update_mc_addr_list(hw, mta_list, i); |
2568 | wr32(E1000_RCTL, rctl); | 2900 | kfree(mta_list); |
2901 | |||
2902 | return netdev_mc_count(netdev); | ||
2903 | } | ||
2904 | |||
2905 | /** | ||
2906 | * igb_write_uc_addr_list - write unicast addresses to RAR table | ||
2907 | * @netdev: network interface device structure | ||
2908 | * | ||
2909 | * Writes unicast address list to the RAR table. | ||
2910 | * Returns: -ENOMEM on failure/insufficient address space | ||
2911 | * 0 on no addresses written | ||
2912 | * X on writing X addresses to the RAR table | ||
2913 | **/ | ||
2914 | static int igb_write_uc_addr_list(struct net_device *netdev) | ||
2915 | { | ||
2916 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
2917 | struct e1000_hw *hw = &adapter->hw; | ||
2918 | unsigned int vfn = adapter->vfs_allocated_count; | ||
2919 | unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); | ||
2920 | int count = 0; | ||
2921 | |||
2922 | /* return ENOMEM indicating insufficient memory for addresses */ | ||
2923 | if (netdev_uc_count(netdev) > rar_entries) | ||
2924 | return -ENOMEM; | ||
2569 | 2925 | ||
2570 | if (netdev->uc.count && rar_entries) { | 2926 | if (!netdev_uc_empty(netdev) && rar_entries) { |
2571 | struct netdev_hw_addr *ha; | 2927 | struct netdev_hw_addr *ha; |
2572 | list_for_each_entry(ha, &netdev->uc.list, list) { | 2928 | |
2929 | netdev_for_each_uc_addr(ha, netdev) { | ||
2573 | if (!rar_entries) | 2930 | if (!rar_entries) |
2574 | break; | 2931 | break; |
2575 | igb_rar_set(hw, ha->addr, rar_entries); | 2932 | igb_rar_set_qsel(adapter, ha->addr, |
2576 | igb_set_rah_pool(hw, adapter->vfs_allocated_count, | 2933 | rar_entries--, |
2577 | rar_entries); | 2934 | vfn); |
2578 | rar_entries--; | 2935 | count++; |
2579 | } | 2936 | } |
2580 | } | 2937 | } |
2581 | /* write the addresses in reverse order to avoid write combining */ | 2938 | /* write the addresses in reverse order to avoid write combining */ |
@@ -2585,29 +2942,79 @@ static void igb_set_rx_mode(struct net_device *netdev) | |||
2585 | } | 2942 | } |
2586 | wrfl(); | 2943 | wrfl(); |
2587 | 2944 | ||
2588 | if (!netdev->mc_count) { | 2945 | return count; |
2589 | /* nothing to program, so clear mc list */ | 2946 | } |
2590 | igb_update_mc_addr_list(hw, NULL, 0); | 2947 | |
2591 | igb_restore_vf_multicasts(adapter); | 2948 | /** |
2592 | return; | 2949 | * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set |
2950 | * @netdev: network interface device structure | ||
2951 | * | ||
2952 | * The set_rx_mode entry point is called whenever the unicast or multicast | ||
2953 | * address lists or the network interface flags are updated. This routine is | ||
2954 | * responsible for configuring the hardware for proper unicast, multicast, | ||
2955 | * promiscuous mode, and all-multi behavior. | ||
2956 | **/ | ||
2957 | static void igb_set_rx_mode(struct net_device *netdev) | ||
2958 | { | ||
2959 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
2960 | struct e1000_hw *hw = &adapter->hw; | ||
2961 | unsigned int vfn = adapter->vfs_allocated_count; | ||
2962 | u32 rctl, vmolr = 0; | ||
2963 | int count; | ||
2964 | |||
2965 | /* Check for Promiscuous and All Multicast modes */ | ||
2966 | rctl = rd32(E1000_RCTL); | ||
2967 | |||
2968 | /* clear the affected bits */ | ||
2969 | rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); | ||
2970 | |||
2971 | if (netdev->flags & IFF_PROMISC) { | ||
2972 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | ||
2973 | vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); | ||
2974 | } else { | ||
2975 | if (netdev->flags & IFF_ALLMULTI) { | ||
2976 | rctl |= E1000_RCTL_MPE; | ||
2977 | vmolr |= E1000_VMOLR_MPME; | ||
2978 | } else { | ||
2979 | /* | ||
2980 | * Write addresses to the MTA; if the attempt fails | ||
2981 | * then we should just turn on promiscuous mode so | ||
2982 | * that we can at least receive multicast traffic | ||
2983 | */ | ||
2984 | count = igb_write_mc_addr_list(netdev); | ||
2985 | if (count < 0) { | ||
2986 | rctl |= E1000_RCTL_MPE; | ||
2987 | vmolr |= E1000_VMOLR_MPME; | ||
2988 | } else if (count) { | ||
2989 | vmolr |= E1000_VMOLR_ROMPE; | ||
2990 | } | ||
2991 | } | ||
2992 | /* | ||
2993 | * Write addresses to available RAR registers; if there is not | ||
2994 | * sufficient space to store all the addresses then enable | ||
2995 | * unicast promiscuous mode | ||
2996 | */ | ||
2997 | count = igb_write_uc_addr_list(netdev); | ||
2998 | if (count < 0) { | ||
2999 | rctl |= E1000_RCTL_UPE; | ||
3000 | vmolr |= E1000_VMOLR_ROPE; | ||
3001 | } | ||
3002 | rctl |= E1000_RCTL_VFE; | ||
2593 | } | 3003 | } |
3004 | wr32(E1000_RCTL, rctl); | ||
2594 | 3005 | ||
2595 | mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC); | 3006 | /* |
2596 | if (!mta_list) { | 3007 | * In order to support SR-IOV and eventually VMDq it is necessary to set |
2597 | dev_err(&adapter->pdev->dev, | 3008 | * the VMOLR to enable the appropriate modes. Without this workaround |
2598 | "failed to allocate multicast filter list\n"); | 3009 | * we will have issues with VLAN tag stripping not being done for frames |
3010 | * that are only arriving because we are the default pool | ||
3011 | */ | ||
3012 | if (hw->mac.type < e1000_82576) | ||
2599 | return; | 3013 | return; |
2600 | } | ||
2601 | 3014 | ||
2602 | /* The shared function expects a packed array of only addresses. */ | 3015 | vmolr |= rd32(E1000_VMOLR(vfn)) & |
2603 | for (i = 0; i < netdev->mc_count; i++) { | 3016 | ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); |
2604 | if (!mc_ptr) | 3017 | wr32(E1000_VMOLR(vfn), vmolr); |
2605 | break; | ||
2606 | memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); | ||
2607 | mc_ptr = mc_ptr->next; | ||
2608 | } | ||
2609 | igb_update_mc_addr_list(hw, mta_list, i); | ||
2610 | kfree(mta_list); | ||
2611 | igb_restore_vf_multicasts(adapter); | 3018 | igb_restore_vf_multicasts(adapter); |
2612 | } | 3019 | } |
2613 | 3020 | ||
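For context, the rewritten igb_set_rx_mode() above reduces to a small fallback ladder: IFF_PROMISC wins outright, IFF_ALLMULTI or a failed multicast write forces MPE, and unicast filter overflow forces UPE. A minimal sketch of that ladder in isolation, assuming the E1000_RCTL_* bits from e1000_defines.h and using hypothetical mc_count/uc_count arguments in place of the igb_write_mc_addr_list()/igb_write_uc_addr_list() return values:

	/* Sketch only, not the driver's exact code: a negative count from
	 * either address-list writer means its filter table overflowed. */
	static u32 igb_rx_mode_bits(unsigned int if_flags, int mc_count, int uc_count)
	{
		u32 rctl = 0;

		if (if_flags & IFF_PROMISC)
			return E1000_RCTL_UPE | E1000_RCTL_MPE;

		if ((if_flags & IFF_ALLMULTI) || mc_count < 0)
			rctl |= E1000_RCTL_MPE;	/* multicast promiscuous */

		if (uc_count < 0)
			rctl |= E1000_RCTL_UPE;	/* unicast promiscuous */

		return rctl | E1000_RCTL_VFE;	/* VLAN filtering stays on */
	}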
@@ -2623,7 +3030,7 @@ static void igb_update_phy_info(unsigned long data) | |||
2623 | * igb_has_link - check shared code for link and determine up/down | 3030 | * igb_has_link - check shared code for link and determine up/down |
2624 | * @adapter: pointer to driver private info | 3031 | * @adapter: pointer to driver private info |
2625 | **/ | 3032 | **/ |
2626 | static bool igb_has_link(struct igb_adapter *adapter) | 3033 | bool igb_has_link(struct igb_adapter *adapter) |
2627 | { | 3034 | { |
2628 | struct e1000_hw *hw = &adapter->hw; | 3035 | struct e1000_hw *hw = &adapter->hw; |
2629 | bool link_active = false; | 3036 | bool link_active = false; |
@@ -2669,49 +3076,41 @@ static void igb_watchdog(unsigned long data) | |||
2669 | static void igb_watchdog_task(struct work_struct *work) | 3076 | static void igb_watchdog_task(struct work_struct *work) |
2670 | { | 3077 | { |
2671 | struct igb_adapter *adapter = container_of(work, | 3078 | struct igb_adapter *adapter = container_of(work, |
2672 | struct igb_adapter, watchdog_task); | 3079 | struct igb_adapter, |
3080 | watchdog_task); | ||
2673 | struct e1000_hw *hw = &adapter->hw; | 3081 | struct e1000_hw *hw = &adapter->hw; |
2674 | struct net_device *netdev = adapter->netdev; | 3082 | struct net_device *netdev = adapter->netdev; |
2675 | struct igb_ring *tx_ring = adapter->tx_ring; | ||
2676 | u32 link; | 3083 | u32 link; |
2677 | u32 eics = 0; | ||
2678 | int i; | 3084 | int i; |
2679 | 3085 | ||
2680 | link = igb_has_link(adapter); | 3086 | link = igb_has_link(adapter); |
2681 | if ((netif_carrier_ok(netdev)) && link) | ||
2682 | goto link_up; | ||
2683 | |||
2684 | if (link) { | 3087 | if (link) { |
2685 | if (!netif_carrier_ok(netdev)) { | 3088 | if (!netif_carrier_ok(netdev)) { |
2686 | u32 ctrl; | 3089 | u32 ctrl; |
2687 | hw->mac.ops.get_speed_and_duplex(&adapter->hw, | 3090 | hw->mac.ops.get_speed_and_duplex(hw, |
2688 | &adapter->link_speed, | 3091 | &adapter->link_speed, |
2689 | &adapter->link_duplex); | 3092 | &adapter->link_duplex); |
2690 | 3093 | ||
2691 | ctrl = rd32(E1000_CTRL); | 3094 | ctrl = rd32(E1000_CTRL); |
2692 | /* Link status message must follow this format */ | 3095 | /* Link status message must follow this format */ |
2693 | printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " | 3096 | printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " |
2694 | "Flow Control: %s\n", | 3097 | "Flow Control: %s\n", |
2695 | netdev->name, | 3098 | netdev->name, |
2696 | adapter->link_speed, | 3099 | adapter->link_speed, |
2697 | adapter->link_duplex == FULL_DUPLEX ? | 3100 | adapter->link_duplex == FULL_DUPLEX ? |
2698 | "Full Duplex" : "Half Duplex", | 3101 | "Full Duplex" : "Half Duplex", |
2699 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & | 3102 | ((ctrl & E1000_CTRL_TFCE) && |
2700 | E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & | 3103 | (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" : |
2701 | E1000_CTRL_RFCE) ? "RX" : ((ctrl & | 3104 | ((ctrl & E1000_CTRL_RFCE) ? "RX" : |
2702 | E1000_CTRL_TFCE) ? "TX" : "None"))); | 3105 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); |
2703 | 3106 | ||
2704 | /* tweak tx_queue_len according to speed/duplex and | 3107 | /* adjust timeout factor according to speed/duplex */ |
2705 | * adjust the timeout factor */ | ||
2706 | netdev->tx_queue_len = adapter->tx_queue_len; | ||
2707 | adapter->tx_timeout_factor = 1; | 3108 | adapter->tx_timeout_factor = 1; |
2708 | switch (adapter->link_speed) { | 3109 | switch (adapter->link_speed) { |
2709 | case SPEED_10: | 3110 | case SPEED_10: |
2710 | netdev->tx_queue_len = 10; | ||
2711 | adapter->tx_timeout_factor = 14; | 3111 | adapter->tx_timeout_factor = 14; |
2712 | break; | 3112 | break; |
2713 | case SPEED_100: | 3113 | case SPEED_100: |
2714 | netdev->tx_queue_len = 100; | ||
2715 | /* maybe add some timeout factor? */ | 3114 | /* maybe add some timeout factor? */ |
2716 | break; | 3115 | break; |
2717 | } | 3116 | } |
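The flow-control string in the link-up printk above comes from a nested ternary on two CTRL bits; it is equivalent to this four-way table, shown here as a sketch assuming E1000_CTRL_TFCE and E1000_CTRL_RFCE from e1000_defines.h:

	static const char *igb_fc_string(u32 ctrl)
	{
		bool tx = ctrl & E1000_CTRL_TFCE;	/* we send pause frames */
		bool rx = ctrl & E1000_CTRL_RFCE;	/* we honor pause frames */

		if (rx && tx)
			return "RX/TX";
		if (rx)
			return "RX";
		if (tx)
			return "TX";
		return "None";
	}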
@@ -2743,46 +3142,39 @@ static void igb_watchdog_task(struct work_struct *work) | |||
2743 | } | 3142 | } |
2744 | } | 3143 | } |
2745 | 3144 | ||
2746 | link_up: | ||
2747 | igb_update_stats(adapter); | 3145 | igb_update_stats(adapter); |
2748 | 3146 | ||
2749 | hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; | 3147 | for (i = 0; i < adapter->num_tx_queues; i++) { |
2750 | adapter->tpt_old = adapter->stats.tpt; | 3148 | struct igb_ring *tx_ring = adapter->tx_ring[i]; |
2751 | hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old; | 3149 | if (!netif_carrier_ok(netdev)) { |
2752 | adapter->colc_old = adapter->stats.colc; | ||
2753 | |||
2754 | adapter->gorc = adapter->stats.gorc - adapter->gorc_old; | ||
2755 | adapter->gorc_old = adapter->stats.gorc; | ||
2756 | adapter->gotc = adapter->stats.gotc - adapter->gotc_old; | ||
2757 | adapter->gotc_old = adapter->stats.gotc; | ||
2758 | |||
2759 | igb_update_adaptive(&adapter->hw); | ||
2760 | |||
2761 | if (!netif_carrier_ok(netdev)) { | ||
2762 | if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { | ||
2763 | /* We've lost link, so the controller stops DMA, | 3150 | /* We've lost link, so the controller stops DMA, |
2764 | * but we've got queued Tx work that's never going | 3151 | * but we've got queued Tx work that's never going |
2765 | * to get done, so reset controller to flush Tx. | 3152 | * to get done, so reset controller to flush Tx. |
2766 | * (Do the reset outside of interrupt context). */ | 3153 | * (Do the reset outside of interrupt context). */ |
2767 | adapter->tx_timeout_count++; | 3154 | if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { |
2768 | schedule_work(&adapter->reset_task); | 3155 | adapter->tx_timeout_count++; |
2769 | /* return immediately since reset is imminent */ | 3156 | schedule_work(&adapter->reset_task); |
2770 | return; | 3157 | /* return immediately since reset is imminent */ |
3158 | return; | ||
3159 | } | ||
2771 | } | 3160 | } |
3161 | |||
3162 | /* Force detection of hung controller every watchdog period */ | ||
3163 | tx_ring->detect_tx_hung = true; | ||
2772 | } | 3164 | } |
2773 | 3165 | ||
2774 | /* Cause software interrupt to ensure rx ring is cleaned */ | 3166 | /* Cause software interrupt to ensure rx ring is cleaned */ |
2775 | if (adapter->msix_entries) { | 3167 | if (adapter->msix_entries) { |
2776 | for (i = 0; i < adapter->num_rx_queues; i++) | 3168 | u32 eics = 0; |
2777 | eics |= adapter->rx_ring[i].eims_value; | 3169 | for (i = 0; i < adapter->num_q_vectors; i++) { |
3170 | struct igb_q_vector *q_vector = adapter->q_vector[i]; | ||
3171 | eics |= q_vector->eims_value; | ||
3172 | } | ||
2778 | wr32(E1000_EICS, eics); | 3173 | wr32(E1000_EICS, eics); |
2779 | } else { | 3174 | } else { |
2780 | wr32(E1000_ICS, E1000_ICS_RXDMT0); | 3175 | wr32(E1000_ICS, E1000_ICS_RXDMT0); |
2781 | } | 3176 | } |
2782 | 3177 | ||
2783 | /* Force detection of hung controller every watchdog period */ | ||
2784 | tx_ring->detect_tx_hung = true; | ||
2785 | |||
2786 | /* Reset the timer */ | 3178 | /* Reset the timer */ |
2787 | if (!test_bit(__IGB_DOWN, &adapter->state)) | 3179 | if (!test_bit(__IGB_DOWN, &adapter->state)) |
2788 | mod_timer(&adapter->watchdog_timer, | 3180 | mod_timer(&adapter->watchdog_timer, |
@@ -2796,7 +3188,6 @@ enum latency_range { | |||
2796 | latency_invalid = 255 | 3188 | latency_invalid = 255 |
2797 | }; | 3189 | }; |
2798 | 3190 | ||
2799 | |||
2800 | /** | 3191 | /** |
2801 | * igb_update_ring_itr - update the dynamic ITR value based on packet size | 3192 | * igb_update_ring_itr - update the dynamic ITR value based on packet size |
2802 | * | 3193 | * |
@@ -2811,25 +3202,37 @@ enum latency_range { | |||
2811 | * parameter (see igb_param.c) | 3202 | * parameter (see igb_param.c) |
2812 | * NOTE: This function is called only when operating in a multiqueue | 3203 | * NOTE: This function is called only when operating in a multiqueue |
2813 | * receive environment. | 3204 | * receive environment. |
2814 | * @rx_ring: pointer to ring | 3205 | * @q_vector: pointer to q_vector |
2815 | **/ | 3206 | **/ |
2816 | static void igb_update_ring_itr(struct igb_ring *rx_ring) | 3207 | static void igb_update_ring_itr(struct igb_q_vector *q_vector) |
2817 | { | 3208 | { |
2818 | int new_val = rx_ring->itr_val; | 3209 | int new_val = q_vector->itr_val; |
2819 | int avg_wire_size = 0; | 3210 | int avg_wire_size = 0; |
2820 | struct igb_adapter *adapter = rx_ring->adapter; | 3211 | struct igb_adapter *adapter = q_vector->adapter; |
2821 | |||
2822 | if (!rx_ring->total_packets) | ||
2823 | goto clear_counts; /* no packets, so don't do anything */ | ||
2824 | 3212 | ||
2825 | /* For non-gigabit speeds, just fix the interrupt rate at 4000 | 3213 | /* For non-gigabit speeds, just fix the interrupt rate at 4000 |
2826 | * ints/sec - ITR timer value of 120 ticks. | 3214 | * ints/sec - ITR timer value of 120 ticks. |
2827 | */ | 3215 | */ |
2828 | if (adapter->link_speed != SPEED_1000) { | 3216 | if (adapter->link_speed != SPEED_1000) { |
2829 | new_val = 120; | 3217 | new_val = 976; |
2830 | goto set_itr_val; | 3218 | goto set_itr_val; |
2831 | } | 3219 | } |
2832 | avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets; | 3220 | |
3221 | if (q_vector->rx_ring && q_vector->rx_ring->total_packets) { | ||
3222 | struct igb_ring *ring = q_vector->rx_ring; | ||
3223 | avg_wire_size = ring->total_bytes / ring->total_packets; | ||
3224 | } | ||
3225 | |||
3226 | if (q_vector->tx_ring && q_vector->tx_ring->total_packets) { | ||
3227 | struct igb_ring *ring = q_vector->tx_ring; | ||
3228 | avg_wire_size = max_t(u32, avg_wire_size, | ||
3229 | (ring->total_bytes / | ||
3230 | ring->total_packets)); | ||
3231 | } | ||
3232 | |||
3233 | /* if avg_wire_size isn't set, no work was done */ | ||
3234 | if (!avg_wire_size) | ||
3235 | goto clear_counts; | ||
2833 | 3236 | ||
2834 | /* Add 24 bytes to size to account for CRC, preamble, and gap */ | 3237 | /* Add 24 bytes to size to account for CRC, preamble, and gap */ |
2835 | avg_wire_size += 24; | 3238 | avg_wire_size += 24; |
@@ -2843,14 +3246,24 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring) | |||
2843 | else | 3246 | else |
2844 | new_val = avg_wire_size / 2; | 3247 | new_val = avg_wire_size / 2; |
2845 | 3248 | ||
3249 | /* when in itr mode 3, do not exceed 20K ints/sec */ | ||
3250 | if (adapter->rx_itr_setting == 3 && new_val < 196) | ||
3251 | new_val = 196; | ||
3252 | |||
2846 | set_itr_val: | 3253 | set_itr_val: |
2847 | if (new_val != rx_ring->itr_val) { | 3254 | if (new_val != q_vector->itr_val) { |
2848 | rx_ring->itr_val = new_val; | 3255 | q_vector->itr_val = new_val; |
2849 | rx_ring->set_itr = 1; | 3256 | q_vector->set_itr = 1; |
2850 | } | 3257 | } |
2851 | clear_counts: | 3258 | clear_counts: |
2852 | rx_ring->total_bytes = 0; | 3259 | if (q_vector->rx_ring) { |
2853 | rx_ring->total_packets = 0; | 3260 | q_vector->rx_ring->total_bytes = 0; |
3261 | q_vector->rx_ring->total_packets = 0; | ||
3262 | } | ||
3263 | if (q_vector->tx_ring) { | ||
3264 | q_vector->tx_ring->total_bytes = 0; | ||
3265 | q_vector->tx_ring->total_packets = 0; | ||
3266 | } | ||
2854 | } | 3267 | } |
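Two worked examples of the visible avg_wire_size arithmetic, ignoring any clamps that fall outside this hunk:

	/* Bulk flow of 1500-byte frames:
	 *	avg_wire_size = 1500 + 24 = 1524	(CRC + preamble + gap)
	 *	new_val       = 1524 / 2  = 762		(default branch above)
	 * Small-packet flow of 100-byte frames:
	 *	avg_wire_size = 100 + 24 = 124
	 *	new_val       = 124 / 2  = 62
	 * and 62 is then raised to the 196 floor when rx_itr_setting == 3,
	 * capping conservative mode near 20K interrupts per second. */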
2855 | 3268 | ||
2856 | /** | 3269 | /** |
@@ -2867,7 +3280,7 @@ clear_counts: | |||
2867 | * NOTE: These calculations are only valid when operating in a single- | 3280 | * NOTE: These calculations are only valid when operating in a single- |
2868 | * queue environment. | 3281 | * queue environment. |
2869 | * @adapter: pointer to adapter | 3282 | * @adapter: pointer to adapter |
2870 | * @itr_setting: current adapter->itr | 3283 | * @itr_setting: current q_vector->itr_val |
2871 | * @packets: the number of packets during this measurement interval | 3284 | * @packets: the number of packets during this measurement interval |
2872 | * @bytes: the number of bytes during this measurement interval | 3285 | * @bytes: the number of bytes during this measurement interval |
2873 | **/ | 3286 | **/ |
@@ -2919,8 +3332,9 @@ update_itr_done: | |||
2919 | 3332 | ||
2920 | static void igb_set_itr(struct igb_adapter *adapter) | 3333 | static void igb_set_itr(struct igb_adapter *adapter) |
2921 | { | 3334 | { |
3335 | struct igb_q_vector *q_vector = adapter->q_vector[0]; | ||
2922 | u16 current_itr; | 3336 | u16 current_itr; |
2923 | u32 new_itr = adapter->itr; | 3337 | u32 new_itr = q_vector->itr_val; |
2924 | 3338 | ||
2925 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ | 3339 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ |
2926 | if (adapter->link_speed != SPEED_1000) { | 3340 | if (adapter->link_speed != SPEED_1000) { |
@@ -2931,21 +3345,17 @@ static void igb_set_itr(struct igb_adapter *adapter) | |||
2931 | 3345 | ||
2932 | adapter->rx_itr = igb_update_itr(adapter, | 3346 | adapter->rx_itr = igb_update_itr(adapter, |
2933 | adapter->rx_itr, | 3347 | adapter->rx_itr, |
2934 | adapter->rx_ring->total_packets, | 3348 | q_vector->rx_ring->total_packets, |
2935 | adapter->rx_ring->total_bytes); | 3349 | q_vector->rx_ring->total_bytes); |
2936 | 3350 | ||
2937 | if (adapter->rx_ring->buddy) { | 3351 | adapter->tx_itr = igb_update_itr(adapter, |
2938 | adapter->tx_itr = igb_update_itr(adapter, | 3352 | adapter->tx_itr, |
2939 | adapter->tx_itr, | 3353 | q_vector->tx_ring->total_packets, |
2940 | adapter->tx_ring->total_packets, | 3354 | q_vector->tx_ring->total_bytes); |
2941 | adapter->tx_ring->total_bytes); | 3355 | current_itr = max(adapter->rx_itr, adapter->tx_itr); |
2942 | current_itr = max(adapter->rx_itr, adapter->tx_itr); | ||
2943 | } else { | ||
2944 | current_itr = adapter->rx_itr; | ||
2945 | } | ||
2946 | 3356 | ||
2947 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | 3357 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ |
2948 | if (adapter->itr_setting == 3 && current_itr == lowest_latency) | 3358 | if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency) |
2949 | current_itr = low_latency; | 3359 | current_itr = low_latency; |
2950 | 3360 | ||
2951 | switch (current_itr) { | 3361 | switch (current_itr) { |
@@ -2964,20 +3374,19 @@ static void igb_set_itr(struct igb_adapter *adapter) | |||
2964 | } | 3374 | } |
2965 | 3375 | ||
2966 | set_itr_now: | 3376 | set_itr_now: |
2967 | adapter->rx_ring->total_bytes = 0; | 3377 | q_vector->rx_ring->total_bytes = 0; |
2968 | adapter->rx_ring->total_packets = 0; | 3378 | q_vector->rx_ring->total_packets = 0; |
2969 | if (adapter->rx_ring->buddy) { | 3379 | q_vector->tx_ring->total_bytes = 0; |
2970 | adapter->rx_ring->buddy->total_bytes = 0; | 3380 | q_vector->tx_ring->total_packets = 0; |
2971 | adapter->rx_ring->buddy->total_packets = 0; | ||
2972 | } | ||
2973 | 3381 | ||
2974 | if (new_itr != adapter->itr) { | 3382 | if (new_itr != q_vector->itr_val) { |
2975 | /* this attempts to bias the interrupt rate towards Bulk | 3383 | /* this attempts to bias the interrupt rate towards Bulk |
2976 | * by adding intermediate steps when interrupt rate is | 3384 | * by adding intermediate steps when interrupt rate is |
2977 | * increasing */ | 3385 | * increasing */ |
2978 | new_itr = new_itr > adapter->itr ? | 3386 | new_itr = new_itr > q_vector->itr_val ? |
2979 | max((new_itr * adapter->itr) / | 3387 | max((new_itr * q_vector->itr_val) / |
2980 | (new_itr + (adapter->itr >> 2)), new_itr) : | 3388 | (new_itr + (q_vector->itr_val >> 2)), |
3389 | new_itr) : | ||
2981 | new_itr; | 3390 | new_itr; |
2982 | /* Don't write the value here; it resets the adapter's | 3391 | /* Don't write the value here; it resets the adapter's |
2983 | * internal timer, and causes us to delay far longer than | 3392 | * internal timer, and causes us to delay far longer than |
@@ -2985,25 +3394,22 @@ set_itr_now: | |||
2985 | * value at the beginning of the next interrupt so the timing | 3394 | * value at the beginning of the next interrupt so the timing |
2986 | * ends up being correct. | 3395 | * ends up being correct. |
2987 | */ | 3396 | */ |
2988 | adapter->itr = new_itr; | 3397 | q_vector->itr_val = new_itr; |
2989 | adapter->rx_ring->itr_val = new_itr; | 3398 | q_vector->set_itr = 1; |
2990 | adapter->rx_ring->set_itr = 1; | ||
2991 | } | 3399 | } |
2992 | 3400 | ||
2993 | return; | 3401 | return; |
2994 | } | 3402 | } |
2995 | 3403 | ||
2996 | |||
2997 | #define IGB_TX_FLAGS_CSUM 0x00000001 | 3404 | #define IGB_TX_FLAGS_CSUM 0x00000001 |
2998 | #define IGB_TX_FLAGS_VLAN 0x00000002 | 3405 | #define IGB_TX_FLAGS_VLAN 0x00000002 |
2999 | #define IGB_TX_FLAGS_TSO 0x00000004 | 3406 | #define IGB_TX_FLAGS_TSO 0x00000004 |
3000 | #define IGB_TX_FLAGS_IPV4 0x00000008 | 3407 | #define IGB_TX_FLAGS_IPV4 0x00000008 |
3001 | #define IGB_TX_FLAGS_TSTAMP 0x00000010 | 3408 | #define IGB_TX_FLAGS_TSTAMP 0x00000010 |
3002 | #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 | 3409 | #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 |
3003 | #define IGB_TX_FLAGS_VLAN_SHIFT 16 | 3410 | #define IGB_TX_FLAGS_VLAN_SHIFT 16 |
3004 | 3411 | ||
3005 | static inline int igb_tso_adv(struct igb_adapter *adapter, | 3412 | static inline int igb_tso_adv(struct igb_ring *tx_ring, |
3006 | struct igb_ring *tx_ring, | ||
3007 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) | 3413 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) |
3008 | { | 3414 | { |
3009 | struct e1000_adv_tx_context_desc *context_desc; | 3415 | struct e1000_adv_tx_context_desc *context_desc; |
@@ -3011,8 +3417,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter, | |||
3011 | int err; | 3417 | int err; |
3012 | struct igb_buffer *buffer_info; | 3418 | struct igb_buffer *buffer_info; |
3013 | u32 info = 0, tu_cmd = 0; | 3419 | u32 info = 0, tu_cmd = 0; |
3014 | u32 mss_l4len_idx, l4len; | 3420 | u32 mss_l4len_idx; |
3015 | *hdr_len = 0; | 3421 | u8 l4len; |
3016 | 3422 | ||
3017 | if (skb_header_cloned(skb)) { | 3423 | if (skb_header_cloned(skb)) { |
3018 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 3424 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
@@ -3031,7 +3437,7 @@ static inline int igb_tso_adv(struct igb_adapter *adapter, | |||
3031 | iph->daddr, 0, | 3437 | iph->daddr, 0, |
3032 | IPPROTO_TCP, | 3438 | IPPROTO_TCP, |
3033 | 0); | 3439 | 0); |
3034 | } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { | 3440 | } else if (skb_is_gso_v6(skb)) { |
3035 | ipv6_hdr(skb)->payload_len = 0; | 3441 | ipv6_hdr(skb)->payload_len = 0; |
3036 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 3442 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
3037 | &ipv6_hdr(skb)->daddr, | 3443 | &ipv6_hdr(skb)->daddr, |
@@ -3065,8 +3471,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter, | |||
3065 | mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); | 3471 | mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); |
3066 | 3472 | ||
3067 | /* For 82575, context index must be unique per ring. */ | 3473 | /* For 82575, context index must be unique per ring. */ |
3068 | if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) | 3474 | if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) |
3069 | mss_l4len_idx |= tx_ring->queue_index << 4; | 3475 | mss_l4len_idx |= tx_ring->reg_idx << 4; |
3070 | 3476 | ||
3071 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | 3477 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); |
3072 | context_desc->seqnum_seed = 0; | 3478 | context_desc->seqnum_seed = 0; |
@@ -3083,14 +3489,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter, | |||
3083 | return true; | 3489 | return true; |
3084 | } | 3490 | } |
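For reference, the mss_l4len_idx word built by igb_tso_adv() packs three fields into a single descriptor dword; a sketch of the packing, assuming E1000_ADVTXD_MSS_SHIFT alongside the L4LEN shift visible in the hunk:

	u32 mss_l4len_idx = 0;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= l4len << E1000_ADVTXD_L4LEN_SHIFT;
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;	/* 82575: unique per ring */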
3085 | 3491 | ||
3086 | static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, | 3492 | static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring, |
3087 | struct igb_ring *tx_ring, | 3493 | struct sk_buff *skb, u32 tx_flags) |
3088 | struct sk_buff *skb, u32 tx_flags) | ||
3089 | { | 3494 | { |
3090 | struct e1000_adv_tx_context_desc *context_desc; | 3495 | struct e1000_adv_tx_context_desc *context_desc; |
3091 | unsigned int i; | 3496 | struct pci_dev *pdev = tx_ring->pdev; |
3092 | struct igb_buffer *buffer_info; | 3497 | struct igb_buffer *buffer_info; |
3093 | u32 info = 0, tu_cmd = 0; | 3498 | u32 info = 0, tu_cmd = 0; |
3499 | unsigned int i; | ||
3094 | 3500 | ||
3095 | if ((skb->ip_summed == CHECKSUM_PARTIAL) || | 3501 | if ((skb->ip_summed == CHECKSUM_PARTIAL) || |
3096 | (tx_flags & IGB_TX_FLAGS_VLAN)) { | 3502 | (tx_flags & IGB_TX_FLAGS_VLAN)) { |
@@ -3100,6 +3506,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, | |||
3100 | 3506 | ||
3101 | if (tx_flags & IGB_TX_FLAGS_VLAN) | 3507 | if (tx_flags & IGB_TX_FLAGS_VLAN) |
3102 | info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); | 3508 | info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); |
3509 | |||
3103 | info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); | 3510 | info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); |
3104 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 3511 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
3105 | info |= skb_network_header_len(skb); | 3512 | info |= skb_network_header_len(skb); |
@@ -3137,7 +3544,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, | |||
3137 | break; | 3544 | break; |
3138 | default: | 3545 | default: |
3139 | if (unlikely(net_ratelimit())) | 3546 | if (unlikely(net_ratelimit())) |
3140 | dev_warn(&adapter->pdev->dev, | 3547 | dev_warn(&pdev->dev, |
3141 | "partial checksum but proto=%x!\n", | 3548 | "partial checksum but proto=%x!\n", |
3142 | skb->protocol); | 3549 | skb->protocol); |
3143 | break; | 3550 | break; |
@@ -3146,11 +3553,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, | |||
3146 | 3553 | ||
3147 | context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); | 3554 | context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); |
3148 | context_desc->seqnum_seed = 0; | 3555 | context_desc->seqnum_seed = 0; |
3149 | if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) | 3556 | if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) |
3150 | context_desc->mss_l4len_idx = | 3557 | context_desc->mss_l4len_idx = |
3151 | cpu_to_le32(tx_ring->queue_index << 4); | 3558 | cpu_to_le32(tx_ring->reg_idx << 4); |
3152 | else | ||
3153 | context_desc->mss_l4len_idx = 0; | ||
3154 | 3559 | ||
3155 | buffer_info->time_stamp = jiffies; | 3560 | buffer_info->time_stamp = jiffies; |
3156 | buffer_info->next_to_watch = i; | 3561 | buffer_info->next_to_watch = i; |
@@ -3169,36 +3574,32 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, | |||
3169 | #define IGB_MAX_TXD_PWR 16 | 3574 | #define IGB_MAX_TXD_PWR 16 |
3170 | #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) | 3575 | #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) |
3171 | 3576 | ||
3172 | static inline int igb_tx_map_adv(struct igb_adapter *adapter, | 3577 | static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb, |
3173 | struct igb_ring *tx_ring, struct sk_buff *skb, | ||
3174 | unsigned int first) | 3578 | unsigned int first) |
3175 | { | 3579 | { |
3176 | struct igb_buffer *buffer_info; | 3580 | struct igb_buffer *buffer_info; |
3581 | struct pci_dev *pdev = tx_ring->pdev; | ||
3177 | unsigned int len = skb_headlen(skb); | 3582 | unsigned int len = skb_headlen(skb); |
3178 | unsigned int count = 0, i; | 3583 | unsigned int count = 0, i; |
3179 | unsigned int f; | 3584 | unsigned int f; |
3180 | dma_addr_t *map; | ||
3181 | 3585 | ||
3182 | i = tx_ring->next_to_use; | 3586 | i = tx_ring->next_to_use; |
3183 | 3587 | ||
3184 | if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { | ||
3185 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); | ||
3186 | return 0; | ||
3187 | } | ||
3188 | |||
3189 | map = skb_shinfo(skb)->dma_maps; | ||
3190 | |||
3191 | buffer_info = &tx_ring->buffer_info[i]; | 3588 | buffer_info = &tx_ring->buffer_info[i]; |
3192 | BUG_ON(len >= IGB_MAX_DATA_PER_TXD); | 3589 | BUG_ON(len >= IGB_MAX_DATA_PER_TXD); |
3193 | buffer_info->length = len; | 3590 | buffer_info->length = len; |
3194 | /* set time_stamp *before* dma to help avoid a possible race */ | 3591 | /* set time_stamp *before* dma to help avoid a possible race */ |
3195 | buffer_info->time_stamp = jiffies; | 3592 | buffer_info->time_stamp = jiffies; |
3196 | buffer_info->next_to_watch = i; | 3593 | buffer_info->next_to_watch = i; |
3197 | buffer_info->dma = skb_shinfo(skb)->dma_head; | 3594 | buffer_info->dma = pci_map_single(pdev, skb->data, len, |
3595 | PCI_DMA_TODEVICE); | ||
3596 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | ||
3597 | goto dma_error; | ||
3198 | 3598 | ||
3199 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { | 3599 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { |
3200 | struct skb_frag_struct *frag; | 3600 | struct skb_frag_struct *frag; |
3201 | 3601 | ||
3602 | count++; | ||
3202 | i++; | 3603 | i++; |
3203 | if (i == tx_ring->count) | 3604 | if (i == tx_ring->count) |
3204 | i = 0; | 3605 | i = 0; |
@@ -3211,25 +3612,53 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter, | |||
3211 | buffer_info->length = len; | 3612 | buffer_info->length = len; |
3212 | buffer_info->time_stamp = jiffies; | 3613 | buffer_info->time_stamp = jiffies; |
3213 | buffer_info->next_to_watch = i; | 3614 | buffer_info->next_to_watch = i; |
3214 | buffer_info->dma = map[count]; | 3615 | buffer_info->mapped_as_page = true; |
3215 | count++; | 3616 | buffer_info->dma = pci_map_page(pdev, |
3617 | frag->page, | ||
3618 | frag->page_offset, | ||
3619 | len, | ||
3620 | PCI_DMA_TODEVICE); | ||
3621 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) | ||
3622 | goto dma_error; | ||
3623 | |||
3216 | } | 3624 | } |
3217 | 3625 | ||
3218 | tx_ring->buffer_info[i].skb = skb; | 3626 | tx_ring->buffer_info[i].skb = skb; |
3627 | tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1; | ||
3219 | tx_ring->buffer_info[first].next_to_watch = i; | 3628 | tx_ring->buffer_info[first].next_to_watch = i; |
3220 | 3629 | ||
3221 | return count + 1; | 3630 | return ++count; |
3631 | |||
3632 | dma_error: | ||
3633 | dev_err(&pdev->dev, "TX DMA map failed\n"); | ||
3634 | |||
3635 | /* clear timestamp and dma mappings for failed buffer_info mapping */ | ||
3636 | buffer_info->dma = 0; | ||
3637 | buffer_info->time_stamp = 0; | ||
3638 | buffer_info->length = 0; | ||
3639 | buffer_info->next_to_watch = 0; | ||
3640 | buffer_info->mapped_as_page = false; | ||
3641 | |||
3642 | /* clear timestamp and dma mappings for remaining portion of packet */ | ||
3643 | while (count--) { | ||
3644 | if (i == 0) | ||
3645 | i = tx_ring->count; | ||
3646 | i--; | ||
3647 | buffer_info = &tx_ring->buffer_info[i]; | ||
3648 | igb_unmap_and_free_tx_resource(tx_ring, buffer_info); | ||
3649 | } | ||
3650 | |||
3651 | return 0; | ||
3222 | } | 3652 | } |
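A worked example of the return-value convention in igb_tx_map_adv() above, for an skb with a linear head and three page fragments:

	/* head  -> buffer_info[i]	mapped before the loop, count = 0
	 * frag0 -> buffer_info[i+1]	count = 1
	 * frag1 -> buffer_info[i+2]	count = 2
	 * frag2 -> buffer_info[i+3]	count = 3
	 * return ++count == 4 descriptors mapped. A return of 0 means a
	 * DMA mapping failed and the dma_error path already walked i
	 * backwards, wrapping at the ring boundary, to release the
	 * partial mappings. */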
3223 | 3653 | ||
3224 | static inline void igb_tx_queue_adv(struct igb_adapter *adapter, | 3654 | static inline void igb_tx_queue_adv(struct igb_ring *tx_ring, |
3225 | struct igb_ring *tx_ring, | 3655 | u32 tx_flags, int count, u32 paylen, |
3226 | int tx_flags, int count, u32 paylen, | ||
3227 | u8 hdr_len) | 3656 | u8 hdr_len) |
3228 | { | 3657 | { |
3229 | union e1000_adv_tx_desc *tx_desc = NULL; | 3658 | union e1000_adv_tx_desc *tx_desc; |
3230 | struct igb_buffer *buffer_info; | 3659 | struct igb_buffer *buffer_info; |
3231 | u32 olinfo_status = 0, cmd_type_len; | 3660 | u32 olinfo_status = 0, cmd_type_len; |
3232 | unsigned int i; | 3661 | unsigned int i = tx_ring->next_to_use; |
3233 | 3662 | ||
3234 | cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | | 3663 | cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | |
3235 | E1000_ADVTXD_DCMD_DEXT); | 3664 | E1000_ADVTXD_DCMD_DEXT); |
@@ -3254,27 +3683,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter, | |||
3254 | olinfo_status |= E1000_TXD_POPTS_TXSM << 8; | 3683 | olinfo_status |= E1000_TXD_POPTS_TXSM << 8; |
3255 | } | 3684 | } |
3256 | 3685 | ||
3257 | if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) && | 3686 | if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) && |
3258 | (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | | 3687 | (tx_flags & (IGB_TX_FLAGS_CSUM | |
3688 | IGB_TX_FLAGS_TSO | | ||
3259 | IGB_TX_FLAGS_VLAN))) | 3689 | IGB_TX_FLAGS_VLAN))) |
3260 | olinfo_status |= tx_ring->queue_index << 4; | 3690 | olinfo_status |= tx_ring->reg_idx << 4; |
3261 | 3691 | ||
3262 | olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); | 3692 | olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); |
3263 | 3693 | ||
3264 | i = tx_ring->next_to_use; | 3694 | do { |
3265 | while (count--) { | ||
3266 | buffer_info = &tx_ring->buffer_info[i]; | 3695 | buffer_info = &tx_ring->buffer_info[i]; |
3267 | tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); | 3696 | tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); |
3268 | tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | 3697 | tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); |
3269 | tx_desc->read.cmd_type_len = | 3698 | tx_desc->read.cmd_type_len = |
3270 | cpu_to_le32(cmd_type_len | buffer_info->length); | 3699 | cpu_to_le32(cmd_type_len | buffer_info->length); |
3271 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); | 3700 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
3701 | count--; | ||
3272 | i++; | 3702 | i++; |
3273 | if (i == tx_ring->count) | 3703 | if (i == tx_ring->count) |
3274 | i = 0; | 3704 | i = 0; |
3275 | } | 3705 | } while (count > 0); |
3276 | 3706 | ||
3277 | tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); | 3707 | tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD); |
3278 | /* Force memory writes to complete before letting h/w | 3708 | /* Force memory writes to complete before letting h/w |
3279 | * know there are new descriptors to fetch. (Only | 3709 | * know there are new descriptors to fetch. (Only |
3280 | * applicable for weak-ordered memory model archs, | 3710 | * applicable for weak-ordered memory model archs, |
@@ -3282,16 +3712,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter, | |||
3282 | wmb(); | 3712 | wmb(); |
3283 | 3713 | ||
3284 | tx_ring->next_to_use = i; | 3714 | tx_ring->next_to_use = i; |
3285 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 3715 | writel(i, tx_ring->tail); |
3286 | /* we need this if more than one processor can write to our tail | 3716 | /* we need this if more than one processor can write to our tail |
3287 | * at a time, it synchronizes IO on IA64/Altix systems */ | 3717 | * at a time, it synchronizes IO on IA64/Altix systems */ |
3288 | mmiowb(); | 3718 | mmiowb(); |
3289 | } | 3719 | } |
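The tail write closing igb_tx_queue_adv() follows a fixed ordering contract; a minimal sketch of just that sequence, assuming the ring-local tail pointer this patch introduces:

	static void igb_tx_doorbell(struct igb_ring *tx_ring, unsigned int i)
	{
		wmb();				/* descriptors visible before the
						 * doorbell on weakly ordered archs */
		tx_ring->next_to_use = i;
		writel(i, tx_ring->tail);	/* hw may now fetch up to i */
		mmiowb();			/* order MMIO when several CPUs can
						 * bump the same tail (IA64/Altix) */
	}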
3290 | 3720 | ||
3291 | static int __igb_maybe_stop_tx(struct net_device *netdev, | 3721 | static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) |
3292 | struct igb_ring *tx_ring, int size) | ||
3293 | { | 3722 | { |
3294 | struct igb_adapter *adapter = netdev_priv(netdev); | 3723 | struct net_device *netdev = tx_ring->netdev; |
3295 | 3724 | ||
3296 | netif_stop_subqueue(netdev, tx_ring->queue_index); | 3725 | netif_stop_subqueue(netdev, tx_ring->queue_index); |
3297 | 3726 | ||
@@ -3307,66 +3736,43 @@ static int __igb_maybe_stop_tx(struct net_device *netdev, | |||
3307 | 3736 | ||
3308 | /* A reprieve! */ | 3737 | /* A reprieve! */ |
3309 | netif_wake_subqueue(netdev, tx_ring->queue_index); | 3738 | netif_wake_subqueue(netdev, tx_ring->queue_index); |
3310 | ++adapter->restart_queue; | 3739 | tx_ring->tx_stats.restart_queue++; |
3311 | return 0; | 3740 | return 0; |
3312 | } | 3741 | } |
3313 | 3742 | ||
3314 | static int igb_maybe_stop_tx(struct net_device *netdev, | 3743 | static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size) |
3315 | struct igb_ring *tx_ring, int size) | ||
3316 | { | 3744 | { |
3317 | if (igb_desc_unused(tx_ring) >= size) | 3745 | if (igb_desc_unused(tx_ring) >= size) |
3318 | return 0; | 3746 | return 0; |
3319 | return __igb_maybe_stop_tx(netdev, tx_ring, size); | 3747 | return __igb_maybe_stop_tx(tx_ring, size); |
3320 | } | 3748 | } |
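Together the two functions above implement the stop-then-recheck pattern; a condensed sketch of the whole path (the lines hidden between the hunks, including the barrier and the -EBUSY return, are assumptions based on the visible structure):

	if (igb_desc_unused(tx_ring) >= size)
		return 0;			/* fast path: enough room */

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	smp_mb();				/* stop before the recheck */

	/* recheck: a concurrent igb_clean_tx_irq() may have freed
	 * descriptors between the first test and the stop */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;			/* still full, stay stopped */

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	tx_ring->tx_stats.restart_queue++;
	return 0;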
3321 | 3749 | ||
3322 | static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, | 3750 | netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, |
3323 | struct net_device *netdev, | 3751 | struct igb_ring *tx_ring) |
3324 | struct igb_ring *tx_ring) | ||
3325 | { | 3752 | { |
3326 | struct igb_adapter *adapter = netdev_priv(netdev); | 3753 | struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); |
3327 | unsigned int first; | 3754 | int tso = 0, count; |
3328 | unsigned int tx_flags = 0; | 3755 | u32 tx_flags = 0; |
3756 | u16 first; | ||
3329 | u8 hdr_len = 0; | 3757 | u8 hdr_len = 0; |
3330 | int count = 0; | 3758 | union skb_shared_tx *shtx = skb_tx(skb); |
3331 | int tso = 0; | ||
3332 | union skb_shared_tx *shtx; | ||
3333 | |||
3334 | if (test_bit(__IGB_DOWN, &adapter->state)) { | ||
3335 | dev_kfree_skb_any(skb); | ||
3336 | return NETDEV_TX_OK; | ||
3337 | } | ||
3338 | |||
3339 | if (skb->len <= 0) { | ||
3340 | dev_kfree_skb_any(skb); | ||
3341 | return NETDEV_TX_OK; | ||
3342 | } | ||
3343 | 3759 | ||
3344 | /* need: 1 descriptor per page, | 3760 | /* need: 1 descriptor per page, |
3345 | * + 2 desc gap to keep tail from touching head, | 3761 | * + 2 desc gap to keep tail from touching head, |
3346 | * + 1 desc for skb->data, | 3762 | * + 1 desc for skb->data, |
3347 | * + 1 desc for context descriptor, | 3763 | * + 1 desc for context descriptor, |
3348 | * otherwise try next time */ | 3764 | * otherwise try next time */ |
3349 | if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) { | 3765 | if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) { |
3350 | /* this is a hard error */ | 3766 | /* this is a hard error */ |
3351 | return NETDEV_TX_BUSY; | 3767 | return NETDEV_TX_BUSY; |
3352 | } | 3768 | } |
3353 | 3769 | ||
3354 | /* | ||
3355 | * TODO: check that there currently is no other packet with | ||
3356 | * time stamping in the queue | ||
3357 | * | ||
3358 | * When doing time stamping, keep the connection to the socket | ||
3359 | * a while longer: it is still needed by skb_hwtstamp_tx(), | ||
3360 | * called either in igb_tx_hwtstamp() or by our caller when | ||
3361 | * doing software time stamping. | ||
3362 | */ | ||
3363 | shtx = skb_tx(skb); | ||
3364 | if (unlikely(shtx->hardware)) { | 3770 | if (unlikely(shtx->hardware)) { |
3365 | shtx->in_progress = 1; | 3771 | shtx->in_progress = 1; |
3366 | tx_flags |= IGB_TX_FLAGS_TSTAMP; | 3772 | tx_flags |= IGB_TX_FLAGS_TSTAMP; |
3367 | } | 3773 | } |
3368 | 3774 | ||
3369 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { | 3775 | if (vlan_tx_tag_present(skb) && adapter->vlgrp) { |
3370 | tx_flags |= IGB_TX_FLAGS_VLAN; | 3776 | tx_flags |= IGB_TX_FLAGS_VLAN; |
3371 | tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); | 3777 | tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); |
3372 | } | 3778 | } |
@@ -3375,37 +3781,38 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, | |||
3375 | tx_flags |= IGB_TX_FLAGS_IPV4; | 3781 | tx_flags |= IGB_TX_FLAGS_IPV4; |
3376 | 3782 | ||
3377 | first = tx_ring->next_to_use; | 3783 | first = tx_ring->next_to_use; |
3378 | tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, | 3784 | if (skb_is_gso(skb)) { |
3379 | &hdr_len) : 0; | 3785 | tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len); |
3380 | 3786 | ||
3381 | if (tso < 0) { | 3787 | if (tso < 0) { |
3382 | dev_kfree_skb_any(skb); | 3788 | dev_kfree_skb_any(skb); |
3383 | return NETDEV_TX_OK; | 3789 | return NETDEV_TX_OK; |
3790 | } | ||
3384 | } | 3791 | } |
3385 | 3792 | ||
3386 | if (tso) | 3793 | if (tso) |
3387 | tx_flags |= IGB_TX_FLAGS_TSO; | 3794 | tx_flags |= IGB_TX_FLAGS_TSO; |
3388 | else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) && | 3795 | else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) && |
3389 | (skb->ip_summed == CHECKSUM_PARTIAL)) | 3796 | (skb->ip_summed == CHECKSUM_PARTIAL)) |
3390 | tx_flags |= IGB_TX_FLAGS_CSUM; | 3797 | tx_flags |= IGB_TX_FLAGS_CSUM; |
3391 | 3798 | ||
3392 | /* | 3799 | /* |
3393 | * count reflects descriptors mapped, if 0 then mapping error | 3800 | * count reflects descriptors mapped, if 0 or less then mapping error |
3394 | * has occured and we need to rewind the descriptor queue | 3801 | * has occured and we need to rewind the descriptor queue |
3395 | */ | 3802 | */ |
3396 | count = igb_tx_map_adv(adapter, tx_ring, skb, first); | 3803 | count = igb_tx_map_adv(tx_ring, skb, first); |
3397 | 3804 | if (!count) { | |
3398 | if (count) { | ||
3399 | igb_tx_queue_adv(adapter, tx_ring, tx_flags, count, | ||
3400 | skb->len, hdr_len); | ||
3401 | /* Make sure there is space in the ring for the next send. */ | ||
3402 | igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4); | ||
3403 | } else { | ||
3404 | dev_kfree_skb_any(skb); | 3805 | dev_kfree_skb_any(skb); |
3405 | tx_ring->buffer_info[first].time_stamp = 0; | 3806 | tx_ring->buffer_info[first].time_stamp = 0; |
3406 | tx_ring->next_to_use = first; | 3807 | tx_ring->next_to_use = first; |
3808 | return NETDEV_TX_OK; | ||
3407 | } | 3809 | } |
3408 | 3810 | ||
3811 | igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len); | ||
3812 | |||
3813 | /* Make sure there is space in the ring for the next send. */ | ||
3814 | igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4); | ||
3815 | |||
3409 | return NETDEV_TX_OK; | 3816 | return NETDEV_TX_OK; |
3410 | } | 3817 | } |
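The "nr_frags + 4" budget checked at the top of igb_xmit_frame_ring_adv() adds up as follows for, say, a three-fragment skb:

	/*   3 descriptors, one per page fragment
	 * + 1 descriptor for skb->data
	 * + 1 context descriptor (TSO/checksum offload)
	 * + 2 descriptor gap so the tail never touches the head
	 * = 7 = nr_frags + 4 free descriptors required, else the driver
	 *       returns NETDEV_TX_BUSY and the stack requeues the skb. */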
3411 | 3818 | ||
@@ -3414,8 +3821,18 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, | |||
3414 | { | 3821 | { |
3415 | struct igb_adapter *adapter = netdev_priv(netdev); | 3822 | struct igb_adapter *adapter = netdev_priv(netdev); |
3416 | struct igb_ring *tx_ring; | 3823 | struct igb_ring *tx_ring; |
3417 | |||
3418 | int r_idx = 0; | 3824 | int r_idx = 0; |
3825 | |||
3826 | if (test_bit(__IGB_DOWN, &adapter->state)) { | ||
3827 | dev_kfree_skb_any(skb); | ||
3828 | return NETDEV_TX_OK; | ||
3829 | } | ||
3830 | |||
3831 | if (skb->len <= 0) { | ||
3832 | dev_kfree_skb_any(skb); | ||
3833 | return NETDEV_TX_OK; | ||
3834 | } | ||
3835 | |||
3419 | r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); | 3836 | r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); |
3420 | tx_ring = adapter->multi_tx_table[r_idx]; | 3837 | tx_ring = adapter->multi_tx_table[r_idx]; |
3421 | 3838 | ||
@@ -3423,7 +3840,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, | |||
3423 | * to a flow. Right now, performance is slightly degraded when | 3840 | * to a flow. Right now, performance is slightly degraded when |
3424 | * using multiple tx queues. If the stack breaks away from a | 3841 | * using multiple tx queues. If the stack breaks away from a |
3425 | * single qdisc implementation, we can look at this again. */ | 3842 | * single qdisc implementation, we can look at this again. */ |
3426 | return igb_xmit_frame_ring_adv(skb, netdev, tx_ring); | 3843 | return igb_xmit_frame_ring_adv(skb, tx_ring); |
3427 | } | 3844 | } |
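The ring selection above is a power-of-two mask on queue_mapping; for example, taking IGB_ABS_MAX_TX_QUEUES == 8 purely for illustration:

	/* skb->queue_mapping = 5  ->  r_idx = 5  & 7 = 5
	 * skb->queue_mapping = 11 ->  r_idx = 11 & 7 = 3
	 * so every mapping lands on one of multi_tx_table[0..7]. */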
3428 | 3845 | ||
3429 | /** | 3846 | /** |
@@ -3437,6 +3854,10 @@ static void igb_tx_timeout(struct net_device *netdev) | |||
3437 | 3854 | ||
3438 | /* Do the reset outside of interrupt context */ | 3855 | /* Do the reset outside of interrupt context */ |
3439 | adapter->tx_timeout_count++; | 3856 | adapter->tx_timeout_count++; |
3857 | |||
3858 | if (hw->mac.type == e1000_82580) | ||
3859 | hw->dev_spec._82575.global_device_reset = true; | ||
3860 | |||
3440 | schedule_work(&adapter->reset_task); | 3861 | schedule_work(&adapter->reset_task); |
3441 | wr32(E1000_EICS, | 3862 | wr32(E1000_EICS, |
3442 | (adapter->eims_enable_mask & ~adapter->eims_other)); | 3863 | (adapter->eims_enable_mask & ~adapter->eims_other)); |
@@ -3459,10 +3880,8 @@ static void igb_reset_task(struct work_struct *work) | |||
3459 | **/ | 3880 | **/ |
3460 | static struct net_device_stats *igb_get_stats(struct net_device *netdev) | 3881 | static struct net_device_stats *igb_get_stats(struct net_device *netdev) |
3461 | { | 3882 | { |
3462 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
3463 | |||
3464 | /* only return the current stats */ | 3883 | /* only return the current stats */ |
3465 | return &adapter->net_stats; | 3884 | return &netdev->stats; |
3466 | } | 3885 | } |
3467 | 3886 | ||
3468 | /** | 3887 | /** |
@@ -3475,16 +3894,17 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev) | |||
3475 | static int igb_change_mtu(struct net_device *netdev, int new_mtu) | 3894 | static int igb_change_mtu(struct net_device *netdev, int new_mtu) |
3476 | { | 3895 | { |
3477 | struct igb_adapter *adapter = netdev_priv(netdev); | 3896 | struct igb_adapter *adapter = netdev_priv(netdev); |
3897 | struct pci_dev *pdev = adapter->pdev; | ||
3478 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | 3898 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
3899 | u32 rx_buffer_len, i; | ||
3479 | 3900 | ||
3480 | if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || | 3901 | if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
3481 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | 3902 | dev_err(&pdev->dev, "Invalid MTU setting\n"); |
3482 | dev_err(&adapter->pdev->dev, "Invalid MTU setting\n"); | ||
3483 | return -EINVAL; | 3903 | return -EINVAL; |
3484 | } | 3904 | } |
3485 | 3905 | ||
3486 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { | 3906 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { |
3487 | dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); | 3907 | dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); |
3488 | return -EINVAL; | 3908 | return -EINVAL; |
3489 | } | 3909 | } |
3490 | 3910 | ||
@@ -3493,8 +3913,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) | |||
3493 | 3913 | ||
3494 | /* igb_down has a dependency on max_frame_size */ | 3914 | /* igb_down has a dependency on max_frame_size */ |
3495 | adapter->max_frame_size = max_frame; | 3915 | adapter->max_frame_size = max_frame; |
3496 | if (netif_running(netdev)) | ||
3497 | igb_down(adapter); | ||
3498 | 3916 | ||
3499 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | 3917 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
3500 | * means we reserve 2 more, this pushes us to allocate from the next | 3918 | * means we reserve 2 more, this pushes us to allocate from the next |
@@ -3502,35 +3920,23 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) | |||
3502 | * i.e. RXBUFFER_2048 --> size-4096 slab | 3920 | * i.e. RXBUFFER_2048 --> size-4096 slab |
3503 | */ | 3921 | */ |
3504 | 3922 | ||
3505 | if (max_frame <= IGB_RXBUFFER_256) | 3923 | if (max_frame <= IGB_RXBUFFER_1024) |
3506 | adapter->rx_buffer_len = IGB_RXBUFFER_256; | 3924 | rx_buffer_len = IGB_RXBUFFER_1024; |
3507 | else if (max_frame <= IGB_RXBUFFER_512) | 3925 | else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) |
3508 | adapter->rx_buffer_len = IGB_RXBUFFER_512; | 3926 | rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
3509 | else if (max_frame <= IGB_RXBUFFER_1024) | ||
3510 | adapter->rx_buffer_len = IGB_RXBUFFER_1024; | ||
3511 | else if (max_frame <= IGB_RXBUFFER_2048) | ||
3512 | adapter->rx_buffer_len = IGB_RXBUFFER_2048; | ||
3513 | else | 3927 | else |
3514 | #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 | 3928 | rx_buffer_len = IGB_RXBUFFER_128; |
3515 | adapter->rx_buffer_len = IGB_RXBUFFER_16384; | ||
3516 | #else | ||
3517 | adapter->rx_buffer_len = PAGE_SIZE / 2; | ||
3518 | #endif | ||
3519 | 3929 | ||
3520 | /* if sr-iov is enabled we need to force buffer size to 1K or larger */ | 3930 | if (netif_running(netdev)) |
3521 | if (adapter->vfs_allocated_count && | 3931 | igb_down(adapter); |
3522 | (adapter->rx_buffer_len < IGB_RXBUFFER_1024)) | ||
3523 | adapter->rx_buffer_len = IGB_RXBUFFER_1024; | ||
3524 | |||
3525 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | ||
3526 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || | ||
3527 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)) | ||
3528 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; | ||
3529 | 3932 | ||
3530 | dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", | 3933 | dev_info(&pdev->dev, "changing MTU from %d to %d\n", |
3531 | netdev->mtu, new_mtu); | 3934 | netdev->mtu, new_mtu); |
3532 | netdev->mtu = new_mtu; | 3935 | netdev->mtu = new_mtu; |
3533 | 3936 | ||
3937 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3938 | adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len; | ||
3939 | |||
3534 | if (netif_running(netdev)) | 3940 | if (netif_running(netdev)) |
3535 | igb_up(adapter); | 3941 | igb_up(adapter); |
3536 | else | 3942 | else |
@@ -3548,9 +3954,13 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) | |||
3548 | 3954 | ||
3549 | void igb_update_stats(struct igb_adapter *adapter) | 3955 | void igb_update_stats(struct igb_adapter *adapter) |
3550 | { | 3956 | { |
3957 | struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); | ||
3551 | struct e1000_hw *hw = &adapter->hw; | 3958 | struct e1000_hw *hw = &adapter->hw; |
3552 | struct pci_dev *pdev = adapter->pdev; | 3959 | struct pci_dev *pdev = adapter->pdev; |
3960 | u32 reg, mpc; | ||
3553 | u16 phy_tmp; | 3961 | u16 phy_tmp; |
3962 | int i; | ||
3963 | u64 bytes, packets; | ||
3554 | 3964 | ||
3555 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF | 3965 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF |
3556 | 3966 | ||
@@ -3563,6 +3973,31 @@ void igb_update_stats(struct igb_adapter *adapter) | |||
3563 | if (pci_channel_offline(pdev)) | 3973 | if (pci_channel_offline(pdev)) |
3564 | return; | 3974 | return; |
3565 | 3975 | ||
3976 | bytes = 0; | ||
3977 | packets = 0; | ||
3978 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
3979 | u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; | ||
3980 | struct igb_ring *ring = adapter->rx_ring[i]; | ||
3981 | ring->rx_stats.drops += rqdpc_tmp; | ||
3982 | net_stats->rx_fifo_errors += rqdpc_tmp; | ||
3983 | bytes += ring->rx_stats.bytes; | ||
3984 | packets += ring->rx_stats.packets; | ||
3985 | } | ||
3986 | |||
3987 | net_stats->rx_bytes = bytes; | ||
3988 | net_stats->rx_packets = packets; | ||
3989 | |||
3990 | bytes = 0; | ||
3991 | packets = 0; | ||
3992 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
3993 | struct igb_ring *ring = adapter->tx_ring[i]; | ||
3994 | bytes += ring->tx_stats.bytes; | ||
3995 | packets += ring->tx_stats.packets; | ||
3996 | } | ||
3997 | net_stats->tx_bytes = bytes; | ||
3998 | net_stats->tx_packets = packets; | ||
3999 | |||
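The new loops replace the single global roll-up with per-ring aggregation; in outline, reading the mask as "keep the low 12 bits of RQDPC":

	/* For each RX ring i:
	 *	RQDPC(i) & 0x0FFF	-> ring->rx_stats.drops and
	 *				   net_stats->rx_fifo_errors
	 *	ring->rx_stats.bytes	-> net_stats->rx_bytes
	 *	ring->rx_stats.packets	-> net_stats->rx_packets
	 * TX rings contribute bytes/packets the same way; there is no
	 * per-queue TX drop counter to fold in. */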
4000 | /* read stats registers */ | ||
3566 | adapter->stats.crcerrs += rd32(E1000_CRCERRS); | 4001 | adapter->stats.crcerrs += rd32(E1000_CRCERRS); |
3567 | adapter->stats.gprc += rd32(E1000_GPRC); | 4002 | adapter->stats.gprc += rd32(E1000_GPRC); |
3568 | adapter->stats.gorc += rd32(E1000_GORCL); | 4003 | adapter->stats.gorc += rd32(E1000_GORCL); |
@@ -3580,7 +4015,9 @@ void igb_update_stats(struct igb_adapter *adapter) | |||
3580 | adapter->stats.symerrs += rd32(E1000_SYMERRS); | 4015 | adapter->stats.symerrs += rd32(E1000_SYMERRS); |
3581 | adapter->stats.sec += rd32(E1000_SEC); | 4016 | adapter->stats.sec += rd32(E1000_SEC); |
3582 | 4017 | ||
3583 | adapter->stats.mpc += rd32(E1000_MPC); | 4018 | mpc = rd32(E1000_MPC); |
4019 | adapter->stats.mpc += mpc; | ||
4020 | net_stats->rx_fifo_errors += mpc; | ||
3584 | adapter->stats.scc += rd32(E1000_SCC); | 4021 | adapter->stats.scc += rd32(E1000_SCC); |
3585 | adapter->stats.ecol += rd32(E1000_ECOL); | 4022 | adapter->stats.ecol += rd32(E1000_ECOL); |
3586 | adapter->stats.mcc += rd32(E1000_MCC); | 4023 | adapter->stats.mcc += rd32(E1000_MCC); |
@@ -3613,16 +4050,17 @@ void igb_update_stats(struct igb_adapter *adapter) | |||
3613 | adapter->stats.mptc += rd32(E1000_MPTC); | 4050 | adapter->stats.mptc += rd32(E1000_MPTC); |
3614 | adapter->stats.bptc += rd32(E1000_BPTC); | 4051 | adapter->stats.bptc += rd32(E1000_BPTC); |
3615 | 4052 | ||
3616 | /* used for adaptive IFS */ | 4053 | adapter->stats.tpt += rd32(E1000_TPT); |
3617 | 4054 | adapter->stats.colc += rd32(E1000_COLC); | |
3618 | hw->mac.tx_packet_delta = rd32(E1000_TPT); | ||
3619 | adapter->stats.tpt += hw->mac.tx_packet_delta; | ||
3620 | hw->mac.collision_delta = rd32(E1000_COLC); | ||
3621 | adapter->stats.colc += hw->mac.collision_delta; | ||
3622 | 4055 | ||
3623 | adapter->stats.algnerrc += rd32(E1000_ALGNERRC); | 4056 | adapter->stats.algnerrc += rd32(E1000_ALGNERRC); |
3624 | adapter->stats.rxerrc += rd32(E1000_RXERRC); | 4057 | /* read internal phy specific stats */ |
3625 | adapter->stats.tncrs += rd32(E1000_TNCRS); | 4058 | reg = rd32(E1000_CTRL_EXT); |
4059 | if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { | ||
4060 | adapter->stats.rxerrc += rd32(E1000_RXERRC); | ||
4061 | adapter->stats.tncrs += rd32(E1000_TNCRS); | ||
4062 | } | ||
4063 | |||
3626 | adapter->stats.tsctc += rd32(E1000_TSCTC); | 4064 | adapter->stats.tsctc += rd32(E1000_TSCTC); |
3627 | adapter->stats.tsctfc += rd32(E1000_TSCTFC); | 4065 | adapter->stats.tsctfc += rd32(E1000_TSCTFC); |
3628 | 4066 | ||
@@ -3637,56 +4075,29 @@ void igb_update_stats(struct igb_adapter *adapter) | |||
3637 | adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); | 4075 | adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); |
3638 | 4076 | ||
3639 | /* Fill out the OS statistics structure */ | 4077 | /* Fill out the OS statistics structure */ |
3640 | adapter->net_stats.multicast = adapter->stats.mprc; | 4078 | net_stats->multicast = adapter->stats.mprc; |
3641 | adapter->net_stats.collisions = adapter->stats.colc; | 4079 | net_stats->collisions = adapter->stats.colc; |
3642 | 4080 | ||
3643 | /* Rx Errors */ | 4081 | /* Rx Errors */ |
3644 | 4082 | ||
3645 | if (hw->mac.type != e1000_82575) { | ||
3646 | u32 rqdpc_tmp; | ||
3647 | u64 rqdpc_total = 0; | ||
3648 | int i; | ||
3649 | /* Read out drop stats per RX queue. Notice RQDPC (Receive | ||
3650 | * Queue Drop Packet Count) stats only get incremented if | ||
3651 | * the DROP_EN bit is set (in the SRRCTL register for that | ||
3652 | * queue). If the DROP_EN bit is NOT set, then a somewhat | ||
3653 | * equivalent count is stored in RNBC (not on a per-queue basis). | ||
3654 | * Also note the drop count is due to lack of available | ||
3655 | * descriptors. | ||
3656 | */ | ||
3657 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
3658 | rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF; | ||
3659 | adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp; | ||
3660 | rqdpc_total += adapter->rx_ring[i].rx_stats.drops; | ||
3661 | } | ||
3662 | adapter->net_stats.rx_fifo_errors = rqdpc_total; | ||
3663 | } | ||
3664 | |||
3665 | /* Note RNBC (Receive No Buffers Count) is not an exact | ||
3666 | * drop count as the hardware FIFO might save the day. That's | ||
3667 | * one of the reasons for saving it in rx_fifo_errors, as it is | ||
3668 | * potentially not a true drop. | ||
3669 | */ | ||
3670 | adapter->net_stats.rx_fifo_errors += adapter->stats.rnbc; | ||
3671 | |||
3672 | /* RLEC on some newer hardware can be incorrect so build | 4083 | /* RLEC on some newer hardware can be incorrect so build |
3673 | * our own version based on RUC and ROC */ | 4084 | * our own version based on RUC and ROC */ |
3674 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + | 4085 | net_stats->rx_errors = adapter->stats.rxerrc + |
3675 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 4086 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
3676 | adapter->stats.ruc + adapter->stats.roc + | 4087 | adapter->stats.ruc + adapter->stats.roc + |
3677 | adapter->stats.cexterr; | 4088 | adapter->stats.cexterr; |
3678 | adapter->net_stats.rx_length_errors = adapter->stats.ruc + | 4089 | net_stats->rx_length_errors = adapter->stats.ruc + |
3679 | adapter->stats.roc; | 4090 | adapter->stats.roc; |
3680 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; | 4091 | net_stats->rx_crc_errors = adapter->stats.crcerrs; |
3681 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; | 4092 | net_stats->rx_frame_errors = adapter->stats.algnerrc; |
3682 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; | 4093 | net_stats->rx_missed_errors = adapter->stats.mpc; |
3683 | 4094 | ||
3684 | /* Tx Errors */ | 4095 | /* Tx Errors */ |
3685 | adapter->net_stats.tx_errors = adapter->stats.ecol + | 4096 | net_stats->tx_errors = adapter->stats.ecol + |
3686 | adapter->stats.latecol; | 4097 | adapter->stats.latecol; |
3687 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; | 4098 | net_stats->tx_aborted_errors = adapter->stats.ecol; |
3688 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; | 4099 | net_stats->tx_window_errors = adapter->stats.latecol; |
3689 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; | 4100 | net_stats->tx_carrier_errors = adapter->stats.tncrs; |
3690 | 4101 | ||
3691 | /* Tx Dropped needs to be maintained elsewhere */ | 4102 | /* Tx Dropped needs to be maintained elsewhere */ |
3692 | 4103 | ||
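In summary form, the roll-up above computes:

	/* rx_errors         = rxerrc + crcerrs + algnerrc + ruc + roc
	 *                     + cexterr
	 * rx_length_errors  = ruc + roc	(undersize + oversize stand
	 *					 in for the unreliable RLEC)
	 * rx_crc_errors     = crcerrs
	 * rx_frame_errors   = algnerrc
	 * rx_missed_errors  = mpc
	 * tx_errors         = ecol + latecol
	 * tx_aborted_errors = ecol
	 * tx_window_errors  = latecol
	 * tx_carrier_errors = tncrs */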
@@ -3707,14 +4118,15 @@ void igb_update_stats(struct igb_adapter *adapter) | |||
3707 | 4118 | ||
3708 | static irqreturn_t igb_msix_other(int irq, void *data) | 4119 | static irqreturn_t igb_msix_other(int irq, void *data) |
3709 | { | 4120 | { |
3710 | struct net_device *netdev = data; | 4121 | struct igb_adapter *adapter = data; |
3711 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
3712 | struct e1000_hw *hw = &adapter->hw; | 4122 | struct e1000_hw *hw = &adapter->hw; |
3713 | u32 icr = rd32(E1000_ICR); | 4123 | u32 icr = rd32(E1000_ICR); |
3714 | |||
3715 | /* reading ICR causes bit 31 of EICR to be cleared */ | 4124 | /* reading ICR causes bit 31 of EICR to be cleared */ |
3716 | 4125 | ||
3717 | if(icr & E1000_ICR_DOUTSYNC) { | 4126 | if (icr & E1000_ICR_DRSTA) |
4127 | schedule_work(&adapter->reset_task); | ||
4128 | |||
4129 | if (icr & E1000_ICR_DOUTSYNC) { | ||
3718 | /* HW is reporting DMA is out of sync */ | 4130 | /* HW is reporting DMA is out of sync */ |
3719 | adapter->stats.doosync++; | 4131 | adapter->stats.doosync++; |
3720 | } | 4132 | } |
@@ -3730,125 +4142,91 @@ static irqreturn_t igb_msix_other(int irq, void *data) | |||
3730 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 4142 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
3731 | } | 4143 | } |
3732 | 4144 | ||
3733 | wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB); | 4145 | if (adapter->vfs_allocated_count) |
4146 | wr32(E1000_IMS, E1000_IMS_LSC | | ||
4147 | E1000_IMS_VMMB | | ||
4148 | E1000_IMS_DOUTSYNC); | ||
4149 | else | ||
4150 | wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC); | ||
3734 | wr32(E1000_EIMS, adapter->eims_other); | 4151 | wr32(E1000_EIMS, adapter->eims_other); |
3735 | 4152 | ||
3736 | return IRQ_HANDLED; | 4153 | return IRQ_HANDLED; |
3737 | } | 4154 | } |
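The re-arm at the end of igb_msix_other() differs only by the mailbox bit; a compact equivalent of the two-way write, using the same E1000_IMS_* definitions:

	u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;

	if (adapter->vfs_allocated_count)
		ims |= E1000_IMS_VMMB;	/* mailbox interrupts only matter
					 * when SR-IOV VFs exist */
	wr32(E1000_IMS, ims);
	wr32(E1000_EIMS, adapter->eims_other);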
3738 | 4155 | ||
3739 | static irqreturn_t igb_msix_tx(int irq, void *data) | 4156 | static void igb_write_itr(struct igb_q_vector *q_vector) |
3740 | { | 4157 | { |
3741 | struct igb_ring *tx_ring = data; | 4158 | struct igb_adapter *adapter = q_vector->adapter; |
3742 | struct igb_adapter *adapter = tx_ring->adapter; | 4159 | u32 itr_val = q_vector->itr_val & 0x7FFC; |
3743 | struct e1000_hw *hw = &adapter->hw; | ||
3744 | 4160 | ||
3745 | #ifdef CONFIG_IGB_DCA | 4161 | if (!q_vector->set_itr) |
3746 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) | 4162 | return; |
3747 | igb_update_tx_dca(tx_ring); | ||
3748 | #endif | ||
3749 | 4163 | ||
3750 | tx_ring->total_bytes = 0; | 4164 | if (!itr_val) |
3751 | tx_ring->total_packets = 0; | 4165 | itr_val = 0x4; |
3752 | 4166 | ||
3753 | /* auto mask will automatically reenable the interrupt when we write | 4167 | if (adapter->hw.mac.type == e1000_82575) |
3754 | * EICS */ | 4168 | itr_val |= itr_val << 16; |
3755 | if (!igb_clean_tx_irq(tx_ring)) | ||
3756 | /* Ring was not completely cleaned, so fire another interrupt */ | ||
3757 | wr32(E1000_EICS, tx_ring->eims_value); | ||
3758 | else | 4169 | else |
3759 | wr32(E1000_EIMS, tx_ring->eims_value); | 4170 | itr_val |= 0x8000000; |
3760 | 4171 | ||
3761 | return IRQ_HANDLED; | 4172 | writel(itr_val, q_vector->itr_register); |
3762 | } | 4173 | q_vector->set_itr = 0; |
3763 | |||
3764 | static void igb_write_itr(struct igb_ring *ring) | ||
3765 | { | ||
3766 | struct e1000_hw *hw = &ring->adapter->hw; | ||
3767 | if ((ring->adapter->itr_setting & 3) && ring->set_itr) { | ||
3768 | switch (hw->mac.type) { | ||
3769 | case e1000_82576: | ||
3770 | wr32(ring->itr_register, ring->itr_val | | ||
3771 | 0x80000000); | ||
3772 | break; | ||
3773 | default: | ||
3774 | wr32(ring->itr_register, ring->itr_val | | ||
3775 | (ring->itr_val << 16)); | ||
3776 | break; | ||
3777 | } | ||
3778 | ring->set_itr = 0; | ||
3779 | } | ||
3780 | } | 4174 | } |
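For reference, the EITR encoding the new igb_write_itr() performs can be checked in isolation. A minimal user-space sketch reusing the constants visible above (the register bit layout itself is an assumption taken from this diff, not from a datasheet):

#include <stdint.h>
#include <stdio.h>

/* mirrors igb_write_itr(): 82575 replicates the interval into both
 * 16-bit halves; newer parts OR in a high control bit instead */
static uint32_t encode_eitr(uint32_t itr_val, int is_82575)
{
	uint32_t v = itr_val & 0x7FFC;	/* keep bits 2..14, as above */

	if (!v)
		v = 0x4;		/* minimum non-zero interval */

	return is_82575 ? (v | (v << 16)) : (v | 0x8000000);
}

int main(void)
{
	printf("82575: %#010x\n", encode_eitr(0x03A0, 1));	/* 0x03a003a0 */
	printf("82576: %#010x\n", encode_eitr(0x03A0, 0));	/* 0x080003a0 */
	return 0;
}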
3781 | 4175 | ||
3782 | static irqreturn_t igb_msix_rx(int irq, void *data) | 4176 | static irqreturn_t igb_msix_ring(int irq, void *data) |
3783 | { | 4177 | { |
3784 | struct igb_ring *rx_ring = data; | 4178 | struct igb_q_vector *q_vector = data; |
3785 | 4179 | ||
3786 | /* Write the ITR value calculated at the end of the | 4180 | /* Write the ITR value calculated from the previous interrupt. */ |
3787 | * previous interrupt. | 4181 | igb_write_itr(q_vector); |
3788 | */ | ||
3789 | 4182 | ||
3790 | igb_write_itr(rx_ring); | 4183 | napi_schedule(&q_vector->napi); |
3791 | 4184 | ||
3792 | if (napi_schedule_prep(&rx_ring->napi)) | 4185 | return IRQ_HANDLED; |
3793 | __napi_schedule(&rx_ring->napi); | ||
3794 | |||
3795 | #ifdef CONFIG_IGB_DCA | ||
3796 | if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) | ||
3797 | igb_update_rx_dca(rx_ring); | ||
3798 | #endif | ||
3799 | return IRQ_HANDLED; | ||
3800 | } | 4186 | } |
3801 | 4187 | ||
3802 | #ifdef CONFIG_IGB_DCA | 4188 | #ifdef CONFIG_IGB_DCA |
3803 | static void igb_update_rx_dca(struct igb_ring *rx_ring) | 4189 | static void igb_update_dca(struct igb_q_vector *q_vector) |
3804 | { | 4190 | { |
3805 | u32 dca_rxctrl; | 4191 | struct igb_adapter *adapter = q_vector->adapter; |
3806 | struct igb_adapter *adapter = rx_ring->adapter; | ||
3807 | struct e1000_hw *hw = &adapter->hw; | 4192 | struct e1000_hw *hw = &adapter->hw; |
3808 | int cpu = get_cpu(); | 4193 | int cpu = get_cpu(); |
3809 | int q = rx_ring->reg_idx; | ||
3810 | 4194 | ||
3811 | if (rx_ring->cpu != cpu) { | 4195 | if (q_vector->cpu == cpu) |
3812 | dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); | 4196 | goto out_no_update; |
3813 | if (hw->mac.type == e1000_82576) { | 4197 | |
3814 | dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; | 4198 | if (q_vector->tx_ring) { |
3815 | dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << | 4199 | int q = q_vector->tx_ring->reg_idx; |
3816 | E1000_DCA_RXCTRL_CPUID_SHIFT; | 4200 | u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); |
4201 | if (hw->mac.type == e1000_82575) { | ||
4202 | dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; | ||
4203 | dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | ||
3817 | } else { | 4204 | } else { |
4205 | dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; | ||
4206 | dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
4207 | E1000_DCA_TXCTRL_CPUID_SHIFT; | ||
4208 | } | ||
4209 | dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; | ||
4210 | wr32(E1000_DCA_TXCTRL(q), dca_txctrl); | ||
4211 | } | ||
4212 | if (q_vector->rx_ring) { | ||
4213 | int q = q_vector->rx_ring->reg_idx; | ||
4214 | u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); | ||
4215 | if (hw->mac.type == e1000_82575) { | ||
3818 | dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; | 4216 | dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; |
3819 | dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | 4217 | dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); |
4218 | } else { | ||
4219 | dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; | ||
4220 | dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
4221 | E1000_DCA_RXCTRL_CPUID_SHIFT; | ||
3820 | } | 4222 | } |
3821 | dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; | 4223 | dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; |
3822 | dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; | 4224 | dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; |
3823 | dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; | 4225 | dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; |
3824 | wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); | 4226 | wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); |
3825 | rx_ring->cpu = cpu; | ||
3826 | } | ||
3827 | put_cpu(); | ||
3828 | } | ||
3829 | |||
3830 | static void igb_update_tx_dca(struct igb_ring *tx_ring) | ||
3831 | { | ||
3832 | u32 dca_txctrl; | ||
3833 | struct igb_adapter *adapter = tx_ring->adapter; | ||
3834 | struct e1000_hw *hw = &adapter->hw; | ||
3835 | int cpu = get_cpu(); | ||
3836 | int q = tx_ring->reg_idx; | ||
3837 | |||
3838 | if (tx_ring->cpu != cpu) { | ||
3839 | dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); | ||
3840 | if (hw->mac.type == e1000_82576) { | ||
3841 | dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; | ||
3842 | dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
3843 | E1000_DCA_TXCTRL_CPUID_SHIFT; | ||
3844 | } else { | ||
3845 | dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; | ||
3846 | dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | ||
3847 | } | ||
3848 | dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; | ||
3849 | wr32(E1000_DCA_TXCTRL(q), dca_txctrl); | ||
3850 | tx_ring->cpu = cpu; | ||
3851 | } | 4227 | } |
4228 | q_vector->cpu = cpu; | ||
4229 | out_no_update: | ||
3852 | put_cpu(); | 4230 | put_cpu(); |
3853 | } | 4231 | } |
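Both the Tx and Rx branches of the merged igb_update_dca() follow the same read-modify-write shape: clear the CPUID field, install the DCA tag for the current CPU, then OR in the enable bits and write back. A generic sketch of that field update (the mask and shift values are placeholders; the real ones come from e1000_defines.h and differ between 82575 and 82576):

#include <stdint.h>

/* clear the old CPU tag and install a new one; the DESC/HEAD/DATA
 * enable bits are then ORed in separately, as igb_update_dca() does */
static uint32_t set_dca_cpuid(uint32_t reg, uint32_t tag,
			      uint32_t mask, unsigned int shift)
{
	reg &= ~mask;
	reg |= (tag << shift) & mask;
	return reg;
}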
3854 | 4232 | ||
@@ -3863,13 +4241,9 @@ static void igb_setup_dca(struct igb_adapter *adapter) | |||
3863 | /* Always use CB2 mode, difference is masked in the CB driver. */ | 4241 | /* Always use CB2 mode, difference is masked in the CB driver. */ |
3864 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); | 4242 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); |
3865 | 4243 | ||
3866 | for (i = 0; i < adapter->num_tx_queues; i++) { | 4244 | for (i = 0; i < adapter->num_q_vectors; i++) { |
3867 | adapter->tx_ring[i].cpu = -1; | 4245 | adapter->q_vector[i]->cpu = -1; |
3868 | igb_update_tx_dca(&adapter->tx_ring[i]); | 4246 | igb_update_dca(adapter->q_vector[i]); |
3869 | } | ||
3870 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
3871 | adapter->rx_ring[i].cpu = -1; | ||
3872 | igb_update_rx_dca(&adapter->rx_ring[i]); | ||
3873 | } | 4247 | } |
3874 | } | 4248 | } |
3875 | 4249 | ||
@@ -3877,6 +4251,7 @@ static int __igb_notify_dca(struct device *dev, void *data) | |||
3877 | { | 4251 | { |
3878 | struct net_device *netdev = dev_get_drvdata(dev); | 4252 | struct net_device *netdev = dev_get_drvdata(dev); |
3879 | struct igb_adapter *adapter = netdev_priv(netdev); | 4253 | struct igb_adapter *adapter = netdev_priv(netdev); |
4254 | struct pci_dev *pdev = adapter->pdev; | ||
3880 | struct e1000_hw *hw = &adapter->hw; | 4255 | struct e1000_hw *hw = &adapter->hw; |
3881 | unsigned long event = *(unsigned long *)data; | 4256 | unsigned long event = *(unsigned long *)data; |
3882 | 4257 | ||
@@ -3885,12 +4260,9 @@ static int __igb_notify_dca(struct device *dev, void *data) | |||
3885 | /* if already enabled, don't do it again */ | 4260 | /* if already enabled, don't do it again */ |
3886 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) | 4261 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) |
3887 | break; | 4262 | break; |
3888 | /* Always use CB2 mode, difference is masked | ||
3889 | * in the CB driver. */ | ||
3890 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); | ||
3891 | if (dca_add_requester(dev) == 0) { | 4263 | if (dca_add_requester(dev) == 0) { |
3892 | adapter->flags |= IGB_FLAG_DCA_ENABLED; | 4264 | adapter->flags |= IGB_FLAG_DCA_ENABLED; |
3893 | dev_info(&adapter->pdev->dev, "DCA enabled\n"); | 4265 | dev_info(&pdev->dev, "DCA enabled\n"); |
3894 | igb_setup_dca(adapter); | 4266 | igb_setup_dca(adapter); |
3895 | break; | 4267 | break; |
3896 | } | 4268 | } |
@@ -3898,9 +4270,9 @@ static int __igb_notify_dca(struct device *dev, void *data) | |||
3898 | case DCA_PROVIDER_REMOVE: | 4270 | case DCA_PROVIDER_REMOVE: |
3899 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { | 4271 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { |
3900 | /* without this a class_device is left | 4272 | /* without this a class_device is left |
3901 | * hanging around in the sysfs model */ | 4273 | * hanging around in the sysfs model */ |
3902 | dca_remove_requester(dev); | 4274 | dca_remove_requester(dev); |
3903 | dev_info(&adapter->pdev->dev, "DCA disabled\n"); | 4275 | dev_info(&pdev->dev, "DCA disabled\n"); |
3904 | adapter->flags &= ~IGB_FLAG_DCA_ENABLED; | 4276 | adapter->flags &= ~IGB_FLAG_DCA_ENABLED; |
3905 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); | 4277 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); |
3906 | } | 4278 | } |
@@ -3930,12 +4302,51 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter) | |||
3930 | 4302 | ||
3931 | for (i = 0 ; i < adapter->vfs_allocated_count; i++) { | 4303 | for (i = 0 ; i < adapter->vfs_allocated_count; i++) { |
3932 | ping = E1000_PF_CONTROL_MSG; | 4304 | ping = E1000_PF_CONTROL_MSG; |
3933 | if (adapter->vf_data[i].clear_to_send) | 4305 | if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) |
3934 | ping |= E1000_VT_MSGTYPE_CTS; | 4306 | ping |= E1000_VT_MSGTYPE_CTS; |
3935 | igb_write_mbx(hw, &ping, 1, i); | 4307 | igb_write_mbx(hw, &ping, 1, i); |
3936 | } | 4308 | } |
3937 | } | 4309 | } |
3938 | 4310 | ||
4311 | static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) | ||
4312 | { | ||
4313 | struct e1000_hw *hw = &adapter->hw; | ||
4314 | u32 vmolr = rd32(E1000_VMOLR(vf)); | ||
4315 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; | ||
4316 | |||
4317 | vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | ||
4318 | IGB_VF_FLAG_MULTI_PROMISC); | ||
4319 | vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); | ||
4320 | |||
4321 | if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { | ||
4322 | vmolr |= E1000_VMOLR_MPME; | ||
4323 | *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; | ||
4324 | } else { | ||
4325 | /* | ||
4326 | * if we have hashes and we are clearing a multicast promisc | ||
4327 | * flag, we need to write the hashes to the MTA, as this step | ||
4328 | * was previously skipped | ||
4329 | */ | ||
4330 | if (vf_data->num_vf_mc_hashes > 30) { | ||
4331 | vmolr |= E1000_VMOLR_MPME; | ||
4332 | } else if (vf_data->num_vf_mc_hashes) { | ||
4333 | int j; | ||
4334 | vmolr |= E1000_VMOLR_ROMPE; | ||
4335 | for (j = 0; j < vf_data->num_vf_mc_hashes; j++) | ||
4336 | igb_mta_set(hw, vf_data->vf_mc_hashes[j]); | ||
4337 | } | ||
4338 | } | ||
4339 | |||
4340 | wr32(E1000_VMOLR(vf), vmolr); | ||
4341 | |||
4342 | /* if any flags are left unprocessed, the request is not supported */ | ||
4343 | if (*msgbuf & E1000_VT_MSGINFO_MASK) | ||
4344 | return -EINVAL; | ||
4345 | |||
4346 | return 0; | ||
4347 | |||
4348 | } | ||
4349 | |||
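The handler above, like the other mailbox handlers, treats msgbuf[0] as an opcode in the low 16 bits plus per-opcode flag bits above them; any flag bit it does not consume triggers the -EINVAL NACK. A sketch of that split, assuming the E1000_VT_MSGINFO_* convention from e1000_mbx.h:

#include <stdint.h>

#define VT_MSGINFO_SHIFT 16			/* assumed, per e1000_mbx.h */
#define VT_MSGINFO_MASK  (0xFFu << VT_MSGINFO_SHIFT)

static uint16_t vf_msg_opcode(uint32_t msg)
{
	return msg & 0xFFFF;	/* matches the switch in igb_rcv_msg_from_vf */
}

static uint8_t vf_msg_flags(uint32_t msg)
{
	return (msg & VT_MSGINFO_MASK) >> VT_MSGINFO_SHIFT;
}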
3939 | static int igb_set_vf_multicasts(struct igb_adapter *adapter, | 4350 | static int igb_set_vf_multicasts(struct igb_adapter *adapter, |
3940 | u32 *msgbuf, u32 vf) | 4351 | u32 *msgbuf, u32 vf) |
3941 | { | 4352 | { |
@@ -3944,18 +4355,17 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter, | |||
3944 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; | 4355 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; |
3945 | int i; | 4356 | int i; |
3946 | 4357 | ||
3947 | /* only up to 30 hash values supported */ | 4358 | /* salt away the number of multicast addresses assigned |
3948 | if (n > 30) | ||
3949 | n = 30; | ||
3950 | |||
3951 | /* salt away the number of multicast addresses assigned | ||
3952 | * to this VF for later use to restore when the PF multicast | 4359 | * to this VF for later use to restore when the PF multicast |
3953 | * list changes | 4360 | * list changes |
3954 | */ | 4361 | */ |
3955 | vf_data->num_vf_mc_hashes = n; | 4362 | vf_data->num_vf_mc_hashes = n; |
3956 | 4363 | ||
3957 | /* VFs are limited to using the MTA hash table for their multicast | 4364 | /* only up to 30 hash values supported */ |
3958 | * addresses */ | 4365 | if (n > 30) |
4366 | n = 30; | ||
4367 | |||
4368 | /* store the hashes for later use */ | ||
3959 | for (i = 0; i < n; i++) | 4369 | for (i = 0; i < n; i++) |
3960 | vf_data->vf_mc_hashes[i] = hash_list[i]; | 4370 | vf_data->vf_mc_hashes[i] = hash_list[i]; |
3961 | 4371 | ||
@@ -3972,9 +4382,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter) | |||
3972 | int i, j; | 4382 | int i, j; |
3973 | 4383 | ||
3974 | for (i = 0; i < adapter->vfs_allocated_count; i++) { | 4384 | for (i = 0; i < adapter->vfs_allocated_count; i++) { |
4385 | u32 vmolr = rd32(E1000_VMOLR(i)); | ||
4386 | vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); | ||
4387 | |||
3975 | vf_data = &adapter->vf_data[i]; | 4388 | vf_data = &adapter->vf_data[i]; |
3976 | for (j = 0; j < vf_data->num_vf_mc_hashes; j++) | 4389 | |
3977 | igb_mta_set(hw, vf_data->vf_mc_hashes[j]); | 4390 | if ((vf_data->num_vf_mc_hashes > 30) || |
4391 | (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { | ||
4392 | vmolr |= E1000_VMOLR_MPME; | ||
4393 | } else if (vf_data->num_vf_mc_hashes) { | ||
4394 | vmolr |= E1000_VMOLR_ROMPE; | ||
4395 | for (j = 0; j < vf_data->num_vf_mc_hashes; j++) | ||
4396 | igb_mta_set(hw, vf_data->vf_mc_hashes[j]); | ||
4397 | } | ||
4398 | wr32(E1000_VMOLR(i), vmolr); | ||
3978 | } | 4399 | } |
3979 | } | 4400 | } |
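Both paths above fall back to igb_mta_set() when a VF has 30 or fewer hashes. A sketch of where that helper places each hash, assuming the classic e1000 MTA layout (the real routine lives in e1000_mac.c):

#include <stdint.h>

/* bits 5+ select one of the 32-bit MTA registers, bits 0..4 select
 * the bit within it; mta_reg_count is a power of two (commonly 128) */
static void mta_set(uint32_t *mta, unsigned int mta_reg_count,
		    uint16_t hash_value)
{
	unsigned int reg = (hash_value >> 5) & (mta_reg_count - 1);
	unsigned int bit = hash_value & 0x1F;

	mta[reg] |= 1u << bit;
}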
3980 | 4401 | ||
@@ -4012,7 +4433,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) | |||
4012 | struct e1000_hw *hw = &adapter->hw; | 4433 | struct e1000_hw *hw = &adapter->hw; |
4013 | u32 reg, i; | 4434 | u32 reg, i; |
4014 | 4435 | ||
4015 | /* It is an error to call this function when VFs are not enabled */ | 4436 | /* The vlvf table only exists on 82576 hardware and newer */ |
4437 | if (hw->mac.type < e1000_82576) | ||
4438 | return -1; | ||
4439 | |||
4440 | /* we only need to do this if VMDq is enabled */ | ||
4016 | if (!adapter->vfs_allocated_count) | 4441 | if (!adapter->vfs_allocated_count) |
4017 | return -1; | 4442 | return -1; |
4018 | 4443 | ||
@@ -4042,16 +4467,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) | |||
4042 | 4467 | ||
4043 | /* if !enabled we need to set this up in vfta */ | 4468 | /* if !enabled we need to set this up in vfta */ |
4044 | if (!(reg & E1000_VLVF_VLANID_ENABLE)) { | 4469 | if (!(reg & E1000_VLVF_VLANID_ENABLE)) { |
4045 | /* add VID to filter table, if bit already set | 4470 | /* add VID to filter table */ |
4046 | * PF must have added it outside of table */ | 4471 | igb_vfta_set(hw, vid, true); |
4047 | if (igb_vfta_set(hw, vid, true)) | ||
4048 | reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + | ||
4049 | adapter->vfs_allocated_count); | ||
4050 | reg |= E1000_VLVF_VLANID_ENABLE; | 4472 | reg |= E1000_VLVF_VLANID_ENABLE; |
4051 | } | 4473 | } |
4052 | reg &= ~E1000_VLVF_VLANID_MASK; | 4474 | reg &= ~E1000_VLVF_VLANID_MASK; |
4053 | reg |= vid; | 4475 | reg |= vid; |
4054 | |||
4055 | wr32(E1000_VLVF(i), reg); | 4476 | wr32(E1000_VLVF(i), reg); |
4056 | 4477 | ||
4057 | /* do not modify RLPML for PF devices */ | 4478 | /* do not modify RLPML for PF devices */ |
@@ -4067,8 +4488,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) | |||
4067 | reg |= size; | 4488 | reg |= size; |
4068 | wr32(E1000_VMOLR(vf), reg); | 4489 | wr32(E1000_VMOLR(vf), reg); |
4069 | } | 4490 | } |
4070 | adapter->vf_data[vf].vlans_enabled++; | ||
4071 | 4491 | ||
4492 | adapter->vf_data[vf].vlans_enabled++; | ||
4072 | return 0; | 4493 | return 0; |
4073 | } | 4494 | } |
4074 | } else { | 4495 | } else { |
@@ -4096,10 +4517,57 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) | |||
4096 | reg |= size; | 4517 | reg |= size; |
4097 | wr32(E1000_VMOLR(vf), reg); | 4518 | wr32(E1000_VMOLR(vf), reg); |
4098 | } | 4519 | } |
4099 | return 0; | ||
4100 | } | 4520 | } |
4101 | } | 4521 | } |
4102 | return -1; | 4522 | return 0; |
4523 | } | ||
4524 | |||
4525 | static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) | ||
4526 | { | ||
4527 | struct e1000_hw *hw = &adapter->hw; | ||
4528 | |||
4529 | if (vid) | ||
4530 | wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); | ||
4531 | else | ||
4532 | wr32(E1000_VMVIR(vf), 0); | ||
4533 | } | ||
4534 | |||
4535 | static int igb_ndo_set_vf_vlan(struct net_device *netdev, | ||
4536 | int vf, u16 vlan, u8 qos) | ||
4537 | { | ||
4538 | int err = 0; | ||
4539 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
4540 | |||
4541 | if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) | ||
4542 | return -EINVAL; | ||
4543 | if (vlan || qos) { | ||
4544 | err = igb_vlvf_set(adapter, vlan, !!vlan, vf); | ||
4545 | if (err) | ||
4546 | goto out; | ||
4547 | igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); | ||
4548 | igb_set_vmolr(adapter, vf, !vlan); | ||
4549 | adapter->vf_data[vf].pf_vlan = vlan; | ||
4550 | adapter->vf_data[vf].pf_qos = qos; | ||
4551 | dev_info(&adapter->pdev->dev, | ||
4552 | "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); | ||
4553 | if (test_bit(__IGB_DOWN, &adapter->state)) { | ||
4554 | dev_warn(&adapter->pdev->dev, | ||
4555 | "The VF VLAN has been set," | ||
4556 | " but the PF device is not up.\n"); | ||
4557 | dev_warn(&adapter->pdev->dev, | ||
4558 | "Bring the PF device up before" | ||
4559 | " attempting to use the VF device.\n"); | ||
4560 | } | ||
4561 | } else { | ||
4562 | igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, | ||
4563 | false, vf); | ||
4564 | igb_set_vmvir(adapter, vlan, vf); | ||
4565 | igb_set_vmolr(adapter, vf, true); | ||
4566 | adapter->vf_data[vf].pf_vlan = 0; | ||
4567 | adapter->vf_data[vf].pf_qos = 0; | ||
4568 | } | ||
4569 | out: | ||
4570 | return err; | ||
4103 | } | 4571 | } |
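This is the ndo_set_vf_vlan hook, i.e. the path taken by iproute2's `ip link set DEV vf N vlan VID qos QOS`. The tag handed to igb_set_vmvir() packs the priority above the VLAN ID; a worked example of that composition (VLAN_PRIO_SHIFT is 13, from <linux/if_vlan.h>):

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13	/* from <linux/if_vlan.h> */

int main(void)
{
	uint16_t vlan = 100, qos = 3;

	/* the value igb_set_vmvir() receives above: 0x6064 */
	printf("%#06x\n", vlan | (qos << VLAN_PRIO_SHIFT));
	return 0;
}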
4104 | 4572 | ||
4105 | static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) | 4573 | static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) |
@@ -4110,18 +4578,23 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) | |||
4110 | return igb_vlvf_set(adapter, vid, add, vf); | 4578 | return igb_vlvf_set(adapter, vid, add, vf); |
4111 | } | 4579 | } |
4112 | 4580 | ||
4113 | static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) | 4581 | static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) |
4114 | { | 4582 | { |
4115 | struct e1000_hw *hw = &adapter->hw; | 4583 | /* clear flags */ |
4116 | 4584 | adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC); | |
4117 | /* disable mailbox functionality for vf */ | 4585 | adapter->vf_data[vf].last_nack = jiffies; |
4118 | adapter->vf_data[vf].clear_to_send = false; | ||
4119 | 4586 | ||
4120 | /* reset offloads to defaults */ | 4587 | /* reset offloads to defaults */ |
4121 | igb_set_vmolr(hw, vf); | 4588 | igb_set_vmolr(adapter, vf, true); |
4122 | 4589 | ||
4123 | /* reset vlans for device */ | 4590 | /* reset vlans for device */ |
4124 | igb_clear_vf_vfta(adapter, vf); | 4591 | igb_clear_vf_vfta(adapter, vf); |
4592 | if (adapter->vf_data[vf].pf_vlan) | ||
4593 | igb_ndo_set_vf_vlan(adapter->netdev, vf, | ||
4594 | adapter->vf_data[vf].pf_vlan, | ||
4595 | adapter->vf_data[vf].pf_qos); | ||
4596 | else | ||
4597 | igb_clear_vf_vfta(adapter, vf); | ||
4125 | 4598 | ||
4126 | /* reset multicast table array for vf */ | 4599 | /* reset multicast table array for vf */ |
4127 | adapter->vf_data[vf].num_vf_mc_hashes = 0; | 4600 | adapter->vf_data[vf].num_vf_mc_hashes = 0; |
@@ -4130,7 +4603,19 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) | |||
4130 | igb_set_rx_mode(adapter->netdev); | 4603 | igb_set_rx_mode(adapter->netdev); |
4131 | } | 4604 | } |
4132 | 4605 | ||
4133 | static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) | 4606 | static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) |
4607 | { | ||
4608 | unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; | ||
4609 | |||
4610 | /* generate a new mac address as we were hotplug removed/added */ | ||
4611 | if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) | ||
4612 | random_ether_addr(vf_mac); | ||
4613 | |||
4614 | /* process remaining reset events */ | ||
4615 | igb_vf_reset(adapter, vf); | ||
4616 | } | ||
4617 | |||
4618 | static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) | ||
4134 | { | 4619 | { |
4135 | struct e1000_hw *hw = &adapter->hw; | 4620 | struct e1000_hw *hw = &adapter->hw; |
4136 | unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; | 4621 | unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; |
@@ -4139,11 +4624,10 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) | |||
4139 | u8 *addr = (u8 *)(&msgbuf[1]); | 4624 | u8 *addr = (u8 *)(&msgbuf[1]); |
4140 | 4625 | ||
4141 | /* process all the same items cleared in a function level reset */ | 4626 | /* process all the same items cleared in a function level reset */ |
4142 | igb_vf_reset_event(adapter, vf); | 4627 | igb_vf_reset(adapter, vf); |
4143 | 4628 | ||
4144 | /* set vf mac address */ | 4629 | /* set vf mac address */ |
4145 | igb_rar_set(hw, vf_mac, rar_entry); | 4630 | igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); |
4146 | igb_set_rah_pool(hw, vf, rar_entry); | ||
4147 | 4631 | ||
4148 | /* enable transmit and receive for vf */ | 4632 | /* enable transmit and receive for vf */ |
4149 | reg = rd32(E1000_VFTE); | 4633 | reg = rd32(E1000_VFTE); |
@@ -4151,8 +4635,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) | |||
4151 | reg = rd32(E1000_VFRE); | 4635 | reg = rd32(E1000_VFRE); |
4152 | wr32(E1000_VFRE, reg | (1 << vf)); | 4636 | wr32(E1000_VFRE, reg | (1 << vf)); |
4153 | 4637 | ||
4154 | /* enable mailbox functionality for vf */ | 4638 | adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS; |
4155 | adapter->vf_data[vf].clear_to_send = true; | ||
4156 | 4639 | ||
4157 | /* reply to reset with ack and vf mac address */ | 4640 | /* reply to reset with ack and vf mac address */ |
4158 | msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; | 4641 | msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; |
@@ -4162,66 +4645,51 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) | |||
4162 | 4645 | ||
4163 | static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) | 4646 | static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) |
4164 | { | 4647 | { |
4165 | unsigned char *addr = (char *)&msg[1]; | 4648 | unsigned char *addr = (char *)&msg[1]; |
4166 | int err = -1; | 4649 | int err = -1; |
4167 | 4650 | ||
4168 | if (is_valid_ether_addr(addr)) | 4651 | if (is_valid_ether_addr(addr)) |
4169 | err = igb_set_vf_mac(adapter, vf, addr); | 4652 | err = igb_set_vf_mac(adapter, vf, addr); |
4170 | |||
4171 | return err; | ||
4172 | 4653 | ||
4654 | return err; | ||
4173 | } | 4655 | } |
4174 | 4656 | ||
4175 | static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) | 4657 | static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) |
4176 | { | 4658 | { |
4177 | struct e1000_hw *hw = &adapter->hw; | 4659 | struct e1000_hw *hw = &adapter->hw; |
4660 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; | ||
4178 | u32 msg = E1000_VT_MSGTYPE_NACK; | 4661 | u32 msg = E1000_VT_MSGTYPE_NACK; |
4179 | 4662 | ||
4180 | /* if device isn't clear to send it shouldn't be reading either */ | 4663 | /* if device isn't clear to send it shouldn't be reading either */ |
4181 | if (!adapter->vf_data[vf].clear_to_send) | 4664 | if (!(vf_data->flags & IGB_VF_FLAG_CTS) && |
4665 | time_after(jiffies, vf_data->last_nack + (2 * HZ))) { | ||
4182 | igb_write_mbx(hw, &msg, 1, vf); | 4666 | igb_write_mbx(hw, &msg, 1, vf); |
4183 | } | 4667 | vf_data->last_nack = jiffies; |
4184 | |||
4185 | |||
4186 | static void igb_msg_task(struct igb_adapter *adapter) | ||
4187 | { | ||
4188 | struct e1000_hw *hw = &adapter->hw; | ||
4189 | u32 vf; | ||
4190 | |||
4191 | for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { | ||
4192 | /* process any reset requests */ | ||
4193 | if (!igb_check_for_rst(hw, vf)) { | ||
4194 | adapter->vf_data[vf].clear_to_send = false; | ||
4195 | igb_vf_reset_event(adapter, vf); | ||
4196 | } | ||
4197 | |||
4198 | /* process any messages pending */ | ||
4199 | if (!igb_check_for_msg(hw, vf)) | ||
4200 | igb_rcv_msg_from_vf(adapter, vf); | ||
4201 | |||
4202 | /* process any acks */ | ||
4203 | if (!igb_check_for_ack(hw, vf)) | ||
4204 | igb_rcv_ack_from_vf(adapter, vf); | ||
4205 | |||
4206 | } | 4668 | } |
4207 | } | 4669 | } |
4208 | 4670 | ||
4209 | static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) | 4671 | static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) |
4210 | { | 4672 | { |
4211 | u32 mbx_size = E1000_VFMAILBOX_SIZE; | 4673 | struct pci_dev *pdev = adapter->pdev; |
4212 | u32 msgbuf[mbx_size]; | 4674 | u32 msgbuf[E1000_VFMAILBOX_SIZE]; |
4213 | struct e1000_hw *hw = &adapter->hw; | 4675 | struct e1000_hw *hw = &adapter->hw; |
4676 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; | ||
4214 | s32 retval; | 4677 | s32 retval; |
4215 | 4678 | ||
4216 | retval = igb_read_mbx(hw, msgbuf, mbx_size, vf); | 4679 | retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); |
4217 | 4680 | ||
4218 | if (retval) | 4681 | if (retval) { |
4219 | dev_err(&adapter->pdev->dev, | 4682 | /* if receive failed revoke VF CTS stats and restart init */ |
4220 | "Error receiving message from VF\n"); | 4683 | dev_err(&pdev->dev, "Error receiving message from VF\n"); |
4684 | vf_data->flags &= ~IGB_VF_FLAG_CTS; | ||
4685 | if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) | ||
4686 | return; | ||
4687 | goto out; | ||
4688 | } | ||
4221 | 4689 | ||
4222 | /* this is a message we already processed, do nothing */ | 4690 | /* this is a message we already processed, do nothing */ |
4223 | if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) | 4691 | if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) |
4224 | return retval; | 4692 | return; |
4225 | 4693 | ||
4226 | /* | 4694 | /* |
4227 | * until the vf completes a reset it should not be | 4695 | * until the vf completes a reset it should not be |
@@ -4230,20 +4698,23 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) | |||
4230 | 4698 | ||
4231 | if (msgbuf[0] == E1000_VF_RESET) { | 4699 | if (msgbuf[0] == E1000_VF_RESET) { |
4232 | igb_vf_reset_msg(adapter, vf); | 4700 | igb_vf_reset_msg(adapter, vf); |
4233 | 4701 | return; | |
4234 | return retval; | ||
4235 | } | 4702 | } |
4236 | 4703 | ||
4237 | if (!adapter->vf_data[vf].clear_to_send) { | 4704 | if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { |
4238 | msgbuf[0] |= E1000_VT_MSGTYPE_NACK; | 4705 | if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) |
4239 | igb_write_mbx(hw, msgbuf, 1, vf); | 4706 | return; |
4240 | return retval; | 4707 | retval = -1; |
4708 | goto out; | ||
4241 | } | 4709 | } |
4242 | 4710 | ||
4243 | switch ((msgbuf[0] & 0xFFFF)) { | 4711 | switch ((msgbuf[0] & 0xFFFF)) { |
4244 | case E1000_VF_SET_MAC_ADDR: | 4712 | case E1000_VF_SET_MAC_ADDR: |
4245 | retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); | 4713 | retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); |
4246 | break; | 4714 | break; |
4715 | case E1000_VF_SET_PROMISC: | ||
4716 | retval = igb_set_vf_promisc(adapter, msgbuf, vf); | ||
4717 | break; | ||
4247 | case E1000_VF_SET_MULTICAST: | 4718 | case E1000_VF_SET_MULTICAST: |
4248 | retval = igb_set_vf_multicasts(adapter, msgbuf, vf); | 4719 | retval = igb_set_vf_multicasts(adapter, msgbuf, vf); |
4249 | break; | 4720 | break; |
@@ -4251,25 +4722,73 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) | |||
4251 | retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); | 4722 | retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); |
4252 | break; | 4723 | break; |
4253 | case E1000_VF_SET_VLAN: | 4724 | case E1000_VF_SET_VLAN: |
4254 | retval = igb_set_vf_vlan(adapter, msgbuf, vf); | 4725 | if (adapter->vf_data[vf].pf_vlan) |
4726 | retval = -1; | ||
4727 | else | ||
4728 | retval = igb_set_vf_vlan(adapter, msgbuf, vf); | ||
4255 | break; | 4729 | break; |
4256 | default: | 4730 | default: |
4257 | dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); | 4731 | dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); |
4258 | retval = -1; | 4732 | retval = -1; |
4259 | break; | 4733 | break; |
4260 | } | 4734 | } |
4261 | 4735 | ||
4736 | msgbuf[0] |= E1000_VT_MSGTYPE_CTS; | ||
4737 | out: | ||
4262 | /* notify the VF of the results of what it sent us */ | 4738 | /* notify the VF of the results of what it sent us */ |
4263 | if (retval) | 4739 | if (retval) |
4264 | msgbuf[0] |= E1000_VT_MSGTYPE_NACK; | 4740 | msgbuf[0] |= E1000_VT_MSGTYPE_NACK; |
4265 | else | 4741 | else |
4266 | msgbuf[0] |= E1000_VT_MSGTYPE_ACK; | 4742 | msgbuf[0] |= E1000_VT_MSGTYPE_ACK; |
4267 | 4743 | ||
4268 | msgbuf[0] |= E1000_VT_MSGTYPE_CTS; | ||
4269 | |||
4270 | igb_write_mbx(hw, msgbuf, 1, vf); | 4744 | igb_write_mbx(hw, msgbuf, 1, vf); |
4745 | } | ||
4271 | 4746 | ||
4272 | return retval; | 4747 | static void igb_msg_task(struct igb_adapter *adapter) |
4748 | { | ||
4749 | struct e1000_hw *hw = &adapter->hw; | ||
4750 | u32 vf; | ||
4751 | |||
4752 | for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { | ||
4753 | /* process any reset requests */ | ||
4754 | if (!igb_check_for_rst(hw, vf)) | ||
4755 | igb_vf_reset_event(adapter, vf); | ||
4756 | |||
4757 | /* process any messages pending */ | ||
4758 | if (!igb_check_for_msg(hw, vf)) | ||
4759 | igb_rcv_msg_from_vf(adapter, vf); | ||
4760 | |||
4761 | /* process any acks */ | ||
4762 | if (!igb_check_for_ack(hw, vf)) | ||
4763 | igb_rcv_ack_from_vf(adapter, vf); | ||
4764 | } | ||
4765 | } | ||
4766 | |||
4767 | /** | ||
4768 | * igb_set_uta - Set unicast filter table address | ||
4769 | * @adapter: board private structure | ||
4770 | * | ||
4771 | * The unicast table address is a register array of 32-bit registers. | ||
4772 | * The table is meant to be used in a way similar to how the MTA is used; | ||
4773 | * however, due to certain limitations in the hardware it is necessary to | ||
4774 | * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous | ||
4775 | * enable bit to allow vlan tag stripping when promiscuous mode is enabled | ||
4776 | **/ | ||
4777 | static void igb_set_uta(struct igb_adapter *adapter) | ||
4778 | { | ||
4779 | struct e1000_hw *hw = &adapter->hw; | ||
4780 | int i; | ||
4781 | |||
4782 | /* The UTA table only exists on 82576 hardware and newer */ | ||
4783 | if (hw->mac.type < e1000_82576) | ||
4784 | return; | ||
4785 | |||
4786 | /* we only need to do this if VMDq is enabled */ | ||
4787 | if (!adapter->vfs_allocated_count) | ||
4788 | return; | ||
4789 | |||
4790 | for (i = 0; i < hw->mac.uta_reg_count; i++) | ||
4791 | array_wr32(E1000_UTA, i, ~0); | ||
4273 | } | 4792 | } |
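Writing ~0 to every UTA register sets all of its hash bits, so the hash stage passes any unicast address and acceptance is gated purely by each VF's VMOLR ROPE bit, as the comment explains. The same loop in isolation (the register count here is an assumption; the driver takes uta_reg_count from the MAC invariants):

#include <stdint.h>

#define UTA_REGS 128	/* assumed uta_reg_count for these parts */

static void uta_set_all(uint32_t *uta)
{
	unsigned int i;

	for (i = 0; i < UTA_REGS; i++)
		uta[i] = ~0u;	/* every unicast hash bucket matches */
}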
4274 | 4793 | ||
4275 | /** | 4794 | /** |
@@ -4279,15 +4798,18 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) | |||
4279 | **/ | 4798 | **/ |
4280 | static irqreturn_t igb_intr_msi(int irq, void *data) | 4799 | static irqreturn_t igb_intr_msi(int irq, void *data) |
4281 | { | 4800 | { |
4282 | struct net_device *netdev = data; | 4801 | struct igb_adapter *adapter = data; |
4283 | struct igb_adapter *adapter = netdev_priv(netdev); | 4802 | struct igb_q_vector *q_vector = adapter->q_vector[0]; |
4284 | struct e1000_hw *hw = &adapter->hw; | 4803 | struct e1000_hw *hw = &adapter->hw; |
4285 | /* read ICR disables interrupts using IAM */ | 4804 | /* read ICR disables interrupts using IAM */ |
4286 | u32 icr = rd32(E1000_ICR); | 4805 | u32 icr = rd32(E1000_ICR); |
4287 | 4806 | ||
4288 | igb_write_itr(adapter->rx_ring); | 4807 | igb_write_itr(q_vector); |
4808 | |||
4809 | if (icr & E1000_ICR_DRSTA) | ||
4810 | schedule_work(&adapter->reset_task); | ||
4289 | 4811 | ||
4290 | if(icr & E1000_ICR_DOUTSYNC) { | 4812 | if (icr & E1000_ICR_DOUTSYNC) { |
4291 | /* HW is reporting DMA is out of sync */ | 4813 | /* HW is reporting DMA is out of sync */ |
4292 | adapter->stats.doosync++; | 4814 | adapter->stats.doosync++; |
4293 | } | 4815 | } |
@@ -4298,7 +4820,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data) | |||
4298 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 4820 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
4299 | } | 4821 | } |
4300 | 4822 | ||
4301 | napi_schedule(&adapter->rx_ring[0].napi); | 4823 | napi_schedule(&q_vector->napi); |
4302 | 4824 | ||
4303 | return IRQ_HANDLED; | 4825 | return IRQ_HANDLED; |
4304 | } | 4826 | } |
@@ -4310,8 +4832,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data) | |||
4310 | **/ | 4832 | **/ |
4311 | static irqreturn_t igb_intr(int irq, void *data) | 4833 | static irqreturn_t igb_intr(int irq, void *data) |
4312 | { | 4834 | { |
4313 | struct net_device *netdev = data; | 4835 | struct igb_adapter *adapter = data; |
4314 | struct igb_adapter *adapter = netdev_priv(netdev); | 4836 | struct igb_q_vector *q_vector = adapter->q_vector[0]; |
4315 | struct e1000_hw *hw = &adapter->hw; | 4837 | struct e1000_hw *hw = &adapter->hw; |
4316 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No | 4838 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No |
4317 | * need for the IMC write */ | 4839 | * need for the IMC write */ |
@@ -4319,14 +4841,17 @@ static irqreturn_t igb_intr(int irq, void *data) | |||
4319 | if (!icr) | 4841 | if (!icr) |
4320 | return IRQ_NONE; /* Not our interrupt */ | 4842 | return IRQ_NONE; /* Not our interrupt */ |
4321 | 4843 | ||
4322 | igb_write_itr(adapter->rx_ring); | 4844 | igb_write_itr(q_vector); |
4323 | 4845 | ||
4324 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | 4846 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is |
4325 | * not set, then the adapter didn't send an interrupt */ | 4847 | * not set, then the adapter didn't send an interrupt */ |
4326 | if (!(icr & E1000_ICR_INT_ASSERTED)) | 4848 | if (!(icr & E1000_ICR_INT_ASSERTED)) |
4327 | return IRQ_NONE; | 4849 | return IRQ_NONE; |
4328 | 4850 | ||
4329 | if(icr & E1000_ICR_DOUTSYNC) { | 4851 | if (icr & E1000_ICR_DRSTA) |
4852 | schedule_work(&adapter->reset_task); | ||
4853 | |||
4854 | if (icr & E1000_ICR_DOUTSYNC) { | ||
4330 | /* HW is reporting DMA is out of sync */ | 4855 | /* HW is reporting DMA is out of sync */ |
4331 | adapter->stats.doosync++; | 4856 | adapter->stats.doosync++; |
4332 | } | 4857 | } |
@@ -4338,26 +4863,27 @@ static irqreturn_t igb_intr(int irq, void *data) | |||
4338 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 4863 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
4339 | } | 4864 | } |
4340 | 4865 | ||
4341 | napi_schedule(&adapter->rx_ring[0].napi); | 4866 | napi_schedule(&q_vector->napi); |
4342 | 4867 | ||
4343 | return IRQ_HANDLED; | 4868 | return IRQ_HANDLED; |
4344 | } | 4869 | } |
4345 | 4870 | ||
4346 | static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) | 4871 | static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector) |
4347 | { | 4872 | { |
4348 | struct igb_adapter *adapter = rx_ring->adapter; | 4873 | struct igb_adapter *adapter = q_vector->adapter; |
4349 | struct e1000_hw *hw = &adapter->hw; | 4874 | struct e1000_hw *hw = &adapter->hw; |
4350 | 4875 | ||
4351 | if (adapter->itr_setting & 3) { | 4876 | if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) || |
4352 | if (adapter->num_rx_queues == 1) | 4877 | (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) { |
4878 | if (!adapter->msix_entries) | ||
4353 | igb_set_itr(adapter); | 4879 | igb_set_itr(adapter); |
4354 | else | 4880 | else |
4355 | igb_update_ring_itr(rx_ring); | 4881 | igb_update_ring_itr(q_vector); |
4356 | } | 4882 | } |
4357 | 4883 | ||
4358 | if (!test_bit(__IGB_DOWN, &adapter->state)) { | 4884 | if (!test_bit(__IGB_DOWN, &adapter->state)) { |
4359 | if (adapter->msix_entries) | 4885 | if (adapter->msix_entries) |
4360 | wr32(E1000_EIMS, rx_ring->eims_value); | 4886 | wr32(E1000_EIMS, q_vector->eims_value); |
4361 | else | 4887 | else |
4362 | igb_irq_enable(adapter); | 4888 | igb_irq_enable(adapter); |
4363 | } | 4889 | } |
@@ -4370,76 +4896,101 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) | |||
4370 | **/ | 4896 | **/ |
4371 | static int igb_poll(struct napi_struct *napi, int budget) | 4897 | static int igb_poll(struct napi_struct *napi, int budget) |
4372 | { | 4898 | { |
4373 | struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); | 4899 | struct igb_q_vector *q_vector = container_of(napi, |
4374 | int work_done = 0; | 4900 | struct igb_q_vector, |
4901 | napi); | ||
4902 | int tx_clean_complete = 1, work_done = 0; | ||
4375 | 4903 | ||
4376 | #ifdef CONFIG_IGB_DCA | 4904 | #ifdef CONFIG_IGB_DCA |
4377 | if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) | 4905 | if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) |
4378 | igb_update_rx_dca(rx_ring); | 4906 | igb_update_dca(q_vector); |
4379 | #endif | 4907 | #endif |
4380 | igb_clean_rx_irq_adv(rx_ring, &work_done, budget); | 4908 | if (q_vector->tx_ring) |
4909 | tx_clean_complete = igb_clean_tx_irq(q_vector); | ||
4381 | 4910 | ||
4382 | if (rx_ring->buddy) { | 4911 | if (q_vector->rx_ring) |
4383 | #ifdef CONFIG_IGB_DCA | 4912 | igb_clean_rx_irq_adv(q_vector, &work_done, budget); |
4384 | if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) | 4913 | |
4385 | igb_update_tx_dca(rx_ring->buddy); | 4914 | if (!tx_clean_complete) |
4386 | #endif | 4915 | work_done = budget; |
4387 | if (!igb_clean_tx_irq(rx_ring->buddy)) | ||
4388 | work_done = budget; | ||
4389 | } | ||
4390 | 4916 | ||
4391 | /* If not enough Rx work done, exit the polling mode */ | 4917 | /* If not enough Rx work done, exit the polling mode */ |
4392 | if (work_done < budget) { | 4918 | if (work_done < budget) { |
4393 | napi_complete(napi); | 4919 | napi_complete(napi); |
4394 | igb_rx_irq_enable(rx_ring); | 4920 | igb_ring_irq_enable(q_vector); |
4395 | } | 4921 | } |
4396 | 4922 | ||
4397 | return work_done; | 4923 | return work_done; |
4398 | } | 4924 | } |
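The rewritten igb_poll() follows the standard NAPI contract: report the full budget while Tx work remains so the core polls again, and only complete NAPI (re-arming the interrupt) once work_done < budget. A minimal shape of that logic, with the igb specifics replaced by hypothetical helpers:

#include <stdbool.h>

struct napi_struct;		/* opaque, as in the kernel */

/* hypothetical stand-ins for igb_clean_tx_irq, igb_clean_rx_irq_adv
 * and igb_ring_irq_enable */
extern bool clean_tx(void);
extern void clean_rx(int *work_done, int budget);
extern void napi_complete(struct napi_struct *napi);
extern void ring_irq_enable(void);

static int poll_sketch(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	bool tx_done = clean_tx();

	clean_rx(&work_done, budget);

	if (!tx_done)
		work_done = budget;	/* Tx unfinished: ask to be polled again */

	if (work_done < budget) {	/* all done: leave polled mode, re-arm IRQ */
		napi_complete(napi);
		ring_irq_enable();
	}
	return work_done;
}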
4399 | 4925 | ||
4400 | /** | 4926 | /** |
4401 | * igb_hwtstamp - utility function which checks for TX time stamp | 4927 | * igb_systim_to_hwtstamp - convert system time value to hw timestamp |
4402 | * @adapter: board private structure | 4928 | * @adapter: board private structure |
4929 | * @shhwtstamps: timestamp structure to update | ||
4930 | * @regval: unsigned 64bit system time value. | ||
4931 | * | ||
4932 | * We need to convert the system time value stored in the RX/TXSTMP registers | ||
4933 | * into a hwtstamp which can be used by the upper level timestamping functions | ||
4934 | */ | ||
4935 | static void igb_systim_to_hwtstamp(struct igb_adapter *adapter, | ||
4936 | struct skb_shared_hwtstamps *shhwtstamps, | ||
4937 | u64 regval) | ||
4938 | { | ||
4939 | u64 ns; | ||
4940 | |||
4941 | /* | ||
4942 | * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to | ||
4943 | * 24 to match clock shift we setup earlier. | ||
4944 | */ | ||
4945 | if (adapter->hw.mac.type == e1000_82580) | ||
4946 | regval <<= IGB_82580_TSYNC_SHIFT; | ||
4947 | |||
4948 | ns = timecounter_cyc2time(&adapter->clock, regval); | ||
4949 | timecompare_update(&adapter->compare, ns); | ||
4950 | memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); | ||
4951 | shhwtstamps->hwtstamp = ns_to_ktime(ns); | ||
4952 | shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns); | ||
4953 | } | ||
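On 82580 the RX/TXSTMP registers hold whole nanoseconds starting at bit 0, while the cyclecounter was registered with a 24-bit shift, so the raw value is moved up before timecounter_cyc2time() converts it. The arithmetic in isolation (the shift value follows the comment above; everything else here is a stand-in):

#include <stdint.h>

#define IGB_82580_TSYNC_SHIFT 24	/* matches the clock shift set at init */

static uint64_t systim_to_cycles_82580(uint64_t regval)
{
	/* align the 1ns-per-LSB register value with the 24-bit
	 * fractional format the timecounter expects */
	return regval << IGB_82580_TSYNC_SHIFT;
}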
4954 | |||
4955 | /** | ||
4956 | * igb_tx_hwtstamp - utility function which checks for TX time stamp | ||
4957 | * @q_vector: pointer to q_vector containing needed info | ||
4403 | * @skb: packet that was just sent | 4958 | * @skb: packet that was just sent |
4404 | * | 4959 | * |
4405 | * If we were asked to do hardware stamping and such a time stamp is | 4960 | * If we were asked to do hardware stamping and such a time stamp is |
4406 | * available, then it must have been for this skb here because we | 4961 | * available, then it must have been for this skb here because we |
4407 | * allow only one such packet into the queue. | 4962 | * allow only one such packet into the queue. |
4408 | */ | 4963 | */ |
4409 | static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb) | 4964 | static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) |
4410 | { | 4965 | { |
4966 | struct igb_adapter *adapter = q_vector->adapter; | ||
4411 | union skb_shared_tx *shtx = skb_tx(skb); | 4967 | union skb_shared_tx *shtx = skb_tx(skb); |
4412 | struct e1000_hw *hw = &adapter->hw; | 4968 | struct e1000_hw *hw = &adapter->hw; |
4969 | struct skb_shared_hwtstamps shhwtstamps; | ||
4970 | u64 regval; | ||
4413 | 4971 | ||
4414 | if (unlikely(shtx->hardware)) { | 4972 | /* if skb does not support hw timestamp or TX stamp not valid exit */ |
4415 | u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID; | 4973 | if (likely(!shtx->hardware) || |
4416 | if (valid) { | 4974 | !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) |
4417 | u64 regval = rd32(E1000_TXSTMPL); | 4975 | return; |
4418 | u64 ns; | 4976 | |
4419 | struct skb_shared_hwtstamps shhwtstamps; | 4977 | regval = rd32(E1000_TXSTMPL); |
4420 | 4978 | regval |= (u64)rd32(E1000_TXSTMPH) << 32; | |
4421 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | 4979 | |
4422 | regval |= (u64)rd32(E1000_TXSTMPH) << 32; | 4980 | igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); |
4423 | ns = timecounter_cyc2time(&adapter->clock, | 4981 | skb_tstamp_tx(skb, &shhwtstamps); |
4424 | regval); | ||
4425 | timecompare_update(&adapter->compare, ns); | ||
4426 | shhwtstamps.hwtstamp = ns_to_ktime(ns); | ||
4427 | shhwtstamps.syststamp = | ||
4428 | timecompare_transform(&adapter->compare, ns); | ||
4429 | skb_tstamp_tx(skb, &shhwtstamps); | ||
4430 | } | ||
4431 | } | ||
4432 | } | 4982 | } |
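The timestamp is latched across two 32-bit registers and read low half first, then high. A sketch of the assembly (assumption: on these parts the high-half read is what releases the latch for the next packet, which is why the order matters):

#include <stdint.h>

static uint64_t stamp_from_regs(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}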
4433 | 4983 | ||
4434 | /** | 4984 | /** |
4435 | * igb_clean_tx_irq - Reclaim resources after transmit completes | 4985 | * igb_clean_tx_irq - Reclaim resources after transmit completes |
4436 | * @adapter: board private structure | 4986 | * @q_vector: pointer to q_vector containing needed info |
4437 | * returns true if ring is completely cleaned | 4987 | * returns true if ring is completely cleaned |
4438 | **/ | 4988 | **/ |
4439 | static bool igb_clean_tx_irq(struct igb_ring *tx_ring) | 4989 | static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) |
4440 | { | 4990 | { |
4441 | struct igb_adapter *adapter = tx_ring->adapter; | 4991 | struct igb_adapter *adapter = q_vector->adapter; |
4442 | struct net_device *netdev = adapter->netdev; | 4992 | struct igb_ring *tx_ring = q_vector->tx_ring; |
4993 | struct net_device *netdev = tx_ring->netdev; | ||
4443 | struct e1000_hw *hw = &adapter->hw; | 4994 | struct e1000_hw *hw = &adapter->hw; |
4444 | struct igb_buffer *buffer_info; | 4995 | struct igb_buffer *buffer_info; |
4445 | struct sk_buff *skb; | 4996 | struct sk_buff *skb; |
@@ -4463,17 +5014,17 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) | |||
4463 | if (skb) { | 5014 | if (skb) { |
4464 | unsigned int segs, bytecount; | 5015 | unsigned int segs, bytecount; |
4465 | /* gso_segs is currently only valid for tcp */ | 5016 | /* gso_segs is currently only valid for tcp */ |
4466 | segs = skb_shinfo(skb)->gso_segs ?: 1; | 5017 | segs = buffer_info->gso_segs; |
4467 | /* multiply data chunks by size of headers */ | 5018 | /* multiply data chunks by size of headers */ |
4468 | bytecount = ((segs - 1) * skb_headlen(skb)) + | 5019 | bytecount = ((segs - 1) * skb_headlen(skb)) + |
4469 | skb->len; | 5020 | skb->len; |
4470 | total_packets += segs; | 5021 | total_packets += segs; |
4471 | total_bytes += bytecount; | 5022 | total_bytes += bytecount; |
4472 | 5023 | ||
4473 | igb_tx_hwtstamp(adapter, skb); | 5024 | igb_tx_hwtstamp(q_vector, skb); |
4474 | } | 5025 | } |
4475 | 5026 | ||
4476 | igb_unmap_and_free_tx_resource(adapter, buffer_info); | 5027 | igb_unmap_and_free_tx_resource(tx_ring, buffer_info); |
4477 | tx_desc->wb.status = 0; | 5028 | tx_desc->wb.status = 0; |
4478 | 5029 | ||
4479 | i++; | 5030 | i++; |
@@ -4496,7 +5047,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) | |||
4496 | if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && | 5047 | if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && |
4497 | !(test_bit(__IGB_DOWN, &adapter->state))) { | 5048 | !(test_bit(__IGB_DOWN, &adapter->state))) { |
4498 | netif_wake_subqueue(netdev, tx_ring->queue_index); | 5049 | netif_wake_subqueue(netdev, tx_ring->queue_index); |
4499 | ++adapter->restart_queue; | 5050 | tx_ring->tx_stats.restart_queue++; |
4500 | } | 5051 | } |
4501 | } | 5052 | } |
4502 | 5053 | ||
@@ -4506,12 +5057,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) | |||
4506 | tx_ring->detect_tx_hung = false; | 5057 | tx_ring->detect_tx_hung = false; |
4507 | if (tx_ring->buffer_info[i].time_stamp && | 5058 | if (tx_ring->buffer_info[i].time_stamp && |
4508 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp + | 5059 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp + |
4509 | (adapter->tx_timeout_factor * HZ)) | 5060 | (adapter->tx_timeout_factor * HZ)) && |
4510 | && !(rd32(E1000_STATUS) & | 5061 | !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { |
4511 | E1000_STATUS_TXOFF)) { | ||
4512 | 5062 | ||
4513 | /* detected Tx unit hang */ | 5063 | /* detected Tx unit hang */ |
4514 | dev_err(&adapter->pdev->dev, | 5064 | dev_err(&tx_ring->pdev->dev, |
4515 | "Detected Tx Unit Hang\n" | 5065 | "Detected Tx Unit Hang\n" |
4516 | " Tx Queue <%d>\n" | 5066 | " Tx Queue <%d>\n" |
4517 | " TDH <%x>\n" | 5067 | " TDH <%x>\n" |
@@ -4524,11 +5074,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) | |||
4524 | " jiffies <%lx>\n" | 5074 | " jiffies <%lx>\n" |
4525 | " desc.status <%x>\n", | 5075 | " desc.status <%x>\n", |
4526 | tx_ring->queue_index, | 5076 | tx_ring->queue_index, |
4527 | readl(adapter->hw.hw_addr + tx_ring->head), | 5077 | readl(tx_ring->head), |
4528 | readl(adapter->hw.hw_addr + tx_ring->tail), | 5078 | readl(tx_ring->tail), |
4529 | tx_ring->next_to_use, | 5079 | tx_ring->next_to_use, |
4530 | tx_ring->next_to_clean, | 5080 | tx_ring->next_to_clean, |
4531 | tx_ring->buffer_info[i].time_stamp, | 5081 | tx_ring->buffer_info[eop].time_stamp, |
4532 | eop, | 5082 | eop, |
4533 | jiffies, | 5083 | jiffies, |
4534 | eop_desc->wb.status); | 5084 | eop_desc->wb.status); |
@@ -4539,43 +5089,38 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) | |||
4539 | tx_ring->total_packets += total_packets; | 5089 | tx_ring->total_packets += total_packets; |
4540 | tx_ring->tx_stats.bytes += total_bytes; | 5090 | tx_ring->tx_stats.bytes += total_bytes; |
4541 | tx_ring->tx_stats.packets += total_packets; | 5091 | tx_ring->tx_stats.packets += total_packets; |
4542 | adapter->net_stats.tx_bytes += total_bytes; | ||
4543 | adapter->net_stats.tx_packets += total_packets; | ||
4544 | return (count < tx_ring->count); | 5092 | return (count < tx_ring->count); |
4545 | } | 5093 | } |
4546 | 5094 | ||
4547 | /** | 5095 | /** |
4548 | * igb_receive_skb - helper function to handle rx indications | 5096 | * igb_receive_skb - helper function to handle rx indications |
4549 | * @ring: pointer to receive ring receiving this packet | 5097 | * @q_vector: structure containing interrupt and ring information |
4550 | * @status: descriptor status field as written by hardware | 5098 | * @skb: packet to send up |
4551 | * @rx_desc: receive descriptor containing vlan and type information. | 5099 | * @vlan_tag: vlan tag for packet |
4552 | * @skb: pointer to sk_buff to be indicated to stack | ||
4553 | **/ | 5100 | **/ |
4554 | static void igb_receive_skb(struct igb_ring *ring, u8 status, | 5101 | static void igb_receive_skb(struct igb_q_vector *q_vector, |
4555 | union e1000_adv_rx_desc * rx_desc, | 5102 | struct sk_buff *skb, |
4556 | struct sk_buff *skb) | 5103 | u16 vlan_tag) |
4557 | { | 5104 | { |
4558 | struct igb_adapter * adapter = ring->adapter; | 5105 | struct igb_adapter *adapter = q_vector->adapter; |
4559 | bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); | 5106 | |
4560 | 5107 | if (vlan_tag && adapter->vlgrp) | |
4561 | skb_record_rx_queue(skb, ring->queue_index); | 5108 | vlan_gro_receive(&q_vector->napi, adapter->vlgrp, |
4562 | if (vlan_extracted) | 5109 | vlan_tag, skb); |
4563 | vlan_gro_receive(&ring->napi, adapter->vlgrp, | ||
4564 | le16_to_cpu(rx_desc->wb.upper.vlan), | ||
4565 | skb); | ||
4566 | else | 5110 | else |
4567 | napi_gro_receive(&ring->napi, skb); | 5111 | napi_gro_receive(&q_vector->napi, skb); |
4568 | } | 5112 | } |
4569 | 5113 | ||
4570 | static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, | 5114 | static inline void igb_rx_checksum_adv(struct igb_ring *ring, |
4571 | u32 status_err, struct sk_buff *skb) | 5115 | u32 status_err, struct sk_buff *skb) |
4572 | { | 5116 | { |
4573 | skb->ip_summed = CHECKSUM_NONE; | 5117 | skb->ip_summed = CHECKSUM_NONE; |
4574 | 5118 | ||
4575 | /* Ignore Checksum bit is set or checksum is disabled through ethtool */ | 5119 | /* Ignore Checksum bit is set or checksum is disabled through ethtool */ |
4576 | if ((status_err & E1000_RXD_STAT_IXSM) || | 5120 | if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) || |
4577 | (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED)) | 5121 | (status_err & E1000_RXD_STAT_IXSM)) |
4578 | return; | 5122 | return; |
5123 | |||
4579 | /* TCP/UDP checksum error bit is set */ | 5124 | /* TCP/UDP checksum error bit is set */ |
4580 | if (status_err & | 5125 | if (status_err & |
4581 | (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { | 5126 | (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { |
@@ -4584,9 +5129,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, | |||
4584 | * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) | 5129 | * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) |
4585 | * packets, (aka let the stack check the crc32c) | 5130 | * packets, (aka let the stack check the crc32c) |
4586 | */ | 5131 | */ |
4587 | if (!((adapter->hw.mac.type == e1000_82576) && | 5132 | if (!((skb->len == 60) && |
4588 | (skb->len == 60))) | 5133 | (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))) |
4589 | adapter->hw_csum_err++; | 5134 | ring->rx_stats.csum_err++; |
5135 | |||
4590 | /* let the stack verify checksum errors */ | 5136 | /* let the stack verify checksum errors */ |
4591 | return; | 5137 | return; |
4592 | } | 5138 | } |
@@ -4594,11 +5140,38 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, | |||
4594 | if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) | 5140 | if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) |
4595 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 5141 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
4596 | 5142 | ||
4597 | dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err); | 5143 | dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err); |
4598 | adapter->hw_csum_good++; | ||
4599 | } | 5144 | } |
4600 | 5145 | ||
4601 | static inline u16 igb_get_hlen(struct igb_adapter *adapter, | 5146 | static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, |
5147 | struct sk_buff *skb) | ||
5148 | { | ||
5149 | struct igb_adapter *adapter = q_vector->adapter; | ||
5150 | struct e1000_hw *hw = &adapter->hw; | ||
5151 | u64 regval; | ||
5152 | |||
5153 | /* | ||
5154 | * If this bit is set, then the RX registers contain the time stamp. No | ||
5155 | * other packet will be time stamped until we read these registers, so | ||
5156 | * read the registers to make them available again. Because only one | ||
5157 | * packet can be time stamped at a time, we know that the register | ||
5158 | * values must belong to this one here and therefore we don't need to | ||
5159 | * compare any of the additional attributes stored for it. | ||
5160 | * | ||
5161 | * If nothing went wrong, then it should have a skb_shared_tx that we | ||
5162 | * can turn into a skb_shared_hwtstamps. | ||
5163 | */ | ||
5164 | if (likely(!(staterr & E1000_RXDADV_STAT_TS))) | ||
5165 | return; | ||
5166 | if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) | ||
5167 | return; | ||
5168 | |||
5169 | regval = rd32(E1000_RXSTMPL); | ||
5170 | regval |= (u64)rd32(E1000_RXSTMPH) << 32; | ||
5171 | |||
5172 | igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); | ||
5173 | } | ||
5174 | static inline u16 igb_get_hlen(struct igb_ring *rx_ring, | ||
4602 | union e1000_adv_rx_desc *rx_desc) | 5175 | union e1000_adv_rx_desc *rx_desc) |
4603 | { | 5176 | { |
4604 | /* HW will not DMA in data larger than the given buffer, even if it | 5177 | /* HW will not DMA in data larger than the given buffer, even if it |
@@ -4607,27 +5180,28 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter, | |||
4607 | */ | 5180 | */ |
4608 | u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & | 5181 | u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & |
4609 | E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; | 5182 | E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; |
4610 | if (hlen > adapter->rx_ps_hdr_size) | 5183 | if (hlen > rx_ring->rx_buffer_len) |
4611 | hlen = adapter->rx_ps_hdr_size; | 5184 | hlen = rx_ring->rx_buffer_len; |
4612 | return hlen; | 5185 | return hlen; |
4613 | } | 5186 | } |
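igb_get_hlen() pulls the split-header length out of lo_dword.hdr_info. A worked example, assuming the usual advanced-descriptor field values (a 10-bit length in bits 5..14):

#include <stdint.h>
#include <stdio.h>

#define HDRBUFLEN_MASK  0x7FE0	/* assumed E1000_RXDADV_HDRBUFLEN_MASK */
#define HDRBUFLEN_SHIFT 5	/* assumed E1000_RXDADV_HDRBUFLEN_SHIFT */

int main(void)
{
	uint16_t hdr_info = 0x0C40;	/* sample descriptor word */

	/* (0x0C40 & 0x7FE0) >> 5 == 98 bytes of header */
	printf("hlen = %d\n", (hdr_info & HDRBUFLEN_MASK) >> HDRBUFLEN_SHIFT);
	return 0;
}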
4614 | 5187 | ||
4615 | static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | 5188 | static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, |
4616 | int *work_done, int budget) | 5189 | int *work_done, int budget) |
4617 | { | 5190 | { |
4618 | struct igb_adapter *adapter = rx_ring->adapter; | 5191 | struct igb_ring *rx_ring = q_vector->rx_ring; |
4619 | struct net_device *netdev = adapter->netdev; | 5192 | struct net_device *netdev = rx_ring->netdev; |
4620 | struct e1000_hw *hw = &adapter->hw; | 5193 | struct pci_dev *pdev = rx_ring->pdev; |
4621 | struct pci_dev *pdev = adapter->pdev; | ||
4622 | union e1000_adv_rx_desc *rx_desc, *next_rxd; | 5194 | union e1000_adv_rx_desc *rx_desc, *next_rxd; |
4623 | struct igb_buffer *buffer_info, *next_buffer; | 5195 | struct igb_buffer *buffer_info, *next_buffer; |
4624 | struct sk_buff *skb; | 5196 | struct sk_buff *skb; |
4625 | bool cleaned = false; | 5197 | bool cleaned = false; |
4626 | int cleaned_count = 0; | 5198 | int cleaned_count = 0; |
5199 | int current_node = numa_node_id(); | ||
4627 | unsigned int total_bytes = 0, total_packets = 0; | 5200 | unsigned int total_bytes = 0, total_packets = 0; |
4628 | unsigned int i; | 5201 | unsigned int i; |
4629 | u32 staterr; | 5202 | u32 staterr; |
4630 | u16 length; | 5203 | u16 length; |
5204 | u16 vlan_tag; | ||
4631 | 5205 | ||
4632 | i = rx_ring->next_to_clean; | 5206 | i = rx_ring->next_to_clean; |
4633 | buffer_info = &rx_ring->buffer_info[i]; | 5207 | buffer_info = &rx_ring->buffer_info[i]; |
@@ -4646,6 +5220,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | |||
4646 | i++; | 5220 | i++; |
4647 | if (i == rx_ring->count) | 5221 | if (i == rx_ring->count) |
4648 | i = 0; | 5222 | i = 0; |
5223 | |||
4649 | next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); | 5224 | next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); |
4650 | prefetch(next_rxd); | 5225 | prefetch(next_rxd); |
4651 | next_buffer = &rx_ring->buffer_info[i]; | 5226 | next_buffer = &rx_ring->buffer_info[i]; |
@@ -4654,23 +5229,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | |||
4654 | cleaned = true; | 5229 | cleaned = true; |
4655 | cleaned_count++; | 5230 | cleaned_count++; |
4656 | 5231 | ||
4657 | /* this is the fast path for the non-packet split case */ | ||
4658 | if (!adapter->rx_ps_hdr_size) { | ||
4659 | pci_unmap_single(pdev, buffer_info->dma, | ||
4660 | adapter->rx_buffer_len, | ||
4661 | PCI_DMA_FROMDEVICE); | ||
4662 | buffer_info->dma = 0; | ||
4663 | skb_put(skb, length); | ||
4664 | goto send_up; | ||
4665 | } | ||
4666 | |||
4667 | if (buffer_info->dma) { | 5232 | if (buffer_info->dma) { |
4668 | u16 hlen = igb_get_hlen(adapter, rx_desc); | ||
4669 | pci_unmap_single(pdev, buffer_info->dma, | 5233 | pci_unmap_single(pdev, buffer_info->dma, |
4670 | adapter->rx_ps_hdr_size, | 5234 | rx_ring->rx_buffer_len, |
4671 | PCI_DMA_FROMDEVICE); | 5235 | PCI_DMA_FROMDEVICE); |
4672 | buffer_info->dma = 0; | 5236 | buffer_info->dma = 0; |
4673 | skb_put(skb, hlen); | 5237 | if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) { |
5238 | skb_put(skb, length); | ||
5239 | goto send_up; | ||
5240 | } | ||
5241 | skb_put(skb, igb_get_hlen(rx_ring, rx_desc)); | ||
4674 | } | 5242 | } |
4675 | 5243 | ||
4676 | if (length) { | 5244 | if (length) { |
@@ -4683,15 +5251,14 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | |||
4683 | buffer_info->page_offset, | 5251 | buffer_info->page_offset, |
4684 | length); | 5252 | length); |
4685 | 5253 | ||
4686 | if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || | 5254 | if ((page_count(buffer_info->page) != 1) || |
4687 | (page_count(buffer_info->page) != 1)) | 5255 | (page_to_nid(buffer_info->page) != current_node)) |
4688 | buffer_info->page = NULL; | 5256 | buffer_info->page = NULL; |
4689 | else | 5257 | else |
4690 | get_page(buffer_info->page); | 5258 | get_page(buffer_info->page); |
4691 | 5259 | ||
4692 | skb->len += length; | 5260 | skb->len += length; |
4693 | skb->data_len += length; | 5261 | skb->data_len += length; |
4694 | |||
4695 | skb->truesize += length; | 5262 | skb->truesize += length; |
4696 | } | 5263 | } |
4697 | 5264 | ||
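The rewritten recycling test keeps the other half of the page only when the driver is the sole owner and the page is local to the node doing the polling; a remote page is dropped so the refill path allocates fresh local memory. A hedged fragment restating the condition (kernel context assumed; the helper name is hypothetical):

    /* reuse iff we hold the only reference and the memory is local */
    static bool page_is_reusable(struct page *page, int current_node)
    {
            return page_count(page) == 1 &&
                   page_to_nid(page) == current_node;
    }
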
@@ -4703,60 +5270,24 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | |||
4703 | goto next_desc; | 5270 | goto next_desc; |
4704 | } | 5271 | } |
4705 | send_up: | 5272 | send_up: |
4706 | /* | ||
4707 | * If this bit is set, then the RX registers contain | ||
4708 | * the time stamp. No other packet will be time | ||
4709 | * stamped until we read these registers, so read the | ||
4710 | * registers to make them available again. Because | ||
4711 | * only one packet can be time stamped at a time, we | ||
4712 | * know that the register values must belong to this | ||
4713 | * one here and therefore we don't need to compare | ||
4714 | * any of the additional attributes stored for it. | ||
4715 | * | ||
4716 | * If nothing went wrong, then it should have a | ||
4717 | * skb_shared_tx that we can turn into a | ||
4718 | * skb_shared_hwtstamps. | ||
4719 | * | ||
4720 | * TODO: can time stamping be triggered (thus locking | ||
4721 | * the registers) without the packet reaching this point | ||
4722 | * here? In that case RX time stamping would get stuck. | ||
4723 | * | ||
4724 | * TODO: in "time stamp all packets" mode this bit is | ||
4725 | * not set. Need a global flag for this mode and then | ||
4726 | * always read the registers. Cannot be done without | ||
4727 | * a race condition. | ||
4728 | */ | ||
4729 | if (unlikely(staterr & E1000_RXD_STAT_TS)) { | ||
4730 | u64 regval; | ||
4731 | u64 ns; | ||
4732 | struct skb_shared_hwtstamps *shhwtstamps = | ||
4733 | skb_hwtstamps(skb); | ||
4734 | |||
4735 | WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID), | ||
4736 | "igb: no RX time stamp available for time stamped packet"); | ||
4737 | regval = rd32(E1000_RXSTMPL); | ||
4738 | regval |= (u64)rd32(E1000_RXSTMPH) << 32; | ||
4739 | ns = timecounter_cyc2time(&adapter->clock, regval); | ||
4740 | timecompare_update(&adapter->compare, ns); | ||
4741 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); | ||
4742 | shhwtstamps->hwtstamp = ns_to_ktime(ns); | ||
4743 | shhwtstamps->syststamp = | ||
4744 | timecompare_transform(&adapter->compare, ns); | ||
4745 | } | ||
4746 | |||
4747 | if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { | 5273 | if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { |
4748 | dev_kfree_skb_irq(skb); | 5274 | dev_kfree_skb_irq(skb); |
4749 | goto next_desc; | 5275 | goto next_desc; |
4750 | } | 5276 | } |
4751 | 5277 | ||
5278 | igb_rx_hwtstamp(q_vector, staterr, skb); | ||
4752 | total_bytes += skb->len; | 5279 | total_bytes += skb->len; |
4753 | total_packets++; | 5280 | total_packets++; |
4754 | 5281 | ||
4755 | igb_rx_checksum_adv(adapter, staterr, skb); | 5282 | igb_rx_checksum_adv(rx_ring, staterr, skb); |
4756 | 5283 | ||
4757 | skb->protocol = eth_type_trans(skb, netdev); | 5284 | skb->protocol = eth_type_trans(skb, netdev); |
5285 | skb_record_rx_queue(skb, rx_ring->queue_index); | ||
4758 | 5286 | ||
4759 | igb_receive_skb(rx_ring, staterr, rx_desc, skb); | 5287 | vlan_tag = ((staterr & E1000_RXD_STAT_VP) ? |
5288 | le16_to_cpu(rx_desc->wb.upper.vlan) : 0); | ||
5289 | |||
5290 | igb_receive_skb(q_vector, skb, vlan_tag); | ||
4760 | 5291 | ||
4761 | next_desc: | 5292 | next_desc: |
4762 | rx_desc->wb.upper.status_error = 0; | 5293 | rx_desc->wb.upper.status_error = 0; |
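With the VLAN group lookup moved into igb_receive_skb(), the tag itself is now pulled out inline: it is only meaningful when the VP status bit is set. A standalone sketch (E1000_RXD_STAT_VP is 0x08 in e1000_defines.h; the le16_to_cpu() conversion is dropped, so assume a little-endian host):

    #include <stdint.h>
    #include <stdio.h>

    #define RXD_STAT_VP 0x08 /* descriptor carries a VLAN tag */

    static uint16_t rx_vlan_tag(uint32_t staterr, uint16_t desc_vlan)
    {
            return (staterr & RXD_STAT_VP) ? desc_vlan : 0;
    }

    int main(void)
    {
            printf("%u\n", rx_vlan_tag(0x08, 100)); /* tagged -> 100 */
            printf("%u\n", rx_vlan_tag(0x00, 100)); /* untagged -> 0 */
            return 0;
    }
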
@@ -4783,8 +5314,6 @@ next_desc: | |||
4783 | rx_ring->total_bytes += total_bytes; | 5314 | rx_ring->total_bytes += total_bytes; |
4784 | rx_ring->rx_stats.packets += total_packets; | 5315 | rx_ring->rx_stats.packets += total_packets; |
4785 | rx_ring->rx_stats.bytes += total_bytes; | 5316 | rx_ring->rx_stats.bytes += total_bytes; |
4786 | adapter->net_stats.rx_bytes += total_bytes; | ||
4787 | adapter->net_stats.rx_packets += total_packets; | ||
4788 | return cleaned; | 5317 | return cleaned; |
4789 | } | 5318 | } |
4790 | 5319 | ||
@@ -4792,12 +5321,9 @@ next_desc: | |||
4792 | * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split | 5321 | * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split |
 4793 | * @adapter: address of board private structure | 5322 | * @rx_ring: pointer to the ring to refill |
4794 | **/ | 5323 | **/ |
4795 | static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, | 5324 | void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) |
4796 | int cleaned_count) | ||
4797 | { | 5325 | { |
4798 | struct igb_adapter *adapter = rx_ring->adapter; | 5326 | struct net_device *netdev = rx_ring->netdev; |
4799 | struct net_device *netdev = adapter->netdev; | ||
4800 | struct pci_dev *pdev = adapter->pdev; | ||
4801 | union e1000_adv_rx_desc *rx_desc; | 5327 | union e1000_adv_rx_desc *rx_desc; |
4802 | struct igb_buffer *buffer_info; | 5328 | struct igb_buffer *buffer_info; |
4803 | struct sk_buff *skb; | 5329 | struct sk_buff *skb; |
@@ -4807,19 +5333,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, | |||
4807 | i = rx_ring->next_to_use; | 5333 | i = rx_ring->next_to_use; |
4808 | buffer_info = &rx_ring->buffer_info[i]; | 5334 | buffer_info = &rx_ring->buffer_info[i]; |
4809 | 5335 | ||
4810 | if (adapter->rx_ps_hdr_size) | 5336 | bufsz = rx_ring->rx_buffer_len; |
4811 | bufsz = adapter->rx_ps_hdr_size; | ||
4812 | else | ||
4813 | bufsz = adapter->rx_buffer_len; | ||
4814 | 5337 | ||
4815 | while (cleaned_count--) { | 5338 | while (cleaned_count--) { |
4816 | rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); | 5339 | rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); |
4817 | 5340 | ||
4818 | if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { | 5341 | if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) { |
4819 | if (!buffer_info->page) { | 5342 | if (!buffer_info->page) { |
4820 | buffer_info->page = alloc_page(GFP_ATOMIC); | 5343 | buffer_info->page = netdev_alloc_page(netdev); |
4821 | if (!buffer_info->page) { | 5344 | if (!buffer_info->page) { |
4822 | adapter->alloc_rx_buff_failed++; | 5345 | rx_ring->rx_stats.alloc_failed++; |
4823 | goto no_buffers; | 5346 | goto no_buffers; |
4824 | } | 5347 | } |
4825 | buffer_info->page_offset = 0; | 5348 | buffer_info->page_offset = 0; |
@@ -4827,39 +5350,48 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, | |||
4827 | buffer_info->page_offset ^= PAGE_SIZE / 2; | 5350 | buffer_info->page_offset ^= PAGE_SIZE / 2; |
4828 | } | 5351 | } |
4829 | buffer_info->page_dma = | 5352 | buffer_info->page_dma = |
4830 | pci_map_page(pdev, buffer_info->page, | 5353 | pci_map_page(rx_ring->pdev, buffer_info->page, |
4831 | buffer_info->page_offset, | 5354 | buffer_info->page_offset, |
4832 | PAGE_SIZE / 2, | 5355 | PAGE_SIZE / 2, |
4833 | PCI_DMA_FROMDEVICE); | 5356 | PCI_DMA_FROMDEVICE); |
5357 | if (pci_dma_mapping_error(rx_ring->pdev, | ||
5358 | buffer_info->page_dma)) { | ||
5359 | buffer_info->page_dma = 0; | ||
5360 | rx_ring->rx_stats.alloc_failed++; | ||
5361 | goto no_buffers; | ||
5362 | } | ||
4834 | } | 5363 | } |
4835 | 5364 | ||
4836 | if (!buffer_info->skb) { | 5365 | skb = buffer_info->skb; |
4837 | skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); | 5366 | if (!skb) { |
5367 | skb = netdev_alloc_skb_ip_align(netdev, bufsz); | ||
4838 | if (!skb) { | 5368 | if (!skb) { |
4839 | adapter->alloc_rx_buff_failed++; | 5369 | rx_ring->rx_stats.alloc_failed++; |
4840 | goto no_buffers; | 5370 | goto no_buffers; |
4841 | } | 5371 | } |
4842 | 5372 | ||
4843 | /* Make buffer alignment 2 beyond a 16 byte boundary | ||
4844 | * this will result in a 16 byte aligned IP header after | ||
4845 | * the 14 byte MAC header is removed | ||
4846 | */ | ||
4847 | skb_reserve(skb, NET_IP_ALIGN); | ||
4848 | |||
4849 | buffer_info->skb = skb; | 5373 | buffer_info->skb = skb; |
4850 | buffer_info->dma = pci_map_single(pdev, skb->data, | 5374 | } |
5375 | if (!buffer_info->dma) { | ||
5376 | buffer_info->dma = pci_map_single(rx_ring->pdev, | ||
5377 | skb->data, | ||
4851 | bufsz, | 5378 | bufsz, |
4852 | PCI_DMA_FROMDEVICE); | 5379 | PCI_DMA_FROMDEVICE); |
5380 | if (pci_dma_mapping_error(rx_ring->pdev, | ||
5381 | buffer_info->dma)) { | ||
5382 | buffer_info->dma = 0; | ||
5383 | rx_ring->rx_stats.alloc_failed++; | ||
5384 | goto no_buffers; | ||
5385 | } | ||
4853 | } | 5386 | } |
4854 | /* Refresh the desc even if buffer_addrs didn't change because | 5387 | /* Refresh the desc even if buffer_addrs didn't change because |
4855 | * each write-back erases this info. */ | 5388 | * each write-back erases this info. */ |
4856 | if (adapter->rx_ps_hdr_size) { | 5389 | if (bufsz < IGB_RXBUFFER_1024) { |
4857 | rx_desc->read.pkt_addr = | 5390 | rx_desc->read.pkt_addr = |
4858 | cpu_to_le64(buffer_info->page_dma); | 5391 | cpu_to_le64(buffer_info->page_dma); |
4859 | rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); | 5392 | rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); |
4860 | } else { | 5393 | } else { |
4861 | rx_desc->read.pkt_addr = | 5394 | rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); |
4862 | cpu_to_le64(buffer_info->dma); | ||
4863 | rx_desc->read.hdr_addr = 0; | 5395 | rx_desc->read.hdr_addr = 0; |
4864 | } | 5396 | } |
4865 | 5397 | ||
@@ -4882,7 +5414,7 @@ no_buffers: | |||
4882 | * applicable for weak-ordered memory model archs, | 5414 | * applicable for weak-ordered memory model archs, |
4883 | * such as IA-64). */ | 5415 | * such as IA-64). */ |
4884 | wmb(); | 5416 | wmb(); |
4885 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | 5417 | writel(i, rx_ring->tail); |
4886 | } | 5418 | } |
4887 | } | 5419 | } |
4888 | 5420 | ||
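The refill path ends with the usual descriptor/doorbell ordering: the descriptor writes must be globally visible before the device sees the new tail, or it may DMA into a half-initialised slot. A commented restatement of that step (kernel context assumed, condensed from the code above):

    rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
    rx_desc->read.hdr_addr = 0;     /* no split: header buffer unused */

    wmb();                          /* descriptors visible before tail */
    writel(i, rx_ring->tail);       /* doorbell: device may DMA again */
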
@@ -4941,13 +5473,11 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, | |||
4941 | struct igb_adapter *adapter = netdev_priv(netdev); | 5473 | struct igb_adapter *adapter = netdev_priv(netdev); |
4942 | struct e1000_hw *hw = &adapter->hw; | 5474 | struct e1000_hw *hw = &adapter->hw; |
4943 | struct hwtstamp_config config; | 5475 | struct hwtstamp_config config; |
4944 | u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED; | 5476 | u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; |
4945 | u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED; | 5477 | u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; |
4946 | u32 tsync_rx_ctl_type = 0; | ||
4947 | u32 tsync_rx_cfg = 0; | 5478 | u32 tsync_rx_cfg = 0; |
4948 | int is_l4 = 0; | 5479 | bool is_l4 = false; |
4949 | int is_l2 = 0; | 5480 | bool is_l2 = false; |
4950 | short port = 319; /* PTP */ | ||
4951 | u32 regval; | 5481 | u32 regval; |
4952 | 5482 | ||
4953 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | 5483 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) |
@@ -4959,10 +5489,8 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, | |||
4959 | 5489 | ||
4960 | switch (config.tx_type) { | 5490 | switch (config.tx_type) { |
4961 | case HWTSTAMP_TX_OFF: | 5491 | case HWTSTAMP_TX_OFF: |
4962 | tsync_tx_ctl_bit = 0; | 5492 | tsync_tx_ctl = 0; |
4963 | break; | ||
4964 | case HWTSTAMP_TX_ON: | 5493 | case HWTSTAMP_TX_ON: |
4965 | tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED; | ||
4966 | break; | 5494 | break; |
4967 | default: | 5495 | default: |
4968 | return -ERANGE; | 5496 | return -ERANGE; |
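Note that the rewritten tx_type switch relies on a deliberate fallthrough: HWTSTAMP_TX_OFF clears the enable bit and then falls into HWTSTAMP_TX_ON's break. Spelling the intent out with a comment keeps readers and static checkers from mistaking it for a missing break:

    switch (config.tx_type) {
    case HWTSTAMP_TX_OFF:
            tsync_tx_ctl = 0;
            /* fall through */
    case HWTSTAMP_TX_ON:
            break;
    default:
            return -ERANGE;
    }
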
@@ -4970,7 +5498,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, | |||
4970 | 5498 | ||
4971 | switch (config.rx_filter) { | 5499 | switch (config.rx_filter) { |
4972 | case HWTSTAMP_FILTER_NONE: | 5500 | case HWTSTAMP_FILTER_NONE: |
4973 | tsync_rx_ctl_bit = 0; | 5501 | tsync_rx_ctl = 0; |
4974 | break; | 5502 | break; |
4975 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | 5503 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: |
4976 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | 5504 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: |
@@ -4981,86 +5509,97 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev, | |||
4981 | * possible to time stamp both Sync and Delay_Req messages | 5509 | * possible to time stamp both Sync and Delay_Req messages |
4982 | * => fall back to time stamping all packets | 5510 | * => fall back to time stamping all packets |
4983 | */ | 5511 | */ |
4984 | tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL; | 5512 | tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; |
4985 | config.rx_filter = HWTSTAMP_FILTER_ALL; | 5513 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
4986 | break; | 5514 | break; |
4987 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | 5515 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: |
4988 | tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; | 5516 | tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; |
4989 | tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; | 5517 | tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; |
4990 | is_l4 = 1; | 5518 | is_l4 = true; |
4991 | break; | 5519 | break; |
4992 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | 5520 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: |
4993 | tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; | 5521 | tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; |
4994 | tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; | 5522 | tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; |
4995 | is_l4 = 1; | 5523 | is_l4 = true; |
4996 | break; | 5524 | break; |
4997 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | 5525 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: |
4998 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | 5526 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: |
4999 | tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; | 5527 | tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; |
5000 | tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; | 5528 | tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; |
5001 | is_l2 = 1; | 5529 | is_l2 = true; |
5002 | is_l4 = 1; | 5530 | is_l4 = true; |
5003 | config.rx_filter = HWTSTAMP_FILTER_SOME; | 5531 | config.rx_filter = HWTSTAMP_FILTER_SOME; |
5004 | break; | 5532 | break; |
5005 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | 5533 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: |
5006 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | 5534 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: |
5007 | tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; | 5535 | tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; |
5008 | tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; | 5536 | tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; |
5009 | is_l2 = 1; | 5537 | is_l2 = true; |
5010 | is_l4 = 1; | 5538 | is_l4 = true; |
5011 | config.rx_filter = HWTSTAMP_FILTER_SOME; | 5539 | config.rx_filter = HWTSTAMP_FILTER_SOME; |
5012 | break; | 5540 | break; |
5013 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | 5541 | case HWTSTAMP_FILTER_PTP_V2_EVENT: |
5014 | case HWTSTAMP_FILTER_PTP_V2_SYNC: | 5542 | case HWTSTAMP_FILTER_PTP_V2_SYNC: |
5015 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | 5543 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: |
5016 | tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2; | 5544 | tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; |
5017 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; | 5545 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; |
5018 | is_l2 = 1; | 5546 | is_l2 = true; |
5019 | break; | 5547 | break; |
5020 | default: | 5548 | default: |
5021 | return -ERANGE; | 5549 | return -ERANGE; |
5022 | } | 5550 | } |
5023 | 5551 | ||
5552 | if (hw->mac.type == e1000_82575) { | ||
5553 | if (tsync_rx_ctl | tsync_tx_ctl) | ||
5554 | return -EINVAL; | ||
5555 | return 0; | ||
5556 | } | ||
5557 | |||
5024 | /* enable/disable TX */ | 5558 | /* enable/disable TX */ |
5025 | regval = rd32(E1000_TSYNCTXCTL); | 5559 | regval = rd32(E1000_TSYNCTXCTL); |
5026 | regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit; | 5560 | regval &= ~E1000_TSYNCTXCTL_ENABLED; |
5561 | regval |= tsync_tx_ctl; | ||
5027 | wr32(E1000_TSYNCTXCTL, regval); | 5562 | wr32(E1000_TSYNCTXCTL, regval); |
5028 | 5563 | ||
5029 | /* enable/disable RX, define which PTP packets are time stamped */ | 5564 | /* enable/disable RX */ |
5030 | regval = rd32(E1000_TSYNCRXCTL); | 5565 | regval = rd32(E1000_TSYNCRXCTL); |
5031 | regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit; | 5566 | regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); |
5032 | regval = (regval & ~0xE) | tsync_rx_ctl_type; | 5567 | regval |= tsync_rx_ctl; |
5033 | wr32(E1000_TSYNCRXCTL, regval); | 5568 | wr32(E1000_TSYNCRXCTL, regval); |
5034 | wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); | ||
5035 | 5569 | ||
5036 | /* | 5570 | /* define which PTP packets are time stamped */ |
5037 | * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 | 5571 | wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); |
5038 | * (Ethertype to filter on) | ||
5039 | * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter) | ||
5040 | * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping) | ||
5041 | */ | ||
5042 | wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0); | ||
5043 | |||
5044 | /* L4 Queue Filter[0]: only filter by source and destination port */ | ||
5045 | wr32(E1000_SPQF0, htons(port)); | ||
5046 | wr32(E1000_IMIREXT(0), is_l4 ? | ||
5047 | ((1<<12) | (1<<19) /* bypass size and control flags */) : 0); | ||
5048 | wr32(E1000_IMIR(0), is_l4 ? | ||
5049 | (htons(port) | ||
5050 | | (0<<16) /* immediate interrupt disabled */ | ||
5051 | | 0 /* (1<<17) bit cleared: do not bypass | ||
5052 | destination port check */) | ||
5053 | : 0); | ||
5054 | wr32(E1000_FTQF0, is_l4 ? | ||
5055 | (0x11 /* UDP */ | ||
5056 | | (1<<15) /* VF not compared */ | ||
5057 | | (1<<27) /* Enable Timestamping */ | ||
5058 | | (7<<28) /* only source port filter enabled, | ||
5059 | source/target address and protocol | ||
5060 | masked */) | ||
5061 | : ((1<<15) | (15<<28) /* all mask bits set = filter not | ||
5062 | enabled */)); | ||
5063 | 5572 | ||
5573 | /* define ethertype filter for timestamped packets */ | ||
5574 | if (is_l2) | ||
5575 | wr32(E1000_ETQF(3), | ||
5576 | (E1000_ETQF_FILTER_ENABLE | /* enable filter */ | ||
5577 | E1000_ETQF_1588 | /* enable timestamping */ | ||
5578 | ETH_P_1588)); /* 1588 eth protocol type */ | ||
5579 | else | ||
5580 | wr32(E1000_ETQF(3), 0); | ||
5581 | |||
5582 | #define PTP_PORT 319 | ||
5583 | /* L4 Queue Filter[3]: filter by destination port and protocol */ | ||
5584 | if (is_l4) { | ||
5585 | u32 ftqf = (IPPROTO_UDP /* UDP */ | ||
5586 | | E1000_FTQF_VF_BP /* VF not compared */ | ||
5587 | | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ | ||
5588 | | E1000_FTQF_MASK); /* mask all inputs */ | ||
5589 | ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ | ||
5590 | |||
5591 | wr32(E1000_IMIR(3), htons(PTP_PORT)); | ||
5592 | wr32(E1000_IMIREXT(3), | ||
5593 | (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); | ||
5594 | if (hw->mac.type == e1000_82576) { | ||
5595 | /* enable source port check */ | ||
5596 | wr32(E1000_SPQF(3), htons(PTP_PORT)); | ||
5597 | ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; | ||
5598 | } | ||
5599 | wr32(E1000_FTQF(3), ftqf); | ||
5600 | } else { | ||
5601 | wr32(E1000_FTQF(3), E1000_FTQF_MASK); | ||
5602 | } | ||
5064 | wrfl(); | 5603 | wrfl(); |
5065 | 5604 | ||
5066 | adapter->hwtstamp_config = config; | 5605 | adapter->hwtstamp_config = config; |
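Taken together, the rewritten block programs one L2 filter and one L4 filter for PTP, both on index 3 (constants per e1000_regs.h and e1000_defines.h at the time of this patch):

 - ETQF(3): match ethertype 0x88F7 (ETH_P_1588) and flag matches for timestamping;
 - IMIR(3)/IMIREXT(3): match UDP destination port 319, bypassing the size and control-bit checks;
 - SPQF(3): 82576 only, additionally match source port 319;
 - FTQF(3): ties the L4 pieces together; writing E1000_FTQF_MASK with every bypass bit set disables the filter outright.

A hypothetical helper capturing that last "disable" case (kernel context assumed; wr32() expects a local hw as in the driver):

    static void igb_disable_l4_tstamp_filter(struct e1000_hw *hw)
    {
            /* all inputs masked == filter not enabled */
            wr32(E1000_FTQF(3), E1000_FTQF_MASK);
    }
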
@@ -5137,21 +5676,15 @@ static void igb_vlan_rx_register(struct net_device *netdev, | |||
5137 | ctrl |= E1000_CTRL_VME; | 5676 | ctrl |= E1000_CTRL_VME; |
5138 | wr32(E1000_CTRL, ctrl); | 5677 | wr32(E1000_CTRL, ctrl); |
5139 | 5678 | ||
5140 | /* enable VLAN receive filtering */ | 5679 | /* Disable CFI check */ |
5141 | rctl = rd32(E1000_RCTL); | 5680 | rctl = rd32(E1000_RCTL); |
5142 | rctl &= ~E1000_RCTL_CFIEN; | 5681 | rctl &= ~E1000_RCTL_CFIEN; |
5143 | wr32(E1000_RCTL, rctl); | 5682 | wr32(E1000_RCTL, rctl); |
5144 | igb_update_mng_vlan(adapter); | ||
5145 | } else { | 5683 | } else { |
5146 | /* disable VLAN tag insert/strip */ | 5684 | /* disable VLAN tag insert/strip */ |
5147 | ctrl = rd32(E1000_CTRL); | 5685 | ctrl = rd32(E1000_CTRL); |
5148 | ctrl &= ~E1000_CTRL_VME; | 5686 | ctrl &= ~E1000_CTRL_VME; |
5149 | wr32(E1000_CTRL, ctrl); | 5687 | wr32(E1000_CTRL, ctrl); |
5150 | |||
5151 | if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) { | ||
5152 | igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | ||
5153 | adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; | ||
5154 | } | ||
5155 | } | 5688 | } |
5156 | 5689 | ||
5157 | igb_rlpml_set(adapter); | 5690 | igb_rlpml_set(adapter); |
@@ -5166,16 +5699,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |||
5166 | struct e1000_hw *hw = &adapter->hw; | 5699 | struct e1000_hw *hw = &adapter->hw; |
5167 | int pf_id = adapter->vfs_allocated_count; | 5700 | int pf_id = adapter->vfs_allocated_count; |
5168 | 5701 | ||
5169 | if ((hw->mng_cookie.status & | 5702 | /* attempt to add filter to vlvf array */ |
5170 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | 5703 | igb_vlvf_set(adapter, vid, true, pf_id); |
5171 | (vid == adapter->mng_vlan_id)) | ||
5172 | return; | ||
5173 | |||
5174 | /* add vid to vlvf if sr-iov is enabled, | ||
5175 | * if that fails add directly to filter table */ | ||
5176 | if (igb_vlvf_set(adapter, vid, true, pf_id)) | ||
5177 | igb_vfta_set(hw, vid, true); | ||
5178 | 5704 | ||
5705 | /* add the filter since PF can receive vlans w/o entry in vlvf */ | ||
5706 | igb_vfta_set(hw, vid, true); | ||
5179 | } | 5707 | } |
5180 | 5708 | ||
5181 | static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 5709 | static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
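Both the add and kill paths now touch the VFTA unconditionally: VLVF maps a VLAN to a pool bitmap for SR-IOV replication and can run out of entries, while VFTA is the 4096-bit filter the PF always needs. The VFTA addressing is plain bitmap math; a standalone sketch (layout as in the driver's igb_vfta_set(), shown here for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* 4096 VLAN IDs spread across 128 32-bit VFTA words */
    static void vfta_slot(uint16_t vid, uint32_t *index, uint32_t *mask)
    {
            *index = (vid >> 5) & 0x7F;  /* which VFTA register */
            *mask  = 1u << (vid & 0x1F); /* which bit within it */
    }

    int main(void)
    {
            uint32_t index, mask;

            vfta_slot(100, &index, &mask);
            printf("vid 100 -> VFTA[%u], bit mask 0x%08x\n", index, mask);
            return 0;
    }
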
@@ -5183,6 +5711,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
5183 | struct igb_adapter *adapter = netdev_priv(netdev); | 5711 | struct igb_adapter *adapter = netdev_priv(netdev); |
5184 | struct e1000_hw *hw = &adapter->hw; | 5712 | struct e1000_hw *hw = &adapter->hw; |
5185 | int pf_id = adapter->vfs_allocated_count; | 5713 | int pf_id = adapter->vfs_allocated_count; |
5714 | s32 err; | ||
5186 | 5715 | ||
5187 | igb_irq_disable(adapter); | 5716 | igb_irq_disable(adapter); |
5188 | vlan_group_set_device(adapter->vlgrp, vid, NULL); | 5717 | vlan_group_set_device(adapter->vlgrp, vid, NULL); |
@@ -5190,17 +5719,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
5190 | if (!test_bit(__IGB_DOWN, &adapter->state)) | 5719 | if (!test_bit(__IGB_DOWN, &adapter->state)) |
5191 | igb_irq_enable(adapter); | 5720 | igb_irq_enable(adapter); |
5192 | 5721 | ||
5193 | if ((adapter->hw.mng_cookie.status & | 5722 | /* remove vlan from VLVF table array */ |
5194 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | 5723 | err = igb_vlvf_set(adapter, vid, false, pf_id); |
5195 | (vid == adapter->mng_vlan_id)) { | ||
5196 | /* release control to f/w */ | ||
5197 | igb_release_hw_control(adapter); | ||
5198 | return; | ||
5199 | } | ||
5200 | 5724 | ||
5201 | /* remove vid from vlvf if sr-iov is enabled, | 5725 | /* if vid was not present in VLVF just remove it from table */ |
5202 | * if not in vlvf remove from vfta */ | 5726 | if (err) |
5203 | if (igb_vlvf_set(adapter, vid, false, pf_id)) | ||
5204 | igb_vfta_set(hw, vid, false); | 5727 | igb_vfta_set(hw, vid, false); |
5205 | } | 5728 | } |
5206 | 5729 | ||
@@ -5220,6 +5743,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter) | |||
5220 | 5743 | ||
5221 | int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) | 5744 | int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) |
5222 | { | 5745 | { |
5746 | struct pci_dev *pdev = adapter->pdev; | ||
5223 | struct e1000_mac_info *mac = &adapter->hw.mac; | 5747 | struct e1000_mac_info *mac = &adapter->hw.mac; |
5224 | 5748 | ||
5225 | mac->autoneg = 0; | 5749 | mac->autoneg = 0; |
@@ -5243,8 +5767,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) | |||
5243 | break; | 5767 | break; |
5244 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ | 5768 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ |
5245 | default: | 5769 | default: |
5246 | dev_err(&adapter->pdev->dev, | 5770 | dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); |
5247 | "Unsupported Speed/Duplex configuration\n"); | ||
5248 | return -EINVAL; | 5771 | return -EINVAL; |
5249 | } | 5772 | } |
5250 | return 0; | 5773 | return 0; |
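The switch above works because the ethtool SPEED_* constants equal their numeric speeds (SPEED_10 is 10, and so on) and DUPLEX_HALF/DUPLEX_FULL are 0/1, so speed plus duplex collapses into one unambiguous integer. A standalone sketch of the encoding (local constants stand in for the linux/ethtool.h values):

    #include <stdio.h>

    enum { DPLX_HALF = 0, DPLX_FULL = 1 };

    static const char *spd_dplx_name(int spddplx)
    {
            switch (spddplx) {
            case 10   + DPLX_HALF: return "10/half";
            case 10   + DPLX_FULL: return "10/full";
            case 100  + DPLX_HALF: return "100/half";
            case 100  + DPLX_FULL: return "100/full";
            case 1000 + DPLX_FULL: return "1000/full";
            default:               return "unsupported"; /* e.g. 1000/half */
            }
    }

    int main(void)
    {
            printf("%s\n", spd_dplx_name(100 + DPLX_FULL)); /* 100/full */
            return 0;
    }
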
@@ -5266,9 +5789,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5266 | if (netif_running(netdev)) | 5789 | if (netif_running(netdev)) |
5267 | igb_close(netdev); | 5790 | igb_close(netdev); |
5268 | 5791 | ||
5269 | igb_reset_interrupt_capability(adapter); | 5792 | igb_clear_interrupt_scheme(adapter); |
5270 | |||
5271 | igb_free_queues(adapter); | ||
5272 | 5793 | ||
5273 | #ifdef CONFIG_PM | 5794 | #ifdef CONFIG_PM |
5274 | retval = pci_save_state(pdev); | 5795 | retval = pci_save_state(pdev); |
@@ -5300,7 +5821,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5300 | wr32(E1000_CTRL, ctrl); | 5821 | wr32(E1000_CTRL, ctrl); |
5301 | 5822 | ||
5302 | /* Allow time for pending master requests to run */ | 5823 | /* Allow time for pending master requests to run */ |
5303 | igb_disable_pcie_master(&adapter->hw); | 5824 | igb_disable_pcie_master(hw); |
5304 | 5825 | ||
5305 | wr32(E1000_WUC, E1000_WUC_PME_EN); | 5826 | wr32(E1000_WUC, E1000_WUC_PME_EN); |
5306 | wr32(E1000_WUFC, wufc); | 5827 | wr32(E1000_WUFC, wufc); |
@@ -5311,7 +5832,9 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5311 | 5832 | ||
5312 | *enable_wake = wufc || adapter->en_mng_pt; | 5833 | *enable_wake = wufc || adapter->en_mng_pt; |
5313 | if (!*enable_wake) | 5834 | if (!*enable_wake) |
5314 | igb_shutdown_serdes_link_82575(hw); | 5835 | igb_power_down_link(adapter); |
5836 | else | ||
5837 | igb_power_up_link(adapter); | ||
5315 | 5838 | ||
5316 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 5839 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
5317 | * would have already happened in close and is redundant. */ | 5840 | * would have already happened in close and is redundant. */ |
@@ -5351,6 +5874,7 @@ static int igb_resume(struct pci_dev *pdev) | |||
5351 | 5874 | ||
5352 | pci_set_power_state(pdev, PCI_D0); | 5875 | pci_set_power_state(pdev, PCI_D0); |
5353 | pci_restore_state(pdev); | 5876 | pci_restore_state(pdev); |
5877 | pci_save_state(pdev); | ||
5354 | 5878 | ||
5355 | err = pci_enable_device_mem(pdev); | 5879 | err = pci_enable_device_mem(pdev); |
5356 | if (err) { | 5880 | if (err) { |
@@ -5363,15 +5887,11 @@ static int igb_resume(struct pci_dev *pdev) | |||
5363 | pci_enable_wake(pdev, PCI_D3hot, 0); | 5887 | pci_enable_wake(pdev, PCI_D3hot, 0); |
5364 | pci_enable_wake(pdev, PCI_D3cold, 0); | 5888 | pci_enable_wake(pdev, PCI_D3cold, 0); |
5365 | 5889 | ||
5366 | igb_set_interrupt_capability(adapter); | 5890 | if (igb_init_interrupt_scheme(adapter)) { |
5367 | |||
5368 | if (igb_alloc_queues(adapter)) { | ||
5369 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); | 5891 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); |
5370 | return -ENOMEM; | 5892 | return -ENOMEM; |
5371 | } | 5893 | } |
5372 | 5894 | ||
5373 | /* e1000_power_up_phy(adapter); */ | ||
5374 | |||
5375 | igb_reset(adapter); | 5895 | igb_reset(adapter); |
5376 | 5896 | ||
5377 | /* let the f/w know that the h/w is now under the control of the | 5897 | /* let the f/w know that the h/w is now under the control of the |
@@ -5417,22 +5937,16 @@ static void igb_netpoll(struct net_device *netdev) | |||
5417 | int i; | 5937 | int i; |
5418 | 5938 | ||
5419 | if (!adapter->msix_entries) { | 5939 | if (!adapter->msix_entries) { |
5940 | struct igb_q_vector *q_vector = adapter->q_vector[0]; | ||
5420 | igb_irq_disable(adapter); | 5941 | igb_irq_disable(adapter); |
5421 | napi_schedule(&adapter->rx_ring[0].napi); | 5942 | napi_schedule(&q_vector->napi); |
5422 | return; | 5943 | return; |
5423 | } | 5944 | } |
5424 | 5945 | ||
5425 | for (i = 0; i < adapter->num_tx_queues; i++) { | 5946 | for (i = 0; i < adapter->num_q_vectors; i++) { |
5426 | struct igb_ring *tx_ring = &adapter->tx_ring[i]; | 5947 | struct igb_q_vector *q_vector = adapter->q_vector[i]; |
5427 | wr32(E1000_EIMC, tx_ring->eims_value); | 5948 | wr32(E1000_EIMC, q_vector->eims_value); |
5428 | igb_clean_tx_irq(tx_ring); | 5949 | napi_schedule(&q_vector->napi); |
5429 | wr32(E1000_EIMS, tx_ring->eims_value); | ||
5430 | } | ||
5431 | |||
5432 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
5433 | struct igb_ring *rx_ring = &adapter->rx_ring[i]; | ||
5434 | wr32(E1000_EIMC, rx_ring->eims_value); | ||
5435 | napi_schedule(&rx_ring->napi); | ||
5436 | } | 5950 | } |
5437 | } | 5951 | } |
5438 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 5952 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
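netpoll now walks the q_vectors instead of separate tx and rx rings; each vector is masked and its NAPI handler scheduled, and that one handler cleans both directions. The per-vector step, annotated (kernel context assumed):

    wr32(E1000_EIMC, q_vector->eims_value); /* mask: no IRQ races the poll */
    napi_schedule(&q_vector->napi);         /* one NAPI covers tx and rx */
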
@@ -5486,6 +6000,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) | |||
5486 | } else { | 6000 | } else { |
5487 | pci_set_master(pdev); | 6001 | pci_set_master(pdev); |
5488 | pci_restore_state(pdev); | 6002 | pci_restore_state(pdev); |
6003 | pci_save_state(pdev); | ||
5489 | 6004 | ||
5490 | pci_enable_wake(pdev, PCI_D3hot, 0); | 6005 | pci_enable_wake(pdev, PCI_D3hot, 0); |
5491 | pci_enable_wake(pdev, PCI_D3cold, 0); | 6006 | pci_enable_wake(pdev, PCI_D3cold, 0); |
@@ -5532,6 +6047,33 @@ static void igb_io_resume(struct pci_dev *pdev) | |||
5532 | igb_get_hw_control(adapter); | 6047 | igb_get_hw_control(adapter); |
5533 | } | 6048 | } |
5534 | 6049 | ||
6050 | static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, | ||
6051 | u8 qsel) | ||
6052 | { | ||
6053 | u32 rar_low, rar_high; | ||
6054 | struct e1000_hw *hw = &adapter->hw; | ||
6055 | |||
6056 | /* HW expects these in little endian so we reverse the byte order | ||
6057 | * from network order (big endian) to little endian | ||
6058 | */ | ||
6059 | rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | | ||
6060 | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); | ||
6061 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); | ||
6062 | |||
6063 | /* Indicate to hardware the Address is Valid. */ | ||
6064 | rar_high |= E1000_RAH_AV; | ||
6065 | |||
6066 | if (hw->mac.type == e1000_82575) | ||
6067 | rar_high |= E1000_RAH_POOL_1 * qsel; | ||
6068 | else | ||
6069 | rar_high |= E1000_RAH_POOL_1 << qsel; | ||
6070 | |||
6071 | wr32(E1000_RAL(index), rar_low); | ||
6072 | wrfl(); | ||
6073 | wr32(E1000_RAH(index), rar_high); | ||
6074 | wrfl(); | ||
6075 | } | ||
6076 | |||
5535 | static int igb_set_vf_mac(struct igb_adapter *adapter, | 6077 | static int igb_set_vf_mac(struct igb_adapter *adapter, |
5536 | int vf, unsigned char *mac_addr) | 6078 | int vf, unsigned char *mac_addr) |
5537 | { | 6079 | { |
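igb_rar_set_qsel() above folds the network-order (big-endian) MAC address into the little-endian RAL/RAH register pair and ORs in the pool-select bits, which the 82575 encodes by scaling E1000_RAH_POOL_1 while later parts shift it. A standalone sketch of the packing (RAH_AV/RAH_POOL_1 values mirror e1000_defines.h at the time; illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define RAH_AV     0x80000000u /* address valid */
    #define RAH_POOL_1 0x00040000u /* first pool-select bit */

    static void pack_rar(const uint8_t addr[6], int is_82575,
                         uint32_t qsel, uint32_t *ral, uint32_t *rah)
    {
            *ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
            *rah = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8) | RAH_AV;
            *rah |= is_82575 ? RAH_POOL_1 * qsel : RAH_POOL_1 << qsel;
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
            uint32_t ral, rah;

            pack_rar(mac, 0, 2, &ral, &rah);
            printf("RAL=0x%08x RAH=0x%08x\n", ral, rah);
            return 0;
    }
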
@@ -5542,28 +6084,74 @@ static int igb_set_vf_mac(struct igb_adapter *adapter, | |||
5542 | 6084 | ||
5543 | memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); | 6085 | memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); |
5544 | 6086 | ||
5545 | igb_rar_set(hw, mac_addr, rar_entry); | 6087 | igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); |
5546 | igb_set_rah_pool(hw, vf, rar_entry); | 6088 | |
6089 | return 0; | ||
6090 | } | ||
6091 | |||
6092 | static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) | ||
6093 | { | ||
6094 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
6095 | if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) | ||
6096 | return -EINVAL; | ||
6097 | adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; | ||
6098 | dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); | ||
6099 | dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" | ||
6100 | " change effective."); | ||
6101 | if (test_bit(__IGB_DOWN, &adapter->state)) { | ||
6102 | dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," | ||
6103 | " but the PF device is not up.\n"); | ||
6104 | dev_warn(&adapter->pdev->dev, "Bring the PF device up before" | ||
6105 | " attempting to use the VF device.\n"); | ||
6106 | } | ||
6107 | return igb_set_vf_mac(adapter, vf, mac); | ||
6108 | } | ||
5547 | 6109 | ||
6110 | static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) | ||
6111 | { | ||
6112 | return -EOPNOTSUPP; | ||
6113 | } | ||
6114 | |||
6115 | static int igb_ndo_get_vf_config(struct net_device *netdev, | ||
6116 | int vf, struct ifla_vf_info *ivi) | ||
6117 | { | ||
6118 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
6119 | if (vf >= adapter->vfs_allocated_count) | ||
6120 | return -EINVAL; | ||
6121 | ivi->vf = vf; | ||
6122 | memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); | ||
6123 | ivi->tx_rate = 0; | ||
6124 | ivi->vlan = adapter->vf_data[vf].pf_vlan; | ||
6125 | ivi->qos = adapter->vf_data[vf].pf_qos; | ||
5548 | return 0; | 6126 | return 0; |
5549 | } | 6127 | } |
5550 | 6128 | ||
5551 | static void igb_vmm_control(struct igb_adapter *adapter) | 6129 | static void igb_vmm_control(struct igb_adapter *adapter) |
5552 | { | 6130 | { |
5553 | struct e1000_hw *hw = &adapter->hw; | 6131 | struct e1000_hw *hw = &adapter->hw; |
5554 | u32 reg_data; | 6132 | u32 reg; |
5555 | 6133 | ||
5556 | if (!adapter->vfs_allocated_count) | 6134 | /* replication is not supported for 82575 */ |
6135 | if (hw->mac.type == e1000_82575) | ||
5557 | return; | 6136 | return; |
5558 | 6137 | ||
5559 | /* VF's need PF reset indication before they | 6138 | /* enable replication vlan tag stripping */ |
5560 | * can send/receive mail */ | 6139 | reg = rd32(E1000_RPLOLR); |
5561 | reg_data = rd32(E1000_CTRL_EXT); | 6140 | reg |= E1000_RPLOLR_STRVLAN; |
5562 | reg_data |= E1000_CTRL_EXT_PFRSTD; | 6141 | wr32(E1000_RPLOLR, reg); |
5563 | wr32(E1000_CTRL_EXT, reg_data); | ||
5564 | 6142 | ||
5565 | igb_vmdq_set_loopback_pf(hw, true); | 6143 | /* notify HW that the MAC is adding vlan tags */ |
5566 | igb_vmdq_set_replication_pf(hw, true); | 6144 | reg = rd32(E1000_DTXCTL); |
6145 | reg |= E1000_DTXCTL_VLAN_ADDED; | ||
6146 | wr32(E1000_DTXCTL, reg); | ||
6147 | |||
6148 | if (adapter->vfs_allocated_count) { | ||
6149 | igb_vmdq_set_loopback_pf(hw, true); | ||
6150 | igb_vmdq_set_replication_pf(hw, true); | ||
6151 | } else { | ||
6152 | igb_vmdq_set_loopback_pf(hw, false); | ||
6153 | igb_vmdq_set_replication_pf(hw, false); | ||
6154 | } | ||
5567 | } | 6155 | } |
5568 | 6156 | ||
 5568 | 6156 ||
 5569 | /* igb_main.c */ | 6157 | /* igb_main.c */ |
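The reworked igb_vmm_control() encodes a simple policy: the 82575 has no VMDq replication at all, and for everything else the loopback and replication switches just track whether any VFs are allocated. A hedged fragment restating that (kernel context assumed):

    /* replication is not supported on 82575, so the function bails early */
    bool vfs_present = adapter->vfs_allocated_count != 0;

    igb_vmdq_set_loopback_pf(hw, vfs_present);    /* VF-to-VF traffic */
    igb_vmdq_set_replication_pf(hw, vfs_present); /* bcast/mcast copies */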