aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2013-04-19 14:19:07 -0400
committerDavid S. Miller <davem@davemloft.net>2013-04-19 14:19:07 -0400
commitc2962897c94605bc8f158a37dee8d867dda9f116 (patch)
treea81eabbd1edf494207c2412c274a2c649354a39e /drivers
parentc1cb0d3b569b1d21ada8666efb42dfd437617a09 (diff)
parentceb5f13b70cd6e7afa87ba1b13eb900a766a28e4 (diff)
Merge branch 'intel'
Jeff Kirsher says: ==================== This series contains updates to ixgbe and igb. The ixgbe changes contains 2 patches from the community, one which is a fix from akepner to fix an issue where netif_running() in shutdown was not done under rtnl_lock. The other community fix from Joe Perches cleans up #ifdef CONFIG_DEBUG_FS which is no longer necessary. The last ixgbe patch, from Jacob Keller, adds support for WoL on 82599 SFP+ LOM. The remaining patches are against igb, 10 of which were previously submitted in a pull request where changes were requested. The following igb patches: igb: Support for 100base-fx SFP igb: Support to read and export SFF-8472/8079 data are v2 based on feedback from Dan Carpenter and Ben Hutchings in the previous pull request. The largest set of changes are in my patch to cleanup code comments and whitespace to align the igb driver with the networking style of code comments. While cleaning up the code comments, fixed several other whitespace/checkpatch.pl code formatting issues. Other notable igb patches are EEE capable devices query the PHY to determine what the link partner is advertising, added support for i354 devices and added support for spoofchk config. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c252
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h50
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h57
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c93
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c124
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h17
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c11
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h52
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c27
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c259
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h53
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h133
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c345
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c29
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c1270
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c61
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
21 files changed, 1604 insertions, 1256 deletions
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 12b1d8480808..c9bba39d50bd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -100,6 +100,7 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
100 break; 100 break;
101 case e1000_82580: 101 case e1000_82580:
102 case e1000_i350: 102 case e1000_i350:
103 case e1000_i354:
103 case e1000_i210: 104 case e1000_i210:
104 case e1000_i211: 105 case e1000_i211:
105 reg = rd32(E1000_MDICNFG); 106 reg = rd32(E1000_MDICNFG);
@@ -149,6 +150,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
149 switch (hw->mac.type) { 150 switch (hw->mac.type) {
150 case e1000_82580: 151 case e1000_82580:
151 case e1000_i350: 152 case e1000_i350:
153 case e1000_i354:
152 phy->ops.read_reg = igb_read_phy_reg_82580; 154 phy->ops.read_reg = igb_read_phy_reg_82580;
153 phy->ops.write_reg = igb_write_phy_reg_82580; 155 phy->ops.write_reg = igb_write_phy_reg_82580;
154 break; 156 break;
@@ -174,13 +176,14 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
174 176
175 /* Verify phy id and set remaining function pointers */ 177 /* Verify phy id and set remaining function pointers */
176 switch (phy->id) { 178 switch (phy->id) {
179 case M88E1545_E_PHY_ID:
177 case I347AT4_E_PHY_ID: 180 case I347AT4_E_PHY_ID:
178 case M88E1112_E_PHY_ID: 181 case M88E1112_E_PHY_ID:
179 case M88E1111_I_PHY_ID: 182 case M88E1111_I_PHY_ID:
180 phy->type = e1000_phy_m88; 183 phy->type = e1000_phy_m88;
184 phy->ops.check_polarity = igb_check_polarity_m88;
181 phy->ops.get_phy_info = igb_get_phy_info_m88; 185 phy->ops.get_phy_info = igb_get_phy_info_m88;
182 if (phy->id == I347AT4_E_PHY_ID || 186 if (phy->id != M88E1111_I_PHY_ID)
183 phy->id == M88E1112_E_PHY_ID)
184 phy->ops.get_cable_length = 187 phy->ops.get_cable_length =
185 igb_get_cable_length_m88_gen2; 188 igb_get_cable_length_m88_gen2;
186 else 189 else
@@ -227,7 +230,7 @@ out:
227 * igb_init_nvm_params_82575 - Init NVM func ptrs. 230 * igb_init_nvm_params_82575 - Init NVM func ptrs.
228 * @hw: pointer to the HW structure 231 * @hw: pointer to the HW structure
229 **/ 232 **/
230s32 igb_init_nvm_params_82575(struct e1000_hw *hw) 233static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
231{ 234{
232 struct e1000_nvm_info *nvm = &hw->nvm; 235 struct e1000_nvm_info *nvm = &hw->nvm;
233 u32 eecd = rd32(E1000_EECD); 236 u32 eecd = rd32(E1000_EECD);
@@ -287,6 +290,7 @@ s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
287 nvm->ops.read = igb_read_nvm_spi; 290 nvm->ops.read = igb_read_nvm_spi;
288 nvm->ops.write = igb_write_nvm_spi; 291 nvm->ops.write = igb_write_nvm_spi;
289 break; 292 break;
293 case e1000_i354:
290 case e1000_i350: 294 case e1000_i350:
291 nvm->ops.validate = igb_validate_nvm_checksum_i350; 295 nvm->ops.validate = igb_validate_nvm_checksum_i350;
292 nvm->ops.update = igb_update_nvm_checksum_i350; 296 nvm->ops.update = igb_update_nvm_checksum_i350;
@@ -352,6 +356,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
352 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 356 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
353 break; 357 break;
354 case e1000_i350: 358 case e1000_i350:
359 case e1000_i354:
355 mac->rar_entry_count = E1000_RAR_ENTRIES_I350; 360 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
356 break; 361 break;
357 default: 362 default:
@@ -445,14 +450,18 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
445 case E1000_DEV_ID_I211_COPPER: 450 case E1000_DEV_ID_I211_COPPER:
446 mac->type = e1000_i211; 451 mac->type = e1000_i211;
447 break; 452 break;
453 case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
454 case E1000_DEV_ID_I354_SGMII:
455 case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
456 mac->type = e1000_i354;
457 break;
448 default: 458 default:
449 return -E1000_ERR_MAC_INIT; 459 return -E1000_ERR_MAC_INIT;
450 break; 460 break;
451 } 461 }
452 462
453 /* Set media type */ 463 /* Set media type */
454 /* 464 /* The 82575 uses bits 22:23 for link mode. The mode can be changed
455 * The 82575 uses bits 22:23 for link mode. The mode can be changed
456 * based on the EEPROM. We cannot rely upon device ID. There 465 * based on the EEPROM. We cannot rely upon device ID. There
457 * is no distinguishable difference between fiber and internal 466 * is no distinguishable difference between fiber and internal
458 * SerDes mode on the 82575. There can be an external PHY attached 467 * SerDes mode on the 82575. There can be an external PHY attached
@@ -621,8 +630,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
621 u32 ctrl_ext; 630 u32 ctrl_ext;
622 u32 mdic; 631 u32 mdic;
623 632
624 /* 633 /* For SGMII PHYs, we try the list of possible addresses until
625 * For SGMII PHYs, we try the list of possible addresses until
626 * we find one that works. For non-SGMII PHYs 634 * we find one that works. For non-SGMII PHYs
627 * (e.g. integrated copper PHYs), an address of 1 should 635 * (e.g. integrated copper PHYs), an address of 1 should
628 * work. The result of this function should mean phy->phy_addr 636 * work. The result of this function should mean phy->phy_addr
@@ -644,6 +652,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
644 break; 652 break;
645 case e1000_82580: 653 case e1000_82580:
646 case e1000_i350: 654 case e1000_i350:
655 case e1000_i354:
647 case e1000_i210: 656 case e1000_i210:
648 case e1000_i211: 657 case e1000_i211:
649 mdic = rd32(E1000_MDICNFG); 658 mdic = rd32(E1000_MDICNFG);
@@ -665,8 +674,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
665 wrfl(); 674 wrfl();
666 msleep(300); 675 msleep(300);
667 676
668 /* 677 /* The address field in the I2CCMD register is 3 bits and 0 is invalid.
669 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
670 * Therefore, we need to test 1-7 678 * Therefore, we need to test 1-7
671 */ 679 */
672 for (phy->addr = 1; phy->addr < 8; phy->addr++) { 680 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
@@ -674,8 +682,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
674 if (ret_val == 0) { 682 if (ret_val == 0) {
675 hw_dbg("Vendor ID 0x%08X read at address %u\n", 683 hw_dbg("Vendor ID 0x%08X read at address %u\n",
676 phy_id, phy->addr); 684 phy_id, phy->addr);
677 /* 685 /* At the time of this writing, The M88 part is
678 * At the time of this writing, The M88 part is
679 * the only supported SGMII PHY product. 686 * the only supported SGMII PHY product.
680 */ 687 */
681 if (phy_id == M88_VENDOR) 688 if (phy_id == M88_VENDOR)
@@ -711,15 +718,13 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
711{ 718{
712 s32 ret_val; 719 s32 ret_val;
713 720
714 /* 721 /* This isn't a true "hard" reset, but is the only reset
715 * This isn't a true "hard" reset, but is the only reset
716 * available to us at this time. 722 * available to us at this time.
717 */ 723 */
718 724
719 hw_dbg("Soft resetting SGMII attached PHY...\n"); 725 hw_dbg("Soft resetting SGMII attached PHY...\n");
720 726
721 /* 727 /* SFP documentation requires the following to configure the SPF module
722 * SFP documentation requires the following to configure the SPF module
723 * to work on SGMII. No further documentation is given. 728 * to work on SGMII. No further documentation is given.
724 */ 729 */
725 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); 730 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
@@ -774,8 +779,7 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
774 data &= ~IGP02E1000_PM_D0_LPLU; 779 data &= ~IGP02E1000_PM_D0_LPLU;
775 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 780 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
776 data); 781 data);
777 /* 782 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
778 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
779 * during Dx states where the power conservation is most 783 * during Dx states where the power conservation is most
780 * important. During driver activity we should enable 784 * important. During driver activity we should enable
781 * SmartSpeed, so performance is maintained. 785 * SmartSpeed, so performance is maintained.
@@ -838,8 +842,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
838 } else { 842 } else {
839 data &= ~E1000_82580_PM_D0_LPLU; 843 data &= ~E1000_82580_PM_D0_LPLU;
840 844
841 /* 845 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
842 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
843 * during Dx states where the power conservation is most 846 * during Dx states where the power conservation is most
844 * important. During driver activity we should enable 847 * important. During driver activity we should enable
845 * SmartSpeed, so performance is maintained. 848 * SmartSpeed, so performance is maintained.
@@ -867,7 +870,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
867 * During driver activity, SmartSpeed should be enabled so performance is 870 * During driver activity, SmartSpeed should be enabled so performance is
868 * maintained. 871 * maintained.
869 **/ 872 **/
870s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) 873static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
871{ 874{
872 struct e1000_phy_info *phy = &hw->phy; 875 struct e1000_phy_info *phy = &hw->phy;
873 s32 ret_val = 0; 876 s32 ret_val = 0;
@@ -877,8 +880,7 @@ s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
877 880
878 if (!active) { 881 if (!active) {
879 data &= ~E1000_82580_PM_D3_LPLU; 882 data &= ~E1000_82580_PM_D3_LPLU;
880 /* 883 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
881 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
882 * during Dx states where the power conservation is most 884 * during Dx states where the power conservation is most
883 * important. During driver activity we should enable 885 * important. During driver activity we should enable
884 * SmartSpeed, so performance is maintained. 886 * SmartSpeed, so performance is maintained.
@@ -964,8 +966,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
964 if (!(swfw_sync & (fwmask | swmask))) 966 if (!(swfw_sync & (fwmask | swmask)))
965 break; 967 break;
966 968
967 /* 969 /* Firmware currently using resource (fwmask)
968 * Firmware currently using resource (fwmask)
969 * or other software thread using resource (swmask) 970 * or other software thread using resource (swmask)
970 */ 971 */
971 igb_put_hw_semaphore(hw); 972 igb_put_hw_semaphore(hw);
@@ -1065,8 +1066,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1065 if (hw->phy.media_type != e1000_media_type_copper) { 1066 if (hw->phy.media_type != e1000_media_type_copper) {
1066 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 1067 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1067 &duplex); 1068 &duplex);
1068 /* 1069 /* Use this flag to determine if link needs to be checked or
1069 * Use this flag to determine if link needs to be checked or
1070 * not. If we have link clear the flag so that we do not 1070 * not. If we have link clear the flag so that we do not
1071 * continue to check for link. 1071 * continue to check for link.
1072 */ 1072 */
@@ -1135,15 +1135,13 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1135 *speed = 0; 1135 *speed = 0;
1136 *duplex = 0; 1136 *duplex = 0;
1137 1137
1138 /* 1138 /* Read the PCS Status register for link state. For non-copper mode,
1139 * Read the PCS Status register for link state. For non-copper mode,
1140 * the status register is not accurate. The PCS status register is 1139 * the status register is not accurate. The PCS status register is
1141 * used instead. 1140 * used instead.
1142 */ 1141 */
1143 pcs = rd32(E1000_PCS_LSTAT); 1142 pcs = rd32(E1000_PCS_LSTAT);
1144 1143
1145 /* 1144 /* The link up bit determines when link is up on autoneg. The sync ok
1146 * The link up bit determines when link is up on autoneg. The sync ok
1147 * gets set once both sides sync up and agree upon link. Stable link 1145 * gets set once both sides sync up and agree upon link. Stable link
1148 * can be determined by checking for both link up and link sync ok 1146 * can be determined by checking for both link up and link sync ok
1149 */ 1147 */
@@ -1214,8 +1212,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1214 u32 ctrl, icr; 1212 u32 ctrl, icr;
1215 s32 ret_val; 1213 s32 ret_val;
1216 1214
1217 /* 1215 /* Prevent the PCI-E bus from sticking if there is no TLP connection
1218 * Prevent the PCI-E bus from sticking if there is no TLP connection
1219 * on the last TLP read/write transaction when MAC is reset. 1216 * on the last TLP read/write transaction when MAC is reset.
1220 */ 1217 */
1221 ret_val = igb_disable_pcie_master(hw); 1218 ret_val = igb_disable_pcie_master(hw);
@@ -1244,8 +1241,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1244 1241
1245 ret_val = igb_get_auto_rd_done(hw); 1242 ret_val = igb_get_auto_rd_done(hw);
1246 if (ret_val) { 1243 if (ret_val) {
1247 /* 1244 /* When auto config read does not complete, do not
1248 * When auto config read does not complete, do not
1249 * return with an error. This can happen in situations 1245 * return with an error. This can happen in situations
1250 * where there is no eeprom and prevents getting link. 1246 * where there is no eeprom and prevents getting link.
1251 */ 1247 */
@@ -1287,7 +1283,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1287 1283
1288 /* Disabling VLAN filtering */ 1284 /* Disabling VLAN filtering */
1289 hw_dbg("Initializing the IEEE VLAN\n"); 1285 hw_dbg("Initializing the IEEE VLAN\n");
1290 if (hw->mac.type == e1000_i350) 1286 if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
1291 igb_clear_vfta_i350(hw); 1287 igb_clear_vfta_i350(hw);
1292 else 1288 else
1293 igb_clear_vfta(hw); 1289 igb_clear_vfta(hw);
@@ -1308,8 +1304,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1308 /* Setup link and flow control */ 1304 /* Setup link and flow control */
1309 ret_val = igb_setup_link(hw); 1305 ret_val = igb_setup_link(hw);
1310 1306
1311 /* 1307 /* Clear all of the statistics registers (clear on read). It is
1312 * Clear all of the statistics registers (clear on read). It is
1313 * important that we do this after we have tried to establish link 1308 * important that we do this after we have tried to establish link
1314 * because the symbol error count will increment wildly if there 1309 * because the symbol error count will increment wildly if there
1315 * is no link. 1310 * is no link.
@@ -1364,6 +1359,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1364 switch (hw->phy.id) { 1359 switch (hw->phy.id) {
1365 case I347AT4_E_PHY_ID: 1360 case I347AT4_E_PHY_ID:
1366 case M88E1112_E_PHY_ID: 1361 case M88E1112_E_PHY_ID:
1362 case M88E1545_E_PHY_ID:
1367 case I210_I_PHY_ID: 1363 case I210_I_PHY_ID:
1368 ret_val = igb_copper_link_setup_m88_gen2(hw); 1364 ret_val = igb_copper_link_setup_m88_gen2(hw);
1369 break; 1365 break;
@@ -1412,17 +1408,17 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1412 return ret_val; 1408 return ret_val;
1413 1409
1414 1410
1415 /* 1411 /* On the 82575, SerDes loopback mode persists until it is
1416 * On the 82575, SerDes loopback mode persists until it is
1417 * explicitly turned off or a power cycle is performed. A read to 1412 * explicitly turned off or a power cycle is performed. A read to
1418 * the register does not indicate its status. Therefore, we ensure 1413 * the register does not indicate its status. Therefore, we ensure
1419 * loopback mode is disabled during initialization. 1414 * loopback mode is disabled during initialization.
1420 */ 1415 */
1421 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1416 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1422 1417
1423 /* power on the sfp cage if present */ 1418 /* power on the sfp cage if present and turn on I2C */
1424 ctrl_ext = rd32(E1000_CTRL_EXT); 1419 ctrl_ext = rd32(E1000_CTRL_EXT);
1425 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 1420 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1421 ctrl_ext |= E1000_CTRL_I2C_ENA;
1426 wr32(E1000_CTRL_EXT, ctrl_ext); 1422 wr32(E1000_CTRL_EXT, ctrl_ext);
1427 1423
1428 ctrl_reg = rd32(E1000_CTRL); 1424 ctrl_reg = rd32(E1000_CTRL);
@@ -1466,8 +1462,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1466 pcs_autoneg = false; 1462 pcs_autoneg = false;
1467 } 1463 }
1468 1464
1469 /* 1465 /* non-SGMII modes only supports a speed of 1000/Full for the
1470 * non-SGMII modes only supports a speed of 1000/Full for the
1471 * link so it is best to just force the MAC and let the pcs 1466 * link so it is best to just force the MAC and let the pcs
1472 * link either autoneg or be forced to 1000/Full 1467 * link either autoneg or be forced to 1000/Full
1473 */ 1468 */
@@ -1481,8 +1476,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1481 1476
1482 wr32(E1000_CTRL, ctrl_reg); 1477 wr32(E1000_CTRL, ctrl_reg);
1483 1478
1484 /* 1479 /* New SerDes mode allows for forcing speed or autonegotiating speed
1485 * New SerDes mode allows for forcing speed or autonegotiating speed
1486 * at 1gb. Autoneg should be default set by most drivers. This is the 1480 * at 1gb. Autoneg should be default set by most drivers. This is the
1487 * mode that will be compatible with older link partners and switches. 1481 * mode that will be compatible with older link partners and switches.
1488 * However, both are supported by the hardware and some drivers/tools. 1482 * However, both are supported by the hardware and some drivers/tools.
@@ -1592,8 +1586,7 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1592{ 1586{
1593 s32 ret_val = 0; 1587 s32 ret_val = 0;
1594 1588
1595 /* 1589 /* If there's an alternate MAC address place it in RAR0
1596 * If there's an alternate MAC address place it in RAR0
1597 * so that it will override the Si installed default perm 1590 * so that it will override the Si installed default perm
1598 * address. 1591 * address.
1599 */ 1592 */
@@ -1777,8 +1770,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
1777 if (gcr & E1000_GCR_CMPL_TMOUT_MASK) 1770 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1778 goto out; 1771 goto out;
1779 1772
1780 /* 1773 /* if capabilities version is type 1 we can write the
1781 * if capababilities version is type 1 we can write the
1782 * timeout of 10ms to 200ms through the GCR register 1774 * timeout of 10ms to 200ms through the GCR register
1783 */ 1775 */
1784 if (!(gcr & E1000_GCR_CAP_VER2)) { 1776 if (!(gcr & E1000_GCR_CAP_VER2)) {
@@ -1786,8 +1778,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
1786 goto out; 1778 goto out;
1787 } 1779 }
1788 1780
1789 /* 1781 /* for version 2 capabilities we need to write the config space
1790 * for version 2 capabilities we need to write the config space
1791 * directly in order to set the completion timeout value for 1782 * directly in order to set the completion timeout value for
1792 * 16ms to 55ms 1783 * 16ms to 55ms
1793 */ 1784 */
@@ -1825,6 +1816,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1825 reg_offset = E1000_DTXSWC; 1816 reg_offset = E1000_DTXSWC;
1826 break; 1817 break;
1827 case e1000_i350: 1818 case e1000_i350:
1819 case e1000_i354:
1828 reg_offset = E1000_TXSWC; 1820 reg_offset = E1000_TXSWC;
1829 break; 1821 break;
1830 default: 1822 default:
@@ -1866,6 +1858,7 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1866 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1858 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1867 wr32(E1000_DTXSWC, dtxswc); 1859 wr32(E1000_DTXSWC, dtxswc);
1868 break; 1860 break;
1861 case e1000_i354:
1869 case e1000_i350: 1862 case e1000_i350:
1870 dtxswc = rd32(E1000_TXSWC); 1863 dtxswc = rd32(E1000_TXSWC);
1871 if (enable) 1864 if (enable)
@@ -1879,7 +1872,6 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1879 break; 1872 break;
1880 } 1873 }
1881 1874
1882
1883} 1875}
1884 1876
1885/** 1877/**
@@ -1914,7 +1906,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1914{ 1906{
1915 s32 ret_val; 1907 s32 ret_val;
1916 1908
1917
1918 ret_val = hw->phy.ops.acquire(hw); 1909 ret_val = hw->phy.ops.acquire(hw);
1919 if (ret_val) 1910 if (ret_val)
1920 goto out; 1911 goto out;
@@ -2016,8 +2007,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2016 /* Get current control state. */ 2007 /* Get current control state. */
2017 ctrl = rd32(E1000_CTRL); 2008 ctrl = rd32(E1000_CTRL);
2018 2009
2019 /* 2010 /* Prevent the PCI-E bus from sticking if there is no TLP connection
2020 * Prevent the PCI-E bus from sticking if there is no TLP connection
2021 * on the last TLP read/write transaction when MAC is reset. 2011 * on the last TLP read/write transaction when MAC is reset.
2022 */ 2012 */
2023 ret_val = igb_disable_pcie_master(hw); 2013 ret_val = igb_disable_pcie_master(hw);
@@ -2052,8 +2042,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2052 2042
2053 ret_val = igb_get_auto_rd_done(hw); 2043 ret_val = igb_get_auto_rd_done(hw);
2054 if (ret_val) { 2044 if (ret_val) {
2055 /* 2045 /* When auto config read does not complete, do not
2056 * When auto config read does not complete, do not
2057 * return with an error. This can happen in situations 2046 * return with an error. This can happen in situations
2058 * where there is no eeprom and prevents getting link. 2047 * where there is no eeprom and prevents getting link.
2059 */ 2048 */
@@ -2197,7 +2186,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2197 2186
2198 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 2187 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2199 /* if checksums compatibility bit is set validate checksums 2188 /* if checksums compatibility bit is set validate checksums
2200 * for all 4 ports. */ 2189 * for all 4 ports.
2190 */
2201 eeprom_regions_count = 4; 2191 eeprom_regions_count = 4;
2202 } 2192 }
2203 2193
@@ -2309,6 +2299,41 @@ out:
2309} 2299}
2310 2300
2311/** 2301/**
2302 * __igb_access_emi_reg - Read/write EMI register
2303 * @hw: pointer to the HW structure
2304 * @addr: EMI address to program
2305 * @data: pointer to value to read/write from/to the EMI address
2306 * @read: boolean flag to indicate read or write
2307 **/
2308static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2309 u16 *data, bool read)
2310{
2311 s32 ret_val = E1000_SUCCESS;
2312
2313 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2314 if (ret_val)
2315 return ret_val;
2316
2317 if (read)
2318 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
2319 else
2320 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
2321
2322 return ret_val;
2323}
2324
2325/**
2326 * igb_read_emi_reg - Read Extended Management Interface register
2327 * @hw: pointer to the HW structure
2328 * @addr: EMI address to program
2329 * @data: value to be read from the EMI address
2330 **/
2331s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2332{
2333 return __igb_access_emi_reg(hw, addr, data, true);
2334}
2335
2336/**
2312 * igb_set_eee_i350 - Enable/disable EEE support 2337 * igb_set_eee_i350 - Enable/disable EEE support
2313 * @hw: pointer to the HW structure 2338 * @hw: pointer to the HW structure
2314 * 2339 *
@@ -2338,7 +2363,6 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2338 if (eee_su & E1000_EEE_SU_LPI_CLK_STP) 2363 if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2339 hw_dbg("LPI Clock Stop Bit should not be set!\n"); 2364 hw_dbg("LPI Clock Stop Bit should not be set!\n");
2340 2365
2341
2342 } else { 2366 } else {
2343 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | 2367 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2344 E1000_IPCNFG_EEE_100M_AN); 2368 E1000_IPCNFG_EEE_100M_AN);
@@ -2355,6 +2379,108 @@ out:
2355 return ret_val; 2379 return ret_val;
2356} 2380}
2357 2381
2382/**
2383 * igb_set_eee_i354 - Enable/disable EEE support
2384 * @hw: pointer to the HW structure
2385 *
2386 * Enable/disable EEE legacy mode based on setting in dev_spec structure.
2387 *
2388 **/
2389s32 igb_set_eee_i354(struct e1000_hw *hw)
2390{
2391 struct e1000_phy_info *phy = &hw->phy;
2392 s32 ret_val = 0;
2393 u16 phy_data;
2394
2395 if ((hw->phy.media_type != e1000_media_type_copper) ||
2396 (phy->id != M88E1545_E_PHY_ID))
2397 goto out;
2398
2399 if (!hw->dev_spec._82575.eee_disable) {
2400 /* Switch to PHY page 18. */
2401 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
2402 if (ret_val)
2403 goto out;
2404
2405 ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
2406 &phy_data);
2407 if (ret_val)
2408 goto out;
2409
2410 phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
2411 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
2412 phy_data);
2413 if (ret_val)
2414 goto out;
2415
2416 /* Return the PHY to page 0. */
2417 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
2418 if (ret_val)
2419 goto out;
2420
2421 /* Turn on EEE advertisement. */
2422 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2423 E1000_EEE_ADV_DEV_I354,
2424 &phy_data);
2425 if (ret_val)
2426 goto out;
2427
2428 phy_data |= E1000_EEE_ADV_100_SUPPORTED |
2429 E1000_EEE_ADV_1000_SUPPORTED;
2430 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2431 E1000_EEE_ADV_DEV_I354,
2432 phy_data);
2433 } else {
2434 /* Turn off EEE advertisement. */
2435 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2436 E1000_EEE_ADV_DEV_I354,
2437 &phy_data);
2438 if (ret_val)
2439 goto out;
2440
2441 phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
2442 E1000_EEE_ADV_1000_SUPPORTED);
2443 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2444 E1000_EEE_ADV_DEV_I354,
2445 phy_data);
2446 }
2447
2448out:
2449 return ret_val;
2450}
2451
2452/**
2453 * igb_get_eee_status_i354 - Get EEE status
2454 * @hw: pointer to the HW structure
2455 * @status: EEE status
2456 *
2457 * Get EEE status by guessing based on whether Tx or Rx LPI indications have
2458 * been received.
2459 **/
2460s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
2461{
2462 struct e1000_phy_info *phy = &hw->phy;
2463 s32 ret_val = 0;
2464 u16 phy_data;
2465
2466 /* Check if EEE is supported on this device. */
2467 if ((hw->phy.media_type != e1000_media_type_copper) ||
2468 (phy->id != M88E1545_E_PHY_ID))
2469 goto out;
2470
2471 ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
2472 E1000_PCS_STATUS_DEV_I354,
2473 &phy_data);
2474 if (ret_val)
2475 goto out;
2476
2477 *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
2478 E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
2479
2480out:
2481 return ret_val;
2482}
2483
2358static const u8 e1000_emc_temp_data[4] = { 2484static const u8 e1000_emc_temp_data[4] = {
2359 E1000_EMC_INTERNAL_DATA, 2485 E1000_EMC_INTERNAL_DATA,
2360 E1000_EMC_DIODE1_DATA, 2486 E1000_EMC_DIODE1_DATA,
@@ -2368,11 +2494,12 @@ static const u8 e1000_emc_therm_limit[4] = {
2368 E1000_EMC_DIODE3_THERM_LIMIT 2494 E1000_EMC_DIODE3_THERM_LIMIT
2369}; 2495};
2370 2496
2371/* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data 2497/**
2498 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
2372 * @hw: pointer to hardware structure 2499 * @hw: pointer to hardware structure
2373 * 2500 *
2374 * Updates the temperatures in mac.thermal_sensor_data 2501 * Updates the temperatures in mac.thermal_sensor_data
2375 */ 2502 **/
2376s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) 2503s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2377{ 2504{
2378 s32 status = E1000_SUCCESS; 2505 s32 status = E1000_SUCCESS;
@@ -2420,12 +2547,13 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2420 return status; 2547 return status;
2421} 2548}
2422 2549
2423/* igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds 2550/**
2551 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
2424 * @hw: pointer to hardware structure 2552 * @hw: pointer to hardware structure
2425 * 2553 *
2426 * Sets the thermal sensor thresholds according to the NVM map 2554 * Sets the thermal sensor thresholds according to the NVM map
2427 * and save off the threshold and location values into mac.thermal_sensor_data 2555 * and save off the threshold and location values into mac.thermal_sensor_data
2428 */ 2556 **/
2429s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) 2557s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2430{ 2558{
2431 s32 status = E1000_SUCCESS; 2559 s32 status = E1000_SUCCESS;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 73ab41f0e032..74a1506b4235 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -263,7 +263,9 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
263void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); 263void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
264void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); 264void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
265u16 igb_rxpbs_adjust_82580(u32 data); 265u16 igb_rxpbs_adjust_82580(u32 data);
266s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
266s32 igb_set_eee_i350(struct e1000_hw *); 267s32 igb_set_eee_i350(struct e1000_hw *);
268s32 igb_set_eee_i354(struct e1000_hw *);
267s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *); 269s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
268s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw); 270s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
269 271
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 7e13337d3b9d..31a0f82cc650 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -138,8 +138,7 @@
138#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ 138#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
139#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 139#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
140 140
141/* 141/* Use byte values for the following shift parameters
142 * Use byte values for the following shift parameters
143 * Usage: 142 * Usage:
144 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & 143 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
145 * E1000_PSRCTL_BSIZE0_MASK) | 144 * E1000_PSRCTL_BSIZE0_MASK) |
@@ -237,11 +236,14 @@
237#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 236#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
238/* BMC external code execution disabled */ 237/* BMC external code execution disabled */
239 238
239#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
240#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
240/* Constants used to interpret the masked PCI-X bus speed. */ 241/* Constants used to interpret the masked PCI-X bus speed. */
241 242
242#define SPEED_10 10 243#define SPEED_10 10
243#define SPEED_100 100 244#define SPEED_100 100
244#define SPEED_1000 1000 245#define SPEED_1000 1000
246#define SPEED_2500 2500
245#define HALF_DUPLEX 1 247#define HALF_DUPLEX 1
246#define FULL_DUPLEX 2 248#define FULL_DUPLEX 2
247 249
@@ -382,8 +384,7 @@
382#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ 384#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
383/* TCP Timer */ 385/* TCP Timer */
384 386
385/* 387/* This defines the bits that are set in the Interrupt Mask
386 * This defines the bits that are set in the Interrupt Mask
387 * Set/Read Register. Each bit is documented below: 388 * Set/Read Register. Each bit is documented below:
388 * o RXT0 = Receiver Timer Interrupt (ring 0) 389 * o RXT0 = Receiver Timer Interrupt (ring 0)
389 * o TXDW = Transmit Descriptor Written Back 390 * o TXDW = Transmit Descriptor Written Back
@@ -440,8 +441,7 @@
440#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ 441#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
441 442
442/* Receive Address */ 443/* Receive Address */
443/* 444/* Number of high/low register pairs in the RAR. The RAR (Receive Address
444 * Number of high/low register pairs in the RAR. The RAR (Receive Address
445 * Registers) holds the directed and multicast addresses that we monitor. 445 * Registers) holds the directed and multicast addresses that we monitor.
446 * Technically, we have 16 spots. However, we reserve one of these spots 446 * Technically, we have 16 spots. However, we reserve one of these spots
447 * (RAR[15]) for our directed address used by controllers with 447 * (RAR[15]) for our directed address used by controllers with
@@ -760,8 +760,7 @@
760#define MAX_PHY_MULTI_PAGE_REG 0xF 760#define MAX_PHY_MULTI_PAGE_REG 0xF
761 761
762/* Bit definitions for valid PHY IDs. */ 762/* Bit definitions for valid PHY IDs. */
763/* 763/* I = Integrated
764 * I = Integrated
765 * E = External 764 * E = External
766 */ 765 */
767#define M88E1111_I_PHY_ID 0x01410CC0 766#define M88E1111_I_PHY_ID 0x01410CC0
@@ -772,6 +771,7 @@
772#define I350_I_PHY_ID 0x015403B0 771#define I350_I_PHY_ID 0x015403B0
773#define M88_VENDOR 0x0141 772#define M88_VENDOR 0x0141
774#define I210_I_PHY_ID 0x01410C00 773#define I210_I_PHY_ID 0x01410C00
774#define M88E1545_E_PHY_ID 0x01410EA0
775 775
776/* M88E1000 Specific Registers */ 776/* M88E1000 Specific Registers */
777#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 777#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -791,8 +791,7 @@
791#define M88E1000_PSCR_AUTO_X_1000T 0x0040 791#define M88E1000_PSCR_AUTO_X_1000T 0x0040
792/* Auto crossover enabled all speeds */ 792/* Auto crossover enabled all speeds */
793#define M88E1000_PSCR_AUTO_X_MODE 0x0060 793#define M88E1000_PSCR_AUTO_X_MODE 0x0060
794/* 794/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
795 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
796 * 0=Normal 10BASE-T Rx Threshold 795 * 0=Normal 10BASE-T Rx Threshold
797 */ 796 */
798/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ 797/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
@@ -802,8 +801,7 @@
802#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ 801#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
803#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ 802#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
804#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ 803#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
805/* 804/* 0 = <50M
806 * 0 = <50M
807 * 1 = 50-80M 805 * 1 = 50-80M
808 * 2 = 80-110M 806 * 2 = 80-110M
809 * 3 = 110-140M 807 * 3 = 110-140M
@@ -816,20 +814,17 @@
816#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 814#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
817 815
818/* M88E1000 Extended PHY Specific Control Register */ 816/* M88E1000 Extended PHY Specific Control Register */
819/* 817/* 1 = Lost lock detect enabled.
820 * 1 = Lost lock detect enabled.
821 * Will assert lost lock and bring 818 * Will assert lost lock and bring
822 * link down if idle not seen 819 * link down if idle not seen
823 * within 1ms in 1000BASE-T 820 * within 1ms in 1000BASE-T
824 */ 821 */
825/* 822/* Number of times we will attempt to autonegotiate before downshifting if we
826 * Number of times we will attempt to autonegotiate before downshifting if we
827 * are the master 823 * are the master
828 */ 824 */
829#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 825#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
830#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 826#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
831/* 827/* Number of times we will attempt to autonegotiate before downshifting if we
832 * Number of times we will attempt to autonegotiate before downshifting if we
833 * are the slave 828 * are the slave
834 */ 829 */
835#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 830#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
@@ -844,8 +839,7 @@
844 839
845/* i347-AT4 Extended PHY Specific Control Register */ 840/* i347-AT4 Extended PHY Specific Control Register */
846 841
847/* 842/* Number of times we will attempt to autonegotiate before downshifting if we
848 * Number of times we will attempt to autonegotiate before downshifting if we
849 * are the master 843 * are the master
850 */ 844 */
851#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 845#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
@@ -895,6 +889,22 @@
895#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ 889#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
896#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ 890#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
897#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ 891#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
892#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
893#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
894#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
895#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
896#define E1000_M88E1545_PAGE_ADDR 0x16 /* Page Offset Register */
897#define E1000_M88E1545_EEE_CTRL_1 0x0
898#define E1000_M88E1545_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
899#define E1000_EEE_ADV_DEV_I354 7
900#define E1000_EEE_ADV_ADDR_I354 60
901#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
902#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
903#define E1000_PCS_STATUS_DEV_I354 3
904#define E1000_PCS_STATUS_ADDR_I354 1
905#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
906#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
907#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
898 908
899/* SerDes Control */ 909/* SerDes Control */
900#define E1000_GEN_CTL_READY 0x80000000 910#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 0d5cf9c63d0d..1138ccaf95ff 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -38,31 +38,31 @@
38 38
39struct e1000_hw; 39struct e1000_hw;
40 40
41#define E1000_DEV_ID_82576 0x10C9 41#define E1000_DEV_ID_82576 0x10C9
42#define E1000_DEV_ID_82576_FIBER 0x10E6 42#define E1000_DEV_ID_82576_FIBER 0x10E6
43#define E1000_DEV_ID_82576_SERDES 0x10E7 43#define E1000_DEV_ID_82576_SERDES 0x10E7
44#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 44#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
45#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 45#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
46#define E1000_DEV_ID_82576_NS 0x150A 46#define E1000_DEV_ID_82576_NS 0x150A
47#define E1000_DEV_ID_82576_NS_SERDES 0x1518 47#define E1000_DEV_ID_82576_NS_SERDES 0x1518
48#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D 48#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
49#define E1000_DEV_ID_82575EB_COPPER 0x10A7 49#define E1000_DEV_ID_82575EB_COPPER 0x10A7
50#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 50#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
51#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 51#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
52#define E1000_DEV_ID_82580_COPPER 0x150E 52#define E1000_DEV_ID_82580_COPPER 0x150E
53#define E1000_DEV_ID_82580_FIBER 0x150F 53#define E1000_DEV_ID_82580_FIBER 0x150F
54#define E1000_DEV_ID_82580_SERDES 0x1510 54#define E1000_DEV_ID_82580_SERDES 0x1510
55#define E1000_DEV_ID_82580_SGMII 0x1511 55#define E1000_DEV_ID_82580_SGMII 0x1511
56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 57#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
58#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 58#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
59#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A 59#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
60#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C 60#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
61#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 61#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
62#define E1000_DEV_ID_I350_COPPER 0x1521 62#define E1000_DEV_ID_I350_COPPER 0x1521
63#define E1000_DEV_ID_I350_FIBER 0x1522 63#define E1000_DEV_ID_I350_FIBER 0x1522
64#define E1000_DEV_ID_I350_SERDES 0x1523 64#define E1000_DEV_ID_I350_SERDES 0x1523
65#define E1000_DEV_ID_I350_SGMII 0x1524 65#define E1000_DEV_ID_I350_SGMII 0x1524
66#define E1000_DEV_ID_I210_COPPER 0x1533 66#define E1000_DEV_ID_I210_COPPER 0x1533
67#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 67#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
68#define E1000_DEV_ID_I210_COPPER_IT 0x1535 68#define E1000_DEV_ID_I210_COPPER_IT 0x1535
@@ -70,6 +70,9 @@ struct e1000_hw;
70#define E1000_DEV_ID_I210_SERDES 0x1537 70#define E1000_DEV_ID_I210_SERDES 0x1537
71#define E1000_DEV_ID_I210_SGMII 0x1538 71#define E1000_DEV_ID_I210_SGMII 0x1538
72#define E1000_DEV_ID_I211_COPPER 0x1539 72#define E1000_DEV_ID_I211_COPPER 0x1539
73#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
74#define E1000_DEV_ID_I354_SGMII 0x1F41
75#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
73 76
74#define E1000_REVISION_2 2 77#define E1000_REVISION_2 2
75#define E1000_REVISION_4 4 78#define E1000_REVISION_4 4
@@ -90,6 +93,7 @@ enum e1000_mac_type {
90 e1000_82576, 93 e1000_82576,
91 e1000_82580, 94 e1000_82580,
92 e1000_i350, 95 e1000_i350,
96 e1000_i354,
93 e1000_i210, 97 e1000_i210,
94 e1000_i211, 98 e1000_i211,
95 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ 99 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
@@ -98,7 +102,8 @@ enum e1000_mac_type {
98enum e1000_media_type { 102enum e1000_media_type {
99 e1000_media_type_unknown = 0, 103 e1000_media_type_unknown = 0,
100 e1000_media_type_copper = 1, 104 e1000_media_type_copper = 1,
101 e1000_media_type_internal_serdes = 2, 105 e1000_media_type_fiber = 2,
106 e1000_media_type_internal_serdes = 3,
102 e1000_num_media_types 107 e1000_num_media_types
103}; 108};
104 109
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 6a42344f24f1..9764cd3610e5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -103,7 +103,7 @@ void igb_release_nvm_i210(struct e1000_hw *hw)
103 * @hw: pointer to the HW structure 103 * @hw: pointer to the HW structure
104 * 104 *
105 * Release hardware semaphore used to access the PHY or NVM 105 * Release hardware semaphore used to access the PHY or NVM
106 */ 106 **/
107static void igb_put_hw_semaphore_i210(struct e1000_hw *hw) 107static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
108{ 108{
109 u32 swsm; 109 u32 swsm;
@@ -141,9 +141,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
141 if (!(swfw_sync & fwmask)) 141 if (!(swfw_sync & fwmask))
142 break; 142 break;
143 143
144 /* 144 /* Firmware currently using resource (fwmask) */
145 * Firmware currently using resource (fwmask)
146 */
147 igb_put_hw_semaphore_i210(hw); 145 igb_put_hw_semaphore_i210(hw);
148 mdelay(5); 146 mdelay(5);
149 i++; 147 i++;
@@ -203,7 +201,8 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
203 201
204 /* We cannot hold synchronization semaphores for too long, 202 /* We cannot hold synchronization semaphores for too long,
205 * because of forceful takeover procedure. However it is more efficient 203 * because of forceful takeover procedure. However it is more efficient
206 * to read in bursts than synchronizing access for each word. */ 204 * to read in bursts than synchronizing access for each word.
205 */
207 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { 206 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
208 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 207 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
209 E1000_EERD_EEWR_MAX_COUNT : (words - i); 208 E1000_EERD_EEWR_MAX_COUNT : (words - i);
@@ -242,8 +241,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
242 u32 attempts = 100000; 241 u32 attempts = 100000;
243 s32 ret_val = E1000_SUCCESS; 242 s32 ret_val = E1000_SUCCESS;
244 243
245 /* 244 /* A check for invalid values: offset too large, too many words,
246 * A check for invalid values: offset too large, too many words,
247 * too many words for the offset, and not enough words. 245 * too many words for the offset, and not enough words.
248 */ 246 */
249 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 247 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -294,7 +292,7 @@ out:
294 * 292 *
295 * If error code is returned, data and Shadow RAM may be inconsistent - buffer 293 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
296 * partially written. 294 * partially written.
297 */ 295 **/
298s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, 296s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
299 u16 *data) 297 u16 *data)
300{ 298{
@@ -326,7 +324,7 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
326/** 324/**
327 * igb_read_nvm_i211 - Read NVM wrapper function for I211 325 * igb_read_nvm_i211 - Read NVM wrapper function for I211
328 * @hw: pointer to the HW structure 326 * @hw: pointer to the HW structure
329 * @address: the word address (aka eeprom offset) to read 327 * @words: number of words to read
330 * @data: pointer to the data read 328 * @data: pointer to the data read
331 * 329 *
332 * Wrapper function to return data formerly found in the NVM. 330 * Wrapper function to return data formerly found in the NVM.
@@ -549,8 +547,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
549 547
550 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 548 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
551 549
552 /* 550 /* Replace the read function with semaphore grabbing with
553 * Replace the read function with semaphore grabbing with
554 * the one that skips this for a while. 551 * the one that skips this for a while.
555 * We have semaphore taken already here. 552 * We have semaphore taken already here.
556 */ 553 */
@@ -570,7 +567,6 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
570 return status; 567 return status;
571} 568}
572 569
573
574/** 570/**
575 * igb_update_nvm_checksum_i210 - Update EEPROM checksum 571 * igb_update_nvm_checksum_i210 - Update EEPROM checksum
576 * @hw: pointer to the HW structure 572 * @hw: pointer to the HW structure
@@ -585,8 +581,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
585 u16 checksum = 0; 581 u16 checksum = 0;
586 u16 i, nvm_data; 582 u16 i, nvm_data;
587 583
588 /* 584 /* Read the first word from the EEPROM. If this times out or fails, do
589 * Read the first word from the EEPROM. If this times out or fails, do
590 * not continue or we could be in for a very long wait while every 585 * not continue or we could be in for a very long wait while every
591 * EEPROM read fails 586 * EEPROM read fails
592 */ 587 */
@@ -597,8 +592,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
597 } 592 }
598 593
599 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { 594 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
600 /* 595 /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
601 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
602 * because we do not want to take the synchronization 596 * because we do not want to take the synchronization
603 * semaphores twice here. 597 * semaphores twice here.
604 */ 598 */
@@ -635,7 +629,7 @@ out:
635 * igb_pool_flash_update_done_i210 - Pool FLUDONE status. 629 * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
636 * @hw: pointer to the HW structure 630 * @hw: pointer to the HW structure
637 * 631 *
638 */ 632 **/
639static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) 633static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
640{ 634{
641 s32 ret_val = -E1000_ERR_NVM; 635 s32 ret_val = -E1000_ERR_NVM;
@@ -714,3 +708,68 @@ s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
714out: 708out:
715 return ret_val; 709 return ret_val;
716} 710}
711
712/**
713 * __igb_access_xmdio_reg - Read/write XMDIO register
714 * @hw: pointer to the HW structure
715 * @address: XMDIO address to program
716 * @dev_addr: device address to program
717 * @data: pointer to value to read/write from/to the XMDIO address
718 * @read: boolean flag to indicate read or write
719 **/
720static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
721 u8 dev_addr, u16 *data, bool read)
722{
723 s32 ret_val = E1000_SUCCESS;
724
725 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
726 if (ret_val)
727 return ret_val;
728
729 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
730 if (ret_val)
731 return ret_val;
732
733 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
734 dev_addr);
735 if (ret_val)
736 return ret_val;
737
738 if (read)
739 ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
740 else
741 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
742 if (ret_val)
743 return ret_val;
744
745 /* Recalibrate the device back to 0 */
746 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
747 if (ret_val)
748 return ret_val;
749
750 return ret_val;
751}
752
753/**
754 * igb_read_xmdio_reg - Read XMDIO register
755 * @hw: pointer to the HW structure
756 * @addr: XMDIO address to program
757 * @dev_addr: device address to program
758 * @data: value to be read from the EMI address
759 **/
760s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
761{
762 return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
763}
764
765/**
766 * igb_write_xmdio_reg - Write XMDIO register
767 * @hw: pointer to the HW structure
768 * @addr: XMDIO address to program
769 * @dev_addr: device address to program
770 * @data: value to be written to the XMDIO address
771 **/
772s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
773{
774 return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
775}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index e4e1a73b7c75..bfc08e05c907 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -45,6 +45,10 @@ extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
45 u16 *data); 45 u16 *data);
46extern s32 igb_read_invm_version(struct e1000_hw *hw, 46extern s32 igb_read_invm_version(struct e1000_hw *hw,
47 struct e1000_fw_version *invm_ver); 47 struct e1000_fw_version *invm_ver);
48extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
49 u16 *data);
50extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
51 u16 data);
48 52
49#define E1000_STM_OPCODE 0xDB00 53#define E1000_STM_OPCODE 0xDB00
50#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 54#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index a5c7200b9a71..2559d70a2321 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -214,7 +214,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
214 else 214 else
215 vfta &= ~mask; 215 vfta &= ~mask;
216 } 216 }
217 if (hw->mac.type == e1000_i350) 217 if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
218 igb_write_vfta_i350(hw, index, vfta); 218 igb_write_vfta_i350(hw, index, vfta);
219 else 219 else
220 igb_write_vfta(hw, index, vfta); 220 igb_write_vfta(hw, index, vfta);
@@ -230,8 +230,8 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
230 * Checks the nvm for an alternate MAC address. An alternate MAC address 230 * Checks the nvm for an alternate MAC address. An alternate MAC address
231 * can be setup by pre-boot software and must be treated like a permanent 231 * can be setup by pre-boot software and must be treated like a permanent
232 * address and must override the actual permanent MAC address. If an 232 * address and must override the actual permanent MAC address. If an
233 * alternate MAC address is fopund it is saved in the hw struct and 233 * alternate MAC address is found it is saved in the hw struct and
234 * prgrammed into RAR0 and the cuntion returns success, otherwise the 234 * programmed into RAR0 and the function returns success, otherwise the
235 * function returns an error. 235 * function returns an error.
236 **/ 236 **/
237s32 igb_check_alt_mac_addr(struct e1000_hw *hw) 237s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
@@ -241,8 +241,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
241 u16 offset, nvm_alt_mac_addr_offset, nvm_data; 241 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
242 u8 alt_mac_addr[ETH_ALEN]; 242 u8 alt_mac_addr[ETH_ALEN];
243 243
244 /* 244 /* Alternate MAC address is handled by the option ROM for 82580
245 * Alternate MAC address is handled by the option ROM for 82580
246 * and newer. SW support not required. 245 * and newer. SW support not required.
247 */ 246 */
248 if (hw->mac.type >= e1000_82580) 247 if (hw->mac.type >= e1000_82580)
@@ -285,8 +284,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
285 goto out; 284 goto out;
286 } 285 }
287 286
288 /* 287 /* We have a valid alternate MAC address, and we want to treat it the
289 * We have a valid alternate MAC address, and we want to treat it the
290 * same as the normal permanent MAC address stored by the HW into the 288 * same as the normal permanent MAC address stored by the HW into the
291 * RAR. Do this by mapping this address into RAR0. 289 * RAR. Do this by mapping this address into RAR0.
292 */ 290 */
@@ -309,8 +307,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
309{ 307{
310 u32 rar_low, rar_high; 308 u32 rar_low, rar_high;
311 309
312 /* 310 /* HW expects these in little endian so we reverse the byte order
313 * HW expects these in little endian so we reverse the byte order
314 * from network order (big endian) to little endian 311 * from network order (big endian) to little endian
315 */ 312 */
316 rar_low = ((u32) addr[0] | 313 rar_low = ((u32) addr[0] |
@@ -323,8 +320,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
323 if (rar_low || rar_high) 320 if (rar_low || rar_high)
324 rar_high |= E1000_RAH_AV; 321 rar_high |= E1000_RAH_AV;
325 322
326 /* 323 /* Some bridges will combine consecutive 32-bit writes into
327 * Some bridges will combine consecutive 32-bit writes into
328 * a single burst write, which will malfunction on some parts. 324 * a single burst write, which will malfunction on some parts.
329 * The flushes avoid this. 325 * The flushes avoid this.
330 */ 326 */
@@ -348,8 +344,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
348{ 344{
349 u32 hash_bit, hash_reg, mta; 345 u32 hash_bit, hash_reg, mta;
350 346
351 /* 347 /* The MTA is a register array of 32-bit registers. It is
352 * The MTA is a register array of 32-bit registers. It is
353 * treated like an array of (32*mta_reg_count) bits. We want to 348 * treated like an array of (32*mta_reg_count) bits. We want to
354 * set bit BitArray[hash_value]. So we figure out what register 349 * set bit BitArray[hash_value]. So we figure out what register
355 * the bit is in, read it, OR in the new bit, then write 350 * the bit is in, read it, OR in the new bit, then write
@@ -386,15 +381,13 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
386 /* Register count multiplied by bits per register */ 381 /* Register count multiplied by bits per register */
387 hash_mask = (hw->mac.mta_reg_count * 32) - 1; 382 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
388 383
389 /* 384 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
390 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
391 * where 0xFF would still fall within the hash mask. 385 * where 0xFF would still fall within the hash mask.
392 */ 386 */
393 while (hash_mask >> bit_shift != 0xFF) 387 while (hash_mask >> bit_shift != 0xFF)
394 bit_shift++; 388 bit_shift++;
395 389
396 /* 390 /* The portion of the address that is used for the hash table
397 * The portion of the address that is used for the hash table
398 * is determined by the mc_filter_type setting. 391 * is determined by the mc_filter_type setting.
399 * The algorithm is such that there is a total of 8 bits of shifting. 392 * The algorithm is such that there is a total of 8 bits of shifting.
400 * The bit_shift for a mc_filter_type of 0 represents the number of 393 * The bit_shift for a mc_filter_type of 0 represents the number of
@@ -536,8 +529,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
536 s32 ret_val; 529 s32 ret_val;
537 bool link; 530 bool link;
538 531
539 /* 532 /* We only want to go out to the PHY registers to see if Auto-Neg
540 * We only want to go out to the PHY registers to see if Auto-Neg
541 * has completed and/or if our link status has changed. The 533 * has completed and/or if our link status has changed. The
542 * get_link_status flag is set upon receiving a Link Status 534 * get_link_status flag is set upon receiving a Link Status
543 * Change or Rx Sequence Error interrupt. 535 * Change or Rx Sequence Error interrupt.
@@ -547,8 +539,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
547 goto out; 539 goto out;
548 } 540 }
549 541
550 /* 542 /* First we want to see if the MII Status Register reports
551 * First we want to see if the MII Status Register reports
552 * link. If so, then we want to get the current speed/duplex 543 * link. If so, then we want to get the current speed/duplex
553 * of the PHY. 544 * of the PHY.
554 */ 545 */
@@ -561,14 +552,12 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
561 552
562 mac->get_link_status = false; 553 mac->get_link_status = false;
563 554
564 /* 555 /* Check if there was DownShift, must be checked
565 * Check if there was DownShift, must be checked
566 * immediately after link-up 556 * immediately after link-up
567 */ 557 */
568 igb_check_downshift(hw); 558 igb_check_downshift(hw);
569 559
570 /* 560 /* If we are forcing speed/duplex, then we simply return since
571 * If we are forcing speed/duplex, then we simply return since
572 * we have already determined whether we have link or not. 561 * we have already determined whether we have link or not.
573 */ 562 */
574 if (!mac->autoneg) { 563 if (!mac->autoneg) {
@@ -576,15 +565,13 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
576 goto out; 565 goto out;
577 } 566 }
578 567
579 /* 568 /* Auto-Neg is enabled. Auto Speed Detection takes care
580 * Auto-Neg is enabled. Auto Speed Detection takes care
581 * of MAC speed/duplex configuration. So we only need to 569 * of MAC speed/duplex configuration. So we only need to
582 * configure Collision Distance in the MAC. 570 * configure Collision Distance in the MAC.
583 */ 571 */
584 igb_config_collision_dist(hw); 572 igb_config_collision_dist(hw);
585 573
586 /* 574 /* Configure Flow Control now that Auto-Neg has completed.
587 * Configure Flow Control now that Auto-Neg has completed.
588 * First, we need to restore the desired flow control 575 * First, we need to restore the desired flow control
589 * settings because we may have had to re-autoneg with a 576 * settings because we may have had to re-autoneg with a
590 * different link partner. 577 * different link partner.
@@ -611,15 +598,13 @@ s32 igb_setup_link(struct e1000_hw *hw)
611{ 598{
612 s32 ret_val = 0; 599 s32 ret_val = 0;
613 600
614 /* 601 /* In the case of the phy reset being blocked, we already have a link.
615 * In the case of the phy reset being blocked, we already have a link.
616 * We do not need to set it up again. 602 * We do not need to set it up again.
617 */ 603 */
618 if (igb_check_reset_block(hw)) 604 if (igb_check_reset_block(hw))
619 goto out; 605 goto out;
620 606
621 /* 607 /* If requested flow control is set to default, set flow control
622 * If requested flow control is set to default, set flow control
623 * based on the EEPROM flow control settings. 608 * based on the EEPROM flow control settings.
624 */ 609 */
625 if (hw->fc.requested_mode == e1000_fc_default) { 610 if (hw->fc.requested_mode == e1000_fc_default) {
@@ -628,8 +613,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
628 goto out; 613 goto out;
629 } 614 }
630 615
631 /* 616 /* We want to save off the original Flow Control configuration just
632 * We want to save off the original Flow Control configuration just
633 * in case we get disconnected and then reconnected into a different 617 * in case we get disconnected and then reconnected into a different
634 * hub or switch with different Flow Control capabilities. 618 * hub or switch with different Flow Control capabilities.
635 */ 619 */
@@ -642,8 +626,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
642 if (ret_val) 626 if (ret_val)
643 goto out; 627 goto out;
644 628
645 /* 629 /* Initialize the flow control address, type, and PAUSE timer
646 * Initialize the flow control address, type, and PAUSE timer
647 * registers to their default values. This is done even if flow 630 * registers to their default values. This is done even if flow
648 * control is disabled, because it does not hurt anything to 631 * control is disabled, because it does not hurt anything to
649 * initialize these registers. 632 * initialize these registers.
@@ -696,16 +679,14 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
696 s32 ret_val = 0; 679 s32 ret_val = 0;
697 u32 fcrtl = 0, fcrth = 0; 680 u32 fcrtl = 0, fcrth = 0;
698 681
699 /* 682 /* Set the flow control receive threshold registers. Normally,
700 * Set the flow control receive threshold registers. Normally,
701 * these registers will be set to a default threshold that may be 683 * these registers will be set to a default threshold that may be
702 * adjusted later by the driver's runtime code. However, if the 684 * adjusted later by the driver's runtime code. However, if the
703 * ability to transmit pause frames is not enabled, then these 685 * ability to transmit pause frames is not enabled, then these
704 * registers will be set to 0. 686 * registers will be set to 0.
705 */ 687 */
706 if (hw->fc.current_mode & e1000_fc_tx_pause) { 688 if (hw->fc.current_mode & e1000_fc_tx_pause) {
707 /* 689 /* We need to set up the Receive Threshold high and low water
708 * We need to set up the Receive Threshold high and low water
709 * marks as well as (optionally) enabling the transmission of 690 * marks as well as (optionally) enabling the transmission of
710 * XON frames. 691 * XON frames.
711 */ 692 */
@@ -733,8 +714,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
733 s32 ret_val = 0; 714 s32 ret_val = 0;
734 u16 nvm_data; 715 u16 nvm_data;
735 716
736 /* 717 /* Read and store word 0x0F of the EEPROM. This word contains bits
737 * Read and store word 0x0F of the EEPROM. This word contains bits
738 * that determine the hardware's default PAUSE (flow control) mode, 718 * that determine the hardware's default PAUSE (flow control) mode,
739 * a bit that determines whether the HW defaults to enabling or 719 * a bit that determines whether the HW defaults to enabling or
740 * disabling auto-negotiation, and the direction of the 720 * disabling auto-negotiation, and the direction of the
@@ -778,8 +758,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
778 758
779 ctrl = rd32(E1000_CTRL); 759 ctrl = rd32(E1000_CTRL);
780 760
781 /* 761 /* Because we didn't get link via the internal auto-negotiation
782 * Because we didn't get link via the internal auto-negotiation
783 * mechanism (we either forced link or we got link via PHY 762 * mechanism (we either forced link or we got link via PHY
784 * auto-neg), we have to manually enable/disable transmit an 763 * auto-neg), we have to manually enable/disable transmit an
785 * receive flow control. 764 * receive flow control.
@@ -843,8 +822,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
843 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; 822 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
844 u16 speed, duplex; 823 u16 speed, duplex;
845 824
846 /* 825 /* Check for the case where we have fiber media and auto-neg failed
847 * Check for the case where we have fiber media and auto-neg failed
848 * so we had to force link. In this case, we need to force the 826 * so we had to force link. In this case, we need to force the
849 * configuration of the MAC to match the "fc" parameter. 827 * configuration of the MAC to match the "fc" parameter.
850 */ 828 */
@@ -861,15 +839,13 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
861 goto out; 839 goto out;
862 } 840 }
863 841
864 /* 842 /* Check for the case where we have copper media and auto-neg is
865 * Check for the case where we have copper media and auto-neg is
866 * enabled. In this case, we need to check and see if Auto-Neg 843 * enabled. In this case, we need to check and see if Auto-Neg
867 * has completed, and if so, how the PHY and link partner has 844 * has completed, and if so, how the PHY and link partner has
868 * flow control configured. 845 * flow control configured.
869 */ 846 */
870 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { 847 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
871 /* 848 /* Read the MII Status Register and check to see if AutoNeg
872 * Read the MII Status Register and check to see if AutoNeg
873 * has completed. We read this twice because this reg has 849 * has completed. We read this twice because this reg has
874 * some "sticky" (latched) bits. 850 * some "sticky" (latched) bits.
875 */ 851 */
@@ -888,8 +864,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
888 goto out; 864 goto out;
889 } 865 }
890 866
891 /* 867 /* The AutoNeg process has completed, so we now need to
892 * The AutoNeg process has completed, so we now need to
893 * read both the Auto Negotiation Advertisement 868 * read both the Auto Negotiation Advertisement
894 * Register (Address 4) and the Auto_Negotiation Base 869 * Register (Address 4) and the Auto_Negotiation Base
895 * Page Ability Register (Address 5) to determine how 870 * Page Ability Register (Address 5) to determine how
@@ -904,8 +879,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
904 if (ret_val) 879 if (ret_val)
905 goto out; 880 goto out;
906 881
907 /* 882 /* Two bits in the Auto Negotiation Advertisement Register
908 * Two bits in the Auto Negotiation Advertisement Register
909 * (Address 4) and two bits in the Auto Negotiation Base 883 * (Address 4) and two bits in the Auto Negotiation Base
910 * Page Ability Register (Address 5) determine flow control 884 * Page Ability Register (Address 5) determine flow control
911 * for both the PHY and the link partner. The following 885 * for both the PHY and the link partner. The following
@@ -940,8 +914,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
940 */ 914 */
941 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 915 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
942 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 916 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
943 /* 917 /* Now we need to check if the user selected RX ONLY
944 * Now we need to check if the user selected RX ONLY
945 * of pause frames. In this case, we had to advertise 918 * of pause frames. In this case, we had to advertise
946 * FULL flow control because we could not advertise RX 919 * FULL flow control because we could not advertise RX
947 * ONLY. Hence, we must now check to see if we need to 920 * ONLY. Hence, we must now check to see if we need to
@@ -956,8 +929,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
956 "RX PAUSE frames only.\r\n"); 929 "RX PAUSE frames only.\r\n");
957 } 930 }
958 } 931 }
959 /* 932 /* For receiving PAUSE frames ONLY.
960 * For receiving PAUSE frames ONLY.
961 * 933 *
962 * LOCAL DEVICE | LINK PARTNER 934 * LOCAL DEVICE | LINK PARTNER
963 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 935 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -971,8 +943,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
971 hw->fc.current_mode = e1000_fc_tx_pause; 943 hw->fc.current_mode = e1000_fc_tx_pause;
972 hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); 944 hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
973 } 945 }
974 /* 946 /* For transmitting PAUSE frames ONLY.
975 * For transmitting PAUSE frames ONLY.
976 * 947 *
977 * LOCAL DEVICE | LINK PARTNER 948 * LOCAL DEVICE | LINK PARTNER
978 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 949 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -986,8 +957,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
986 hw->fc.current_mode = e1000_fc_rx_pause; 957 hw->fc.current_mode = e1000_fc_rx_pause;
987 hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 958 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
988 } 959 }
989 /* 960 /* Per the IEEE spec, at this point flow control should be
990 * Per the IEEE spec, at this point flow control should be
991 * disabled. However, we want to consider that we could 961 * disabled. However, we want to consider that we could
992 * be connected to a legacy switch that doesn't advertise 962 * be connected to a legacy switch that doesn't advertise
993 * desired flow control, but can be forced on the link 963 * desired flow control, but can be forced on the link
@@ -1007,9 +977,9 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
1007 * be asked to delay transmission of packets than asking 977 * be asked to delay transmission of packets than asking
1008 * our link partner to pause transmission of frames. 978 * our link partner to pause transmission of frames.
1009 */ 979 */
1010 else if ((hw->fc.requested_mode == e1000_fc_none || 980 else if ((hw->fc.requested_mode == e1000_fc_none) ||
1011 hw->fc.requested_mode == e1000_fc_tx_pause) || 981 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
1012 hw->fc.strict_ieee) { 982 (hw->fc.strict_ieee)) {
1013 hw->fc.current_mode = e1000_fc_none; 983 hw->fc.current_mode = e1000_fc_none;
1014 hw_dbg("Flow Control = NONE.\r\n"); 984 hw_dbg("Flow Control = NONE.\r\n");
1015 } else { 985 } else {
@@ -1017,8 +987,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
1017 hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 987 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
1018 } 988 }
1019 989
1020 /* 990 /* Now we need to do one last check... If we auto-
1021 * Now we need to do one last check... If we auto-
1022 * negotiated to HALF DUPLEX, flow control should not be 991 * negotiated to HALF DUPLEX, flow control should not be
1023 * enabled per IEEE 802.3 spec. 992 * enabled per IEEE 802.3 spec.
1024 */ 993 */
@@ -1031,8 +1000,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
1031 if (duplex == HALF_DUPLEX) 1000 if (duplex == HALF_DUPLEX)
1032 hw->fc.current_mode = e1000_fc_none; 1001 hw->fc.current_mode = e1000_fc_none;
1033 1002
1034 /* 1003 /* Now we call a subroutine to actually force the MAC
1035 * Now we call a subroutine to actually force the MAC
1036 * controller to use the correct flow control settings. 1004 * controller to use the correct flow control settings.
1037 */ 1005 */
1038 ret_val = igb_force_mac_fc(hw); 1006 ret_val = igb_force_mac_fc(hw);
@@ -1203,6 +1171,17 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1203 hw_dbg("Half Duplex\n"); 1171 hw_dbg("Half Duplex\n");
1204 } 1172 }
1205 1173
1174 /* Check if it is an I354 2.5Gb backplane connection. */
1175 if (hw->mac.type == e1000_i354) {
1176 if ((status & E1000_STATUS_2P5_SKU) &&
1177 !(status & E1000_STATUS_2P5_SKU_OVER)) {
1178 *speed = SPEED_2500;
1179 *duplex = FULL_DUPLEX;
1180 hw_dbg("2500 Mbs, ");
1181 hw_dbg("Full Duplex\n");
1182 }
1183 }
1184
1206 return 0; 1185 return 0;
1207} 1186}
1208 1187
@@ -1427,8 +1406,7 @@ s32 igb_blink_led(struct e1000_hw *hw)
1427 u32 ledctl_blink = 0; 1406 u32 ledctl_blink = 0;
1428 u32 i; 1407 u32 i;
1429 1408
1430 /* 1409 /* set the blink bit for each LED that's "on" (0x0E)
1431 * set the blink bit for each LED that's "on" (0x0E)
1432 * in ledctl_mode2 1410 * in ledctl_mode2
1433 */ 1411 */
1434 ledctl_blink = hw->mac.ledctl_mode2; 1412 ledctl_blink = hw->mac.ledctl_mode2;
@@ -1467,7 +1445,7 @@ s32 igb_led_off(struct e1000_hw *hw)
1467 * @hw: pointer to the HW structure 1445 * @hw: pointer to the HW structure
1468 * 1446 *
1469 * Returns 0 (0) if successful, else returns -10 1447 * Returns 0 (0) if successful, else returns -10
1470 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued 1448 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1471 * the master requests to be disabled. 1449 * the master requests to be disabled.
1472 * 1450 *
1473 * Disables PCI-Express master access and verifies there are no pending 1451 * Disables PCI-Express master access and verifies there are no pending
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e6d6ce433261..5e13e83cc608 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -35,8 +35,7 @@
35#include "e1000_defines.h" 35#include "e1000_defines.h"
36#include "e1000_i210.h" 36#include "e1000_i210.h"
37 37
38/* 38/* Functions that should not be called directly from drivers but can be used
39 * Functions that should not be called directly from drivers but can be used
40 * by other files in this 'shared code' 39 * by other files in this 'shared code'
41 */ 40 */
42s32 igb_blink_led(struct e1000_hw *hw); 41s32 igb_blink_led(struct e1000_hw *hw);
@@ -49,15 +48,15 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw);
49s32 igb_get_bus_info_pcie(struct e1000_hw *hw); 48s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
50s32 igb_get_hw_semaphore(struct e1000_hw *hw); 49s32 igb_get_hw_semaphore(struct e1000_hw *hw);
51s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, 50s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
52 u16 *duplex); 51 u16 *duplex);
53s32 igb_id_led_init(struct e1000_hw *hw); 52s32 igb_id_led_init(struct e1000_hw *hw);
54s32 igb_led_off(struct e1000_hw *hw); 53s32 igb_led_off(struct e1000_hw *hw);
55void igb_update_mc_addr_list(struct e1000_hw *hw, 54void igb_update_mc_addr_list(struct e1000_hw *hw,
56 u8 *mc_addr_list, u32 mc_addr_count); 55 u8 *mc_addr_list, u32 mc_addr_count);
57s32 igb_setup_link(struct e1000_hw *hw); 56s32 igb_setup_link(struct e1000_hw *hw);
58s32 igb_validate_mdi_setting(struct e1000_hw *hw); 57s32 igb_validate_mdi_setting(struct e1000_hw *hw);
59s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, 58s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
60 u32 offset, u8 data); 59 u32 offset, u8 data);
61 60
62void igb_clear_hw_cntrs_base(struct e1000_hw *hw); 61void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
63void igb_clear_vfta(struct e1000_hw *hw); 62void igb_clear_vfta(struct e1000_hw *hw);
@@ -80,12 +79,12 @@ enum e1000_mng_mode {
80 e1000_mng_mode_host_if_only 79 e1000_mng_mode_host_if_only
81}; 80};
82 81
83#define E1000_FACTPS_MNGCG 0x20000000 82#define E1000_FACTPS_MNGCG 0x20000000
84 83
85#define E1000_FWSM_MODE_MASK 0xE 84#define E1000_FWSM_MODE_MASK 0xE
86#define E1000_FWSM_MODE_SHIFT 1 85#define E1000_FWSM_MODE_SHIFT 1
87 86
88#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 87#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
89 88
90extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); 89extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
91 90
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 38e0df350904..dac1447fabf7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -196,7 +196,8 @@ out:
196 * returns SUCCESS if it successfully received a message notification and 196 * returns SUCCESS if it successfully received a message notification and
197 * copied it into the receive buffer. 197 * copied it into the receive buffer.
198 **/ 198 **/
199static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 199static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
200 u16 mbx_id)
200{ 201{
201 struct e1000_mbx_info *mbx = &hw->mbx; 202 struct e1000_mbx_info *mbx = &hw->mbx;
202 s32 ret_val = -E1000_ERR_MBX; 203 s32 ret_val = -E1000_ERR_MBX;
@@ -222,7 +223,8 @@ out:
222 * returns SUCCESS if it successfully copied message into the buffer and 223 * returns SUCCESS if it successfully copied message into the buffer and
223 * received an ack to that message within delay * timeout period 224 * received an ack to that message within delay * timeout period
224 **/ 225 **/
225static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 226static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
227 u16 mbx_id)
226{ 228{
227 struct e1000_mbx_info *mbx = &hw->mbx; 229 struct e1000_mbx_info *mbx = &hw->mbx;
228 s32 ret_val = -E1000_ERR_MBX; 230 s32 ret_val = -E1000_ERR_MBX;
@@ -325,7 +327,6 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
325 s32 ret_val = -E1000_ERR_MBX; 327 s32 ret_val = -E1000_ERR_MBX;
326 u32 p2v_mailbox; 328 u32 p2v_mailbox;
327 329
328
329 /* Take ownership of the buffer */ 330 /* Take ownership of the buffer */
330 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); 331 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
331 332
@@ -347,7 +348,7 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
347 * returns SUCCESS if it successfully copied message into the buffer 348 * returns SUCCESS if it successfully copied message into the buffer
348 **/ 349 **/
349static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, 350static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
350 u16 vf_number) 351 u16 vf_number)
351{ 352{
352 s32 ret_val; 353 s32 ret_val;
353 u16 i; 354 u16 i;
@@ -388,7 +389,7 @@ out_no_write:
388 * a message due to a VF request so no polling for message is needed. 389 * a message due to a VF request so no polling for message is needed.
389 **/ 390 **/
390static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, 391static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
391 u16 vf_number) 392 u16 vf_number)
392{ 393{
393 s32 ret_val; 394 s32 ret_val;
394 u16 i; 395 u16 i;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index c13b56d9edb2..de9bba41acf3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -30,42 +30,42 @@
30 30
31#include "e1000_hw.h" 31#include "e1000_hw.h"
32 32
33#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ 33#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
34#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ 34#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
35#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ 35#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
36#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ 36#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
37#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ 37#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
38 38
39#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ 39#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
40#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ 40#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
41#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ 41#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
42#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ 42#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
43 43
44#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ 44#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
45 45
46/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the 46/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
47 * PF. The reverse is true if it is E1000_PF_*. 47 * PF. The reverse is true if it is E1000_PF_*.
48 * Message ACK's are the value or'd with 0xF0000000 48 * Message ACK's are the value or'd with 0xF0000000
49 */ 49 */
50#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with 50/* Messages below or'd with this are the ACK */
51 * this are the ACK */ 51#define E1000_VT_MSGTYPE_ACK 0x80000000
52#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with 52/* Messages below or'd with this are the NACK */
53 * this are the NACK */ 53#define E1000_VT_MSGTYPE_NACK 0x40000000
54#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still 54/* Indicates that VF is still clear to send requests */
55 clear to send requests */ 55#define E1000_VT_MSGTYPE_CTS 0x20000000
56#define E1000_VT_MSGINFO_SHIFT 16 56#define E1000_VT_MSGINFO_SHIFT 16
57/* bits 23:16 are used for exra info for certain messages */ 57/* bits 23:16 are used for exra info for certain messages */
58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) 58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
59 59
60#define E1000_VF_RESET 0x01 /* VF requests reset */ 60#define E1000_VF_RESET 0x01 /* VF requests reset */
61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ 61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ 62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
63#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ 63#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
64#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ 64#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
65#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ 65#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
66#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) 66#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
67 67
68#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ 68#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
69 69
70s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); 70s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
71s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); 71s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 5b62adbe134d..7f9cd7cbd353 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -289,15 +289,14 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
289 udelay(1); 289 udelay(1);
290 timeout = NVM_MAX_RETRY_SPI; 290 timeout = NVM_MAX_RETRY_SPI;
291 291
292 /* 292 /* Read "Status Register" repeatedly until the LSB is cleared.
293 * Read "Status Register" repeatedly until the LSB is cleared.
294 * The EEPROM will signal that the command has been completed 293 * The EEPROM will signal that the command has been completed
295 * by clearing bit 0 of the internal status register. If it's 294 * by clearing bit 0 of the internal status register. If it's
296 * not cleared within 'timeout', then error out. 295 * not cleared within 'timeout', then error out.
297 */ 296 */
298 while (timeout) { 297 while (timeout) {
299 igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, 298 igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
300 hw->nvm.opcode_bits); 299 hw->nvm.opcode_bits);
301 spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); 300 spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
302 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) 301 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
303 break; 302 break;
@@ -335,8 +334,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
335 u16 word_in; 334 u16 word_in;
336 u8 read_opcode = NVM_READ_OPCODE_SPI; 335 u8 read_opcode = NVM_READ_OPCODE_SPI;
337 336
338 /* 337 /* A check for invalid values: offset too large, too many words,
339 * A check for invalid values: offset too large, too many words,
340 * and not enough words. 338 * and not enough words.
341 */ 339 */
342 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 340 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -363,8 +361,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
363 igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); 361 igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
364 igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); 362 igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
365 363
366 /* 364 /* Read the data. SPI NVMs increment the address with each byte
367 * Read the data. SPI NVMs increment the address with each byte
368 * read and will roll over if reading beyond the end. This allows 365 * read and will roll over if reading beyond the end. This allows
369 * us to read the whole NVM from any offset 366 * us to read the whole NVM from any offset
370 */ 367 */
@@ -395,8 +392,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
395 u32 i, eerd = 0; 392 u32 i, eerd = 0;
396 s32 ret_val = 0; 393 s32 ret_val = 0;
397 394
398 /* 395 /* A check for invalid values: offset too large, too many words,
399 * A check for invalid values: offset too large, too many words,
400 * and not enough words. 396 * and not enough words.
401 */ 397 */
402 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 398 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -408,7 +404,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
408 404
409 for (i = 0; i < words; i++) { 405 for (i = 0; i < words; i++) {
410 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + 406 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
411 E1000_NVM_RW_REG_START; 407 E1000_NVM_RW_REG_START;
412 408
413 wr32(E1000_EERD, eerd); 409 wr32(E1000_EERD, eerd);
414 ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); 410 ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
@@ -441,8 +437,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
441 s32 ret_val = -E1000_ERR_NVM; 437 s32 ret_val = -E1000_ERR_NVM;
442 u16 widx = 0; 438 u16 widx = 0;
443 439
444 /* 440 /* A check for invalid values: offset too large, too many words,
445 * A check for invalid values: offset too large, too many words,
446 * and not enough words. 441 * and not enough words.
447 */ 442 */
448 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 443 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -472,8 +467,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
472 467
473 igb_standby_nvm(hw); 468 igb_standby_nvm(hw);
474 469
475 /* 470 /* Some SPI eeproms use the 8th address bit embedded in the
476 * Some SPI eeproms use the 8th address bit embedded in the
477 * opcode 471 * opcode
478 */ 472 */
479 if ((nvm->address_bits == 8) && (offset >= 128)) 473 if ((nvm->address_bits == 8) && (offset >= 128))
@@ -538,8 +532,7 @@ s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
538 goto out; 532 goto out;
539 } 533 }
540 534
541 /* 535 /* if nvm_data is not ptr guard the PBA must be in legacy format which
542 * if nvm_data is not ptr guard the PBA must be in legacy format which
543 * means pointer is actually our second data word for the PBA number 536 * means pointer is actually our second data word for the PBA number
544 * and we can decode it into an ascii string 537 * and we can decode it into an ascii string
545 */ 538 */
@@ -728,6 +721,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
728 case e1000_82575: 721 case e1000_82575:
729 case e1000_82576: 722 case e1000_82576:
730 case e1000_82580: 723 case e1000_82580:
724 case e1000_i354:
731 case e1000_i350: 725 case e1000_i350:
732 case e1000_i210: 726 case e1000_i210:
733 break; 727 break;
@@ -746,6 +740,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
746 740
747 switch (hw->mac.type) { 741 switch (hw->mac.type) {
748 case e1000_i210: 742 case e1000_i210:
743 case e1000_i354:
749 case e1000_i350: 744 case e1000_i350:
750 /* find combo image version */ 745 /* find combo image version */
751 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); 746 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 2918c979b5bb..fd46add6c4e4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -33,29 +33,29 @@
33 33
34static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); 34static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
35static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, 35static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
36 u16 *phy_ctrl); 36 u16 *phy_ctrl);
37static s32 igb_wait_autoneg(struct e1000_hw *hw); 37static s32 igb_wait_autoneg(struct e1000_hw *hw);
38static s32 igb_set_master_slave_mode(struct e1000_hw *hw); 38static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
39 39
40/* Cable length tables */ 40/* Cable length tables */
41static const u16 e1000_m88_cable_length_table[] = 41static const u16 e1000_m88_cable_length_table[] = {
42 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; 42 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
43#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ 43#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
44 (sizeof(e1000_m88_cable_length_table) / \ 44 (sizeof(e1000_m88_cable_length_table) / \
45 sizeof(e1000_m88_cable_length_table[0])) 45 sizeof(e1000_m88_cable_length_table[0]))
46 46
47static const u16 e1000_igp_2_cable_length_table[] = 47static const u16 e1000_igp_2_cable_length_table[] = {
48 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 48 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
49 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 49 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
50 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 50 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
51 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 51 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
52 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 52 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
53 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 53 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
54 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 54 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
55 104, 109, 114, 118, 121, 124}; 55 104, 109, 114, 118, 121, 124};
56#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ 56#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
57 (sizeof(e1000_igp_2_cable_length_table) / \ 57 (sizeof(e1000_igp_2_cable_length_table) / \
58 sizeof(e1000_igp_2_cable_length_table[0])) 58 sizeof(e1000_igp_2_cable_length_table[0]))
59 59
60/** 60/**
61 * igb_check_reset_block - Check if PHY reset is blocked 61 * igb_check_reset_block - Check if PHY reset is blocked
@@ -71,8 +71,7 @@ s32 igb_check_reset_block(struct e1000_hw *hw)
71 71
72 manc = rd32(E1000_MANC); 72 manc = rd32(E1000_MANC);
73 73
74 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 74 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
75 E1000_BLK_PHY_RESET : 0;
76} 75}
77 76
78/** 77/**
@@ -149,8 +148,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
149 goto out; 148 goto out;
150 } 149 }
151 150
152 /* 151 /* Set up Op-code, Phy Address, and register offset in the MDI
153 * Set up Op-code, Phy Address, and register offset in the MDI
154 * Control register. The MAC will take care of interfacing with the 152 * Control register. The MAC will take care of interfacing with the
155 * PHY to retrieve the desired data. 153 * PHY to retrieve the desired data.
156 */ 154 */
@@ -160,8 +158,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
160 158
161 wr32(E1000_MDIC, mdic); 159 wr32(E1000_MDIC, mdic);
162 160
163 /* 161 /* Poll the ready bit to see if the MDI read completed
164 * Poll the ready bit to see if the MDI read completed
165 * Increasing the time out as testing showed failures with 162 * Increasing the time out as testing showed failures with
166 * the lower time out 163 * the lower time out
167 */ 164 */
@@ -207,8 +204,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
207 goto out; 204 goto out;
208 } 205 }
209 206
210 /* 207 /* Set up Op-code, Phy Address, and register offset in the MDI
211 * Set up Op-code, Phy Address, and register offset in the MDI
212 * Control register. The MAC will take care of interfacing with the 208 * Control register. The MAC will take care of interfacing with the
213 * PHY to retrieve the desired data. 209 * PHY to retrieve the desired data.
214 */ 210 */
@@ -219,8 +215,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
219 215
220 wr32(E1000_MDIC, mdic); 216 wr32(E1000_MDIC, mdic);
221 217
222 /* 218 /* Poll the ready bit to see if the MDI read completed
223 * Poll the ready bit to see if the MDI read completed
224 * Increasing the time out as testing showed failures with 219 * Increasing the time out as testing showed failures with
225 * the lower time out 220 * the lower time out
226 */ 221 */
@@ -259,15 +254,13 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
259 struct e1000_phy_info *phy = &hw->phy; 254 struct e1000_phy_info *phy = &hw->phy;
260 u32 i, i2ccmd = 0; 255 u32 i, i2ccmd = 0;
261 256
262 257 /* Set up Op-code, Phy Address, and register address in the I2CCMD
263 /*
264 * Set up Op-code, Phy Address, and register address in the I2CCMD
265 * register. The MAC will take care of interfacing with the 258 * register. The MAC will take care of interfacing with the
266 * PHY to retrieve the desired data. 259 * PHY to retrieve the desired data.
267 */ 260 */
268 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | 261 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
269 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | 262 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
270 (E1000_I2CCMD_OPCODE_READ)); 263 (E1000_I2CCMD_OPCODE_READ));
271 264
272 wr32(E1000_I2CCMD, i2ccmd); 265 wr32(E1000_I2CCMD, i2ccmd);
273 266
@@ -317,15 +310,14 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
317 /* Swap the data bytes for the I2C interface */ 310 /* Swap the data bytes for the I2C interface */
318 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); 311 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
319 312
320 /* 313 /* Set up Op-code, Phy Address, and register address in the I2CCMD
321 * Set up Op-code, Phy Address, and register address in the I2CCMD
322 * register. The MAC will take care of interfacing with the 314 * register. The MAC will take care of interfacing with the
323 * PHY to retrieve the desired data. 315 * PHY to retrieve the desired data.
324 */ 316 */
325 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | 317 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
326 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | 318 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
327 E1000_I2CCMD_OPCODE_WRITE | 319 E1000_I2CCMD_OPCODE_WRITE |
328 phy_data_swapped); 320 phy_data_swapped);
329 321
330 wr32(E1000_I2CCMD, i2ccmd); 322 wr32(E1000_I2CCMD, i2ccmd);
331 323
@@ -371,8 +363,8 @@ s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
371 363
372 if (offset > MAX_PHY_MULTI_PAGE_REG) { 364 if (offset > MAX_PHY_MULTI_PAGE_REG) {
373 ret_val = igb_write_phy_reg_mdic(hw, 365 ret_val = igb_write_phy_reg_mdic(hw,
374 IGP01E1000_PHY_PAGE_SELECT, 366 IGP01E1000_PHY_PAGE_SELECT,
375 (u16)offset); 367 (u16)offset);
376 if (ret_val) { 368 if (ret_val) {
377 hw->phy.ops.release(hw); 369 hw->phy.ops.release(hw);
378 goto out; 370 goto out;
@@ -410,8 +402,8 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
410 402
411 if (offset > MAX_PHY_MULTI_PAGE_REG) { 403 if (offset > MAX_PHY_MULTI_PAGE_REG) {
412 ret_val = igb_write_phy_reg_mdic(hw, 404 ret_val = igb_write_phy_reg_mdic(hw,
413 IGP01E1000_PHY_PAGE_SELECT, 405 IGP01E1000_PHY_PAGE_SELECT,
414 (u16)offset); 406 (u16)offset);
415 if (ret_val) { 407 if (ret_val) {
416 hw->phy.ops.release(hw); 408 hw->phy.ops.release(hw);
417 goto out; 409 goto out;
@@ -419,7 +411,7 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
419 } 411 }
420 412
421 ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 413 ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
422 data); 414 data);
423 415
424 hw->phy.ops.release(hw); 416 hw->phy.ops.release(hw);
425 417
@@ -439,7 +431,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
439 s32 ret_val; 431 s32 ret_val;
440 u16 phy_data; 432 u16 phy_data;
441 433
442
443 if (phy->reset_disable) { 434 if (phy->reset_disable) {
444 ret_val = 0; 435 ret_val = 0;
445 goto out; 436 goto out;
@@ -472,8 +463,7 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
472 if (ret_val) 463 if (ret_val)
473 goto out; 464 goto out;
474 phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; 465 phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
475 /* 466 /* Options:
476 * Options:
477 * 0 - Auto (default) 467 * 0 - Auto (default)
478 * 1 - MDI mode 468 * 1 - MDI mode
479 * 2 - MDI-X mode 469 * 2 - MDI-X mode
@@ -520,8 +510,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
520 510
521 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 511 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
522 512
523 /* 513 /* Options:
524 * Options:
525 * MDI/MDI-X = 0 (default) 514 * MDI/MDI-X = 0 (default)
526 * 0 - Auto for all speeds 515 * 0 - Auto for all speeds
527 * 1 - MDI mode 516 * 1 - MDI mode
@@ -546,8 +535,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
546 break; 535 break;
547 } 536 }
548 537
549 /* 538 /* Options:
550 * Options:
551 * disable_polarity_correction = 0 (default) 539 * disable_polarity_correction = 0 (default)
552 * Automatic Correction for Reversed Cable Polarity 540 * Automatic Correction for Reversed Cable Polarity
553 * 0 - Disabled 541 * 0 - Disabled
@@ -562,12 +550,11 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
562 goto out; 550 goto out;
563 551
564 if (phy->revision < E1000_REVISION_4) { 552 if (phy->revision < E1000_REVISION_4) {
565 /* 553 /* Force TX_CLK in the Extended PHY Specific Control Register
566 * Force TX_CLK in the Extended PHY Specific Control Register
567 * to 25MHz clock. 554 * to 25MHz clock.
568 */ 555 */
569 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 556 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
570 &phy_data); 557 &phy_data);
571 if (ret_val) 558 if (ret_val)
572 goto out; 559 goto out;
573 560
@@ -630,8 +617,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
630 if (ret_val) 617 if (ret_val)
631 goto out; 618 goto out;
632 619
633 /* 620 /* Options:
634 * Options:
635 * MDI/MDI-X = 0 (default) 621 * MDI/MDI-X = 0 (default)
636 * 0 - Auto for all speeds 622 * 0 - Auto for all speeds
637 * 1 - MDI mode 623 * 1 - MDI mode
@@ -659,8 +645,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
659 break; 645 break;
660 } 646 }
661 647
662 /* 648 /* Options:
663 * Options:
664 * disable_polarity_correction = 0 (default) 649 * disable_polarity_correction = 0 (default)
665 * Automatic Correction for Reversed Cable Polarity 650 * Automatic Correction for Reversed Cable Polarity
666 * 0 - Disabled 651 * 0 - Disabled
@@ -714,14 +699,12 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
714 goto out; 699 goto out;
715 } 700 }
716 701
717 /* 702 /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
718 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
719 * timeout issues when LFS is enabled. 703 * timeout issues when LFS is enabled.
720 */ 704 */
721 msleep(100); 705 msleep(100);
722 706
723 /* 707 /* The NVM settings will configure LPLU in D3 for
724 * The NVM settings will configure LPLU in D3 for
725 * non-IGP1 PHYs. 708 * non-IGP1 PHYs.
726 */ 709 */
727 if (phy->type == e1000_phy_igp) { 710 if (phy->type == e1000_phy_igp) {
@@ -765,8 +748,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
765 748
766 /* set auto-master slave resolution settings */ 749 /* set auto-master slave resolution settings */
767 if (hw->mac.autoneg) { 750 if (hw->mac.autoneg) {
768 /* 751 /* when autonegotiation advertisement is only 1000Mbps then we
769 * when autonegotiation advertisement is only 1000Mbps then we
770 * should disable SmartSpeed and enable Auto MasterSlave 752 * should disable SmartSpeed and enable Auto MasterSlave
771 * resolution as hardware default. 753 * resolution as hardware default.
772 */ 754 */
@@ -844,14 +826,12 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
844 s32 ret_val; 826 s32 ret_val;
845 u16 phy_ctrl; 827 u16 phy_ctrl;
846 828
847 /* 829 /* Perform some bounds checking on the autoneg advertisement
848 * Perform some bounds checking on the autoneg advertisement
849 * parameter. 830 * parameter.
850 */ 831 */
851 phy->autoneg_advertised &= phy->autoneg_mask; 832 phy->autoneg_advertised &= phy->autoneg_mask;
852 833
853 /* 834 /* If autoneg_advertised is zero, we assume it was not defaulted
854 * If autoneg_advertised is zero, we assume it was not defaulted
855 * by the calling code so we set to advertise full capability. 835 * by the calling code so we set to advertise full capability.
856 */ 836 */
857 if (phy->autoneg_advertised == 0) 837 if (phy->autoneg_advertised == 0)
@@ -865,8 +845,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
865 } 845 }
866 hw_dbg("Restarting Auto-Neg\n"); 846 hw_dbg("Restarting Auto-Neg\n");
867 847
868 /* 848 /* Restart auto-negotiation by setting the Auto Neg Enable bit and
869 * Restart auto-negotiation by setting the Auto Neg Enable bit and
870 * the Auto Neg Restart bit in the PHY control register. 849 * the Auto Neg Restart bit in the PHY control register.
871 */ 850 */
872 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); 851 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
@@ -878,8 +857,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
878 if (ret_val) 857 if (ret_val)
879 goto out; 858 goto out;
880 859
881 /* 860 /* Does the user want to wait for Auto-Neg to complete here, or
882 * Does the user want to wait for Auto-Neg to complete here, or
883 * check at a later time (for example, callback routine). 861 * check at a later time (for example, callback routine).
884 */ 862 */
885 if (phy->autoneg_wait_to_complete) { 863 if (phy->autoneg_wait_to_complete) {
@@ -928,16 +906,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
928 goto out; 906 goto out;
929 } 907 }
930 908
931 /* 909 /* Need to parse both autoneg_advertised and fc and set up
932 * Need to parse both autoneg_advertised and fc and set up
933 * the appropriate PHY registers. First we will parse for 910 * the appropriate PHY registers. First we will parse for
934 * autoneg_advertised software override. Since we can advertise 911 * autoneg_advertised software override. Since we can advertise
935 * a plethora of combinations, we need to check each bit 912 * a plethora of combinations, we need to check each bit
936 * individually. 913 * individually.
937 */ 914 */
938 915
939 /* 916 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
940 * First we clear all the 10/100 mb speed bits in the Auto-Neg
941 * Advertisement Register (Address 4) and the 1000 mb speed bits in 917 * Advertisement Register (Address 4) and the 1000 mb speed bits in
942 * the 1000Base-T Control Register (Address 9). 918 * the 1000Base-T Control Register (Address 9).
943 */ 919 */
@@ -983,8 +959,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
983 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 959 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
984 } 960 }
985 961
986 /* 962 /* Check for a software override of the flow control settings, and
987 * Check for a software override of the flow control settings, and
988 * setup the PHY advertisement registers accordingly. If 963 * setup the PHY advertisement registers accordingly. If
989 * auto-negotiation is enabled, then software will have to set the 964 * auto-negotiation is enabled, then software will have to set the
990 * "PAUSE" bits to the correct value in the Auto-Negotiation 965 * "PAUSE" bits to the correct value in the Auto-Negotiation
@@ -1003,15 +978,13 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
1003 */ 978 */
1004 switch (hw->fc.current_mode) { 979 switch (hw->fc.current_mode) {
1005 case e1000_fc_none: 980 case e1000_fc_none:
1006 /* 981 /* Flow control (RX & TX) is completely disabled by a
1007 * Flow control (RX & TX) is completely disabled by a
1008 * software over-ride. 982 * software over-ride.
1009 */ 983 */
1010 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 984 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1011 break; 985 break;
1012 case e1000_fc_rx_pause: 986 case e1000_fc_rx_pause:
1013 /* 987 /* RX Flow control is enabled, and TX Flow control is
1014 * RX Flow control is enabled, and TX Flow control is
1015 * disabled, by a software over-ride. 988 * disabled, by a software over-ride.
1016 * 989 *
1017 * Since there really isn't a way to advertise that we are 990 * Since there really isn't a way to advertise that we are
@@ -1023,16 +996,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
1023 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 996 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1024 break; 997 break;
1025 case e1000_fc_tx_pause: 998 case e1000_fc_tx_pause:
1026 /* 999 /* TX Flow control is enabled, and RX Flow control is
1027 * TX Flow control is enabled, and RX Flow control is
1028 * disabled, by a software over-ride. 1000 * disabled, by a software over-ride.
1029 */ 1001 */
1030 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; 1002 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
1031 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; 1003 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
1032 break; 1004 break;
1033 case e1000_fc_full: 1005 case e1000_fc_full:
1034 /* 1006 /* Flow control (both RX and TX) is enabled by a software
1035 * Flow control (both RX and TX) is enabled by a software
1036 * over-ride. 1007 * over-ride.
1037 */ 1008 */
1038 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 1009 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
@@ -1075,18 +1046,15 @@ s32 igb_setup_copper_link(struct e1000_hw *hw)
1075 s32 ret_val; 1046 s32 ret_val;
1076 bool link; 1047 bool link;
1077 1048
1078
1079 if (hw->mac.autoneg) { 1049 if (hw->mac.autoneg) {
1080 /* 1050 /* Setup autoneg and flow control advertisement and perform
1081 * Setup autoneg and flow control advertisement and perform
1082 * autonegotiation. 1051 * autonegotiation.
1083 */ 1052 */
1084 ret_val = igb_copper_link_autoneg(hw); 1053 ret_val = igb_copper_link_autoneg(hw);
1085 if (ret_val) 1054 if (ret_val)
1086 goto out; 1055 goto out;
1087 } else { 1056 } else {
1088 /* 1057 /* PHY will be set to 10H, 10F, 100H or 100F
1089 * PHY will be set to 10H, 10F, 100H or 100F
1090 * depending on user settings. 1058 * depending on user settings.
1091 */ 1059 */
1092 hw_dbg("Forcing Speed and Duplex\n"); 1060 hw_dbg("Forcing Speed and Duplex\n");
@@ -1097,14 +1065,10 @@ s32 igb_setup_copper_link(struct e1000_hw *hw)
1097 } 1065 }
1098 } 1066 }
1099 1067
1100 /* 1068 /* Check link status. Wait up to 100 microseconds for link to become
1101 * Check link status. Wait up to 100 microseconds for link to become
1102 * valid. 1069 * valid.
1103 */ 1070 */
1104 ret_val = igb_phy_has_link(hw, 1071 ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
1105 COPPER_LINK_UP_LIMIT,
1106 10,
1107 &link);
1108 if (ret_val) 1072 if (ret_val)
1109 goto out; 1073 goto out;
1110 1074
@@ -1145,8 +1109,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1145 if (ret_val) 1109 if (ret_val)
1146 goto out; 1110 goto out;
1147 1111
1148 /* 1112 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
1149 * Clear Auto-Crossover to force MDI manually. IGP requires MDI
1150 * forced whenever speed and duplex are forced. 1113 * forced whenever speed and duplex are forced.
1151 */ 1114 */
1152 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); 1115 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@@ -1167,10 +1130,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1167 if (phy->autoneg_wait_to_complete) { 1130 if (phy->autoneg_wait_to_complete) {
1168 hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); 1131 hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
1169 1132
1170 ret_val = igb_phy_has_link(hw, 1133 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
1171 PHY_FORCE_LIMIT,
1172 100000,
1173 &link);
1174 if (ret_val) 1134 if (ret_val)
1175 goto out; 1135 goto out;
1176 1136
@@ -1178,10 +1138,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1178 hw_dbg("Link taking longer than expected.\n"); 1138 hw_dbg("Link taking longer than expected.\n");
1179 1139
1180 /* Try once more */ 1140 /* Try once more */
1181 ret_val = igb_phy_has_link(hw, 1141 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
1182 PHY_FORCE_LIMIT,
1183 100000,
1184 &link);
1185 if (ret_val) 1142 if (ret_val)
1186 goto out; 1143 goto out;
1187 } 1144 }
@@ -1209,8 +1166,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1209 1166
1210 /* I210 and I211 devices support Auto-Crossover in forced operation. */ 1167 /* I210 and I211 devices support Auto-Crossover in forced operation. */
1211 if (phy->type != e1000_phy_i210) { 1168 if (phy->type != e1000_phy_i210) {
1212 /* 1169 /* Clear Auto-Crossover to force MDI manually. M88E1000
1213 * Clear Auto-Crossover to force MDI manually. M88E1000
1214 * requires MDI forced whenever speed and duplex are forced. 1170 * requires MDI forced whenever speed and duplex are forced.
1215 */ 1171 */
1216 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, 1172 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
@@ -1266,13 +1222,12 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1266 if (!reset_dsp) 1222 if (!reset_dsp)
1267 hw_dbg("Link taking longer than expected.\n"); 1223 hw_dbg("Link taking longer than expected.\n");
1268 else { 1224 else {
1269 /* 1225 /* We didn't get link.
1270 * We didn't get link.
1271 * Reset the DSP and cross our fingers. 1226 * Reset the DSP and cross our fingers.
1272 */ 1227 */
1273 ret_val = phy->ops.write_reg(hw, 1228 ret_val = phy->ops.write_reg(hw,
1274 M88E1000_PHY_PAGE_SELECT, 1229 M88E1000_PHY_PAGE_SELECT,
1275 0x001d); 1230 0x001d);
1276 if (ret_val) 1231 if (ret_val)
1277 goto out; 1232 goto out;
1278 ret_val = igb_phy_reset_dsp(hw); 1233 ret_val = igb_phy_reset_dsp(hw);
@@ -1298,8 +1253,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1298 if (ret_val) 1253 if (ret_val)
1299 goto out; 1254 goto out;
1300 1255
1301 /* 1256 /* Resetting the phy means we need to re-force TX_CLK in the
1302 * Resetting the phy means we need to re-force TX_CLK in the
1303 * Extended PHY Specific Control Register to 25MHz clock from 1257 * Extended PHY Specific Control Register to 25MHz clock from
1304 * the reset value of 2.5MHz. 1258 * the reset value of 2.5MHz.
1305 */ 1259 */
@@ -1308,8 +1262,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1308 if (ret_val) 1262 if (ret_val)
1309 goto out; 1263 goto out;
1310 1264
1311 /* 1265 /* In addition, we must re-enable CRS on Tx for both half and full
1312 * In addition, we must re-enable CRS on Tx for both half and full
1313 * duplex. 1266 * duplex.
1314 */ 1267 */
1315 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1268 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1336,7 +1289,7 @@ out:
1336 * take affect. 1289 * take affect.
1337 **/ 1290 **/
1338static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, 1291static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
1339 u16 *phy_ctrl) 1292 u16 *phy_ctrl)
1340{ 1293{
1341 struct e1000_mac_info *mac = &hw->mac; 1294 struct e1000_mac_info *mac = &hw->mac;
1342 u32 ctrl; 1295 u32 ctrl;
@@ -1417,8 +1370,7 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1417 data); 1370 data);
1418 if (ret_val) 1371 if (ret_val)
1419 goto out; 1372 goto out;
1420 /* 1373 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
1421 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1422 * during Dx states where the power conservation is most 1374 * during Dx states where the power conservation is most
1423 * important. During driver activity we should enable 1375 * important. During driver activity we should enable
1424 * SmartSpeed, so performance is maintained. 1376 * SmartSpeed, so performance is maintained.
@@ -1461,13 +1413,13 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1461 1413
1462 /* When LPLU is enabled, we should disable SmartSpeed */ 1414 /* When LPLU is enabled, we should disable SmartSpeed */
1463 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 1415 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1464 &data); 1416 &data);
1465 if (ret_val) 1417 if (ret_val)
1466 goto out; 1418 goto out;
1467 1419
1468 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1420 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1469 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 1421 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1470 data); 1422 data);
1471 } 1423 }
1472 1424
1473out: 1425out:
@@ -1556,8 +1508,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1556 s32 ret_val; 1508 s32 ret_val;
1557 u16 data, offset, mask; 1509 u16 data, offset, mask;
1558 1510
1559 /* 1511 /* Polarity is determined based on the speed of
1560 * Polarity is determined based on the speed of
1561 * our connection. 1512 * our connection.
1562 */ 1513 */
1563 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); 1514 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
@@ -1569,8 +1520,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1569 offset = IGP01E1000_PHY_PCS_INIT_REG; 1520 offset = IGP01E1000_PHY_PCS_INIT_REG;
1570 mask = IGP01E1000_PHY_POLARITY_MASK; 1521 mask = IGP01E1000_PHY_POLARITY_MASK;
1571 } else { 1522 } else {
1572 /* 1523 /* This really only applies to 10Mbps since
1573 * This really only applies to 10Mbps since
1574 * there is no polarity for 100Mbps (always 0). 1524 * there is no polarity for 100Mbps (always 0).
1575 */ 1525 */
1576 offset = IGP01E1000_PHY_PORT_STATUS; 1526 offset = IGP01E1000_PHY_PORT_STATUS;
@@ -1589,7 +1539,7 @@ out:
1589} 1539}
1590 1540
1591/** 1541/**
1592 * igb_wait_autoneg - Wait for auto-neg compeletion 1542 * igb_wait_autoneg - Wait for auto-neg completion
1593 * @hw: pointer to the HW structure 1543 * @hw: pointer to the HW structure
1594 * 1544 *
1595 * Waits for auto-negotiation to complete or for the auto-negotiation time 1545 * Waits for auto-negotiation to complete or for the auto-negotiation time
@@ -1613,8 +1563,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
1613 msleep(100); 1563 msleep(100);
1614 } 1564 }
1615 1565
1616 /* 1566 /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
1617 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
1618 * has completed. 1567 * has completed.
1619 */ 1568 */
1620 return ret_val; 1569 return ret_val;
@@ -1630,21 +1579,19 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
1630 * Polls the PHY status register for link, 'iterations' number of times. 1579 * Polls the PHY status register for link, 'iterations' number of times.
1631 **/ 1580 **/
1632s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, 1581s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1633 u32 usec_interval, bool *success) 1582 u32 usec_interval, bool *success)
1634{ 1583{
1635 s32 ret_val = 0; 1584 s32 ret_val = 0;
1636 u16 i, phy_status; 1585 u16 i, phy_status;
1637 1586
1638 for (i = 0; i < iterations; i++) { 1587 for (i = 0; i < iterations; i++) {
1639 /* 1588 /* Some PHYs require the PHY_STATUS register to be read
1640 * Some PHYs require the PHY_STATUS register to be read
1641 * twice due to the link bit being sticky. No harm doing 1589 * twice due to the link bit being sticky. No harm doing
1642 * it across the board. 1590 * it across the board.
1643 */ 1591 */
1644 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 1592 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1645 if (ret_val) { 1593 if (ret_val) {
1646 /* 1594 /* If the first read fails, another entity may have
1647 * If the first read fails, another entity may have
1648 * ownership of the resources, wait and try again to 1595 * ownership of the resources, wait and try again to
1649 * see if they have relinquished the resources yet. 1596 * see if they have relinquished the resources yet.
1650 */ 1597 */
@@ -1735,6 +1682,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1735 phy->max_cable_length = phy_data / (is_cm ? 100 : 1); 1682 phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
1736 phy->cable_length = phy_data / (is_cm ? 100 : 1); 1683 phy->cable_length = phy_data / (is_cm ? 100 : 1);
1737 break; 1684 break;
1685 case M88E1545_E_PHY_ID:
1738 case I347AT4_E_PHY_ID: 1686 case I347AT4_E_PHY_ID:
1739 /* Remember the original page select and set it to 7 */ 1687 /* Remember the original page select and set it to 7 */
1740 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, 1688 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -1834,10 +1782,10 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1834 u16 cur_agc_index, max_agc_index = 0; 1782 u16 cur_agc_index, max_agc_index = 0;
1835 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; 1783 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1836 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { 1784 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
1837 IGP02E1000_PHY_AGC_A, 1785 IGP02E1000_PHY_AGC_A,
1838 IGP02E1000_PHY_AGC_B, 1786 IGP02E1000_PHY_AGC_B,
1839 IGP02E1000_PHY_AGC_C, 1787 IGP02E1000_PHY_AGC_C,
1840 IGP02E1000_PHY_AGC_D 1788 IGP02E1000_PHY_AGC_D
1841 }; 1789 };
1842 1790
1843 /* Read the AGC registers for all channels */ 1791 /* Read the AGC registers for all channels */
@@ -1846,8 +1794,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1846 if (ret_val) 1794 if (ret_val)
1847 goto out; 1795 goto out;
1848 1796
1849 /* 1797 /* Getting bits 15:9, which represent the combination of
1850 * Getting bits 15:9, which represent the combination of
1851 * coarse and fine gain values. The result is a number 1798 * coarse and fine gain values. The result is a number
1852 * that can be put into the lookup table to obtain the 1799 * that can be put into the lookup table to obtain the
1853 * approximate cable length. 1800 * approximate cable length.
@@ -2167,15 +2114,13 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
2167 hw->phy.ops.write_reg(hw, 0x1796, 0x0008); 2114 hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
2168 /* Change cg_icount + enable integbp for channels BCD */ 2115 /* Change cg_icount + enable integbp for channels BCD */
2169 hw->phy.ops.write_reg(hw, 0x1798, 0xD008); 2116 hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
2170 /* 2117 /* Change cg_icount + enable integbp + change prop_factor_master
2171 * Change cg_icount + enable integbp + change prop_factor_master
2172 * to 8 for channel A 2118 * to 8 for channel A
2173 */ 2119 */
2174 hw->phy.ops.write_reg(hw, 0x1898, 0xD918); 2120 hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
2175 /* Disable AHT in Slave mode on channel A */ 2121 /* Disable AHT in Slave mode on channel A */
2176 hw->phy.ops.write_reg(hw, 0x187A, 0x0800); 2122 hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
2177 /* 2123 /* Enable LPLU and disable AN to 1000 in non-D0a states,
2178 * Enable LPLU and disable AN to 1000 in non-D0a states,
2179 * Enable SPD+B2B 2124 * Enable SPD+B2B
2180 */ 2125 */
2181 hw->phy.ops.write_reg(hw, 0x0019, 0x008D); 2126 hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
@@ -2257,8 +2202,8 @@ static s32 igb_check_polarity_82580(struct e1000_hw *hw)
2257 2202
2258 if (!ret_val) 2203 if (!ret_val)
2259 phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) 2204 phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
2260 ? e1000_rev_polarity_reversed 2205 ? e1000_rev_polarity_reversed
2261 : e1000_rev_polarity_normal; 2206 : e1000_rev_polarity_normal;
2262 2207
2263 return ret_val; 2208 return ret_val;
2264} 2209}
@@ -2278,7 +2223,6 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2278 u16 phy_data; 2223 u16 phy_data;
2279 bool link; 2224 bool link;
2280 2225
2281
2282 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); 2226 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
2283 if (ret_val) 2227 if (ret_val)
2284 goto out; 2228 goto out;
@@ -2289,8 +2233,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2289 if (ret_val) 2233 if (ret_val)
2290 goto out; 2234 goto out;
2291 2235
2292 /* 2236 /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI
2293 * Clear Auto-Crossover to force MDI manually. 82580 requires MDI
2294 * forced whenever speed and duplex are forced. 2237 * forced whenever speed and duplex are forced.
2295 */ 2238 */
2296 ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); 2239 ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
@@ -2310,10 +2253,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2310 if (phy->autoneg_wait_to_complete) { 2253 if (phy->autoneg_wait_to_complete) {
2311 hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); 2254 hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
2312 2255
2313 ret_val = igb_phy_has_link(hw, 2256 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
2314 PHY_FORCE_LIMIT,
2315 100000,
2316 &link);
2317 if (ret_val) 2257 if (ret_val)
2318 goto out; 2258 goto out;
2319 2259
@@ -2321,10 +2261,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
2321 hw_dbg("Link taking longer than expected.\n"); 2261 hw_dbg("Link taking longer than expected.\n");
2322 2262
2323 /* Try once more */ 2263 /* Try once more */
2324 ret_val = igb_phy_has_link(hw, 2264 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
2325 PHY_FORCE_LIMIT,
2326 100000,
2327 &link);
2328 if (ret_val) 2265 if (ret_val)
2329 goto out; 2266 goto out;
2330 } 2267 }
@@ -2349,7 +2286,6 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw)
2349 u16 data; 2286 u16 data;
2350 bool link; 2287 bool link;
2351 2288
2352
2353 ret_val = igb_phy_has_link(hw, 1, 0, &link); 2289 ret_val = igb_phy_has_link(hw, 1, 0, &link);
2354 if (ret_val) 2290 if (ret_val)
2355 goto out; 2291 goto out;
@@ -2383,12 +2319,12 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw)
2383 goto out; 2319 goto out;
2384 2320
2385 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) 2321 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
2386 ? e1000_1000t_rx_status_ok 2322 ? e1000_1000t_rx_status_ok
2387 : e1000_1000t_rx_status_not_ok; 2323 : e1000_1000t_rx_status_not_ok;
2388 2324
2389 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) 2325 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
2390 ? e1000_1000t_rx_status_ok 2326 ? e1000_1000t_rx_status_ok
2391 : e1000_1000t_rx_status_not_ok; 2327 : e1000_1000t_rx_status_not_ok;
2392 } else { 2328 } else {
2393 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; 2329 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
2394 phy->local_rx = e1000_1000t_rx_status_undefined; 2330 phy->local_rx = e1000_1000t_rx_status_undefined;
@@ -2412,13 +2348,12 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)
2412 s32 ret_val; 2348 s32 ret_val;
2413 u16 phy_data, length; 2349 u16 phy_data, length;
2414 2350
2415
2416 ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); 2351 ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
2417 if (ret_val) 2352 if (ret_val)
2418 goto out; 2353 goto out;
2419 2354
2420 length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> 2355 length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
2421 I82580_DSTATUS_CABLE_LENGTH_SHIFT; 2356 I82580_DSTATUS_CABLE_LENGTH_SHIFT;
2422 2357
2423 if (length == E1000_CABLE_LENGTH_UNDEFINED) 2358 if (length == E1000_CABLE_LENGTH_UNDEFINED)
2424 ret_val = -E1000_ERR_PHY; 2359 ret_val = -E1000_ERR_PHY;
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 15343286082e..82632c6c53af 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -65,6 +65,7 @@
65#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ 65#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
66#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 66#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
67#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 67#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
68#define E1000_LEDMUX 0x08130 /* LED MUX Control */
68#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 69#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
69#define E1000_PBS 0x01008 /* Packet Buffer Size */ 70#define E1000_PBS 0x01008 /* Packet Buffer Size */
70#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 71#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
@@ -83,6 +84,9 @@
83#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ 84#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
84#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ 85#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
85#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ 86#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
87#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
88#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */
89#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */
86 90
87/* IEEE 1588 TIMESYNCH */ 91/* IEEE 1588 TIMESYNCH */
88#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ 92#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
@@ -117,21 +121,21 @@
117#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) 121#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
118 122
119/* DMA Coalescing registers */ 123/* DMA Coalescing registers */
120#define E1000_DMACR 0x02508 /* Control Register */ 124#define E1000_DMACR 0x02508 /* Control Register */
121#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ 125#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
122#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ 126#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
123#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ 127#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
124#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ 128#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
125#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ 129#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
126#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ 130#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
127 131
128/* TX Rate Limit Registers */ 132/* TX Rate Limit Registers */
129#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ 133#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
130#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ 134#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
131#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ 135#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
132 136
133/* Split and Replication RX Control - RW */ 137/* Split and Replication RX Control - RW */
134#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ 138#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
135 139
136/* Thermal sensor configuration and status registers */ 140/* Thermal sensor configuration and status registers */
137#define E1000_THMJT 0x08100 /* Junction Temperature */ 141#define E1000_THMJT 0x08100 /* Junction Temperature */
@@ -140,8 +144,7 @@
140#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ 144#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
141#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ 145#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
142 146
143/* 147/* Convenience macros
144 * Convenience macros
145 * 148 *
146 * Note: "_n" is the queue number of the register to be written to. 149 * Note: "_n" is the queue number of the register to be written to.
147 * 150 *
@@ -287,7 +290,7 @@
287#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ 290#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
288#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 291#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
289#define E1000_RA 0x05400 /* Receive Address - RW Array */ 292#define E1000_RA 0x05400 /* Receive Address - RW Array */
290#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ 293#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
291#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) 294#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
292#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 295#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
293 (0x054E0 + ((_i - 16) * 8))) 296 (0x054E0 + ((_i - 16) * 8)))
@@ -360,21 +363,25 @@
360 (readl(hw->hw_addr + reg + ((offset) << 2))) 363 (readl(hw->hw_addr + reg + ((offset) << 2)))
361 364
362/* DMA Coalescing registers */ 365/* DMA Coalescing registers */
363#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ 366#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
364 367
365/* Energy Efficient Ethernet "EEE" register */ 368/* Energy Efficient Ethernet "EEE" register */
366#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ 369#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
367#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ 370#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
368#define E1000_EEE_SU 0X0E34 /* EEE Setup */ 371#define E1000_EEE_SU 0X0E34 /* EEE Setup */
372#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
373#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
374#define E1000_MMDAC 13 /* MMD Access Control */
375#define E1000_MMDAAD 14 /* MMD Access Address/Data */
369 376
370/* Thermal Sensor Register */ 377/* Thermal Sensor Register */
371#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ 378#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
372 379
373/* OS2BMC Registers */ 380/* OS2BMC Registers */
374#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ 381#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
375#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ 382#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
376#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ 383#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
377#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ 384#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
378 385
379#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ 386#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
380#define E1000_I210_FLMNGCTL 0x12038 387#define E1000_I210_FLMNGCTL 0x12038
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 25151401c2ab..c92115e71ebe 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -44,54 +44,54 @@
44 44
45struct igb_adapter; 45struct igb_adapter;
46 46
47#define E1000_PCS_CFG_IGN_SD 1 47#define E1000_PCS_CFG_IGN_SD 1
48 48
49/* Interrupt defines */ 49/* Interrupt defines */
50#define IGB_START_ITR 648 /* ~6000 ints/sec */ 50#define IGB_START_ITR 648 /* ~6000 ints/sec */
51#define IGB_4K_ITR 980 51#define IGB_4K_ITR 980
52#define IGB_20K_ITR 196 52#define IGB_20K_ITR 196
53#define IGB_70K_ITR 56 53#define IGB_70K_ITR 56
54 54
55/* TX/RX descriptor defines */ 55/* TX/RX descriptor defines */
56#define IGB_DEFAULT_TXD 256 56#define IGB_DEFAULT_TXD 256
57#define IGB_DEFAULT_TX_WORK 128 57#define IGB_DEFAULT_TX_WORK 128
58#define IGB_MIN_TXD 80 58#define IGB_MIN_TXD 80
59#define IGB_MAX_TXD 4096 59#define IGB_MAX_TXD 4096
60 60
61#define IGB_DEFAULT_RXD 256 61#define IGB_DEFAULT_RXD 256
62#define IGB_MIN_RXD 80 62#define IGB_MIN_RXD 80
63#define IGB_MAX_RXD 4096 63#define IGB_MAX_RXD 4096
64 64
65#define IGB_DEFAULT_ITR 3 /* dynamic */ 65#define IGB_DEFAULT_ITR 3 /* dynamic */
66#define IGB_MAX_ITR_USECS 10000 66#define IGB_MAX_ITR_USECS 10000
67#define IGB_MIN_ITR_USECS 10 67#define IGB_MIN_ITR_USECS 10
68#define NON_Q_VECTORS 1 68#define NON_Q_VECTORS 1
69#define MAX_Q_VECTORS 8 69#define MAX_Q_VECTORS 8
70 70
71/* Transmit and receive queues */ 71/* Transmit and receive queues */
72#define IGB_MAX_RX_QUEUES 8 72#define IGB_MAX_RX_QUEUES 8
73#define IGB_MAX_RX_QUEUES_82575 4 73#define IGB_MAX_RX_QUEUES_82575 4
74#define IGB_MAX_RX_QUEUES_I211 2 74#define IGB_MAX_RX_QUEUES_I211 2
75#define IGB_MAX_TX_QUEUES 8 75#define IGB_MAX_TX_QUEUES 8
76#define IGB_MAX_VF_MC_ENTRIES 30 76#define IGB_MAX_VF_MC_ENTRIES 30
77#define IGB_MAX_VF_FUNCTIONS 8 77#define IGB_MAX_VF_FUNCTIONS 8
78#define IGB_MAX_VFTA_ENTRIES 128 78#define IGB_MAX_VFTA_ENTRIES 128
79#define IGB_82576_VF_DEV_ID 0x10CA 79#define IGB_82576_VF_DEV_ID 0x10CA
80#define IGB_I350_VF_DEV_ID 0x1520 80#define IGB_I350_VF_DEV_ID 0x1520
81 81
82/* NVM version defines */ 82/* NVM version defines */
83#define IGB_MAJOR_MASK 0xF000 83#define IGB_MAJOR_MASK 0xF000
84#define IGB_MINOR_MASK 0x0FF0 84#define IGB_MINOR_MASK 0x0FF0
85#define IGB_BUILD_MASK 0x000F 85#define IGB_BUILD_MASK 0x000F
86#define IGB_COMB_VER_MASK 0x00FF 86#define IGB_COMB_VER_MASK 0x00FF
87#define IGB_MAJOR_SHIFT 12 87#define IGB_MAJOR_SHIFT 12
88#define IGB_MINOR_SHIFT 4 88#define IGB_MINOR_SHIFT 4
89#define IGB_COMB_VER_SHFT 8 89#define IGB_COMB_VER_SHFT 8
90#define IGB_NVM_VER_INVALID 0xFFFF 90#define IGB_NVM_VER_INVALID 0xFFFF
91#define IGB_ETRACK_SHIFT 16 91#define IGB_ETRACK_SHIFT 16
92#define NVM_ETRACK_WORD 0x0042 92#define NVM_ETRACK_WORD 0x0042
93#define NVM_COMB_VER_OFF 0x0083 93#define NVM_COMB_VER_OFF 0x0083
94#define NVM_COMB_VER_PTR 0x003d 94#define NVM_COMB_VER_PTR 0x003d
95 95
96struct vf_data_storage { 96struct vf_data_storage {
97 unsigned char vf_mac_addresses[ETH_ALEN]; 97 unsigned char vf_mac_addresses[ETH_ALEN];
@@ -103,6 +103,7 @@ struct vf_data_storage {
103 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 103 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
104 u16 pf_qos; 104 u16 pf_qos;
105 u16 tx_rate; 105 u16 tx_rate;
106 bool spoofchk_enabled;
106}; 107};
107 108
108#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ 109#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -121,14 +122,14 @@ struct vf_data_storage {
121 * descriptors until either it has this many to write back, or the 122 * descriptors until either it has this many to write back, or the
122 * ITR timer expires. 123 * ITR timer expires.
123 */ 124 */
124#define IGB_RX_PTHRESH 8 125#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
125#define IGB_RX_HTHRESH 8 126#define IGB_RX_HTHRESH 8
126#define IGB_TX_PTHRESH 8 127#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
127#define IGB_TX_HTHRESH 1 128#define IGB_TX_HTHRESH 1
128#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ 129#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
129 adapter->msix_entries) ? 1 : 4) 130 adapter->msix_entries) ? 1 : 4)
130#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ 131#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
131 adapter->msix_entries) ? 1 : 16) 132 adapter->msix_entries) ? 1 : 16)
132 133
133/* this is the size past which hardware will drop packets when setting LPE=0 */ 134/* this is the size past which hardware will drop packets when setting LPE=0 */
134#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 135#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -140,17 +141,17 @@ struct vf_data_storage {
140#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 141#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
141 142
142/* How many Rx Buffers do we bundle into one write to the hardware ? */ 143/* How many Rx Buffers do we bundle into one write to the hardware ? */
143#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 144#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
144 145
145#define AUTO_ALL_MODES 0 146#define AUTO_ALL_MODES 0
146#define IGB_EEPROM_APME 0x0400 147#define IGB_EEPROM_APME 0x0400
147 148
148#ifndef IGB_MASTER_SLAVE 149#ifndef IGB_MASTER_SLAVE
149/* Switch to override PHY master/slave setting */ 150/* Switch to override PHY master/slave setting */
150#define IGB_MASTER_SLAVE e1000_ms_hw_default 151#define IGB_MASTER_SLAVE e1000_ms_hw_default
151#endif 152#endif
152 153
153#define IGB_MNG_VLAN_NONE -1 154#define IGB_MNG_VLAN_NONE -1
154 155
155enum igb_tx_flags { 156enum igb_tx_flags {
156 /* cmd_type flags */ 157 /* cmd_type flags */
@@ -164,11 +165,10 @@ enum igb_tx_flags {
164}; 165};
165 166
166/* VLAN info */ 167/* VLAN info */
167#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 168#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
168#define IGB_TX_FLAGS_VLAN_SHIFT 16 169#define IGB_TX_FLAGS_VLAN_SHIFT 16
169 170
170/* 171/* The largest size we can write to the descriptor is 65535. In order to
171 * The largest size we can write to the descriptor is 65535. In order to
172 * maintain a power of two alignment we have to limit ourselves to 32K. 172 * maintain a power of two alignment we have to limit ourselves to 32K.
173 */ 173 */
174#define IGB_MAX_TXD_PWR 15 174#define IGB_MAX_TXD_PWR 15
@@ -178,8 +178,17 @@ enum igb_tx_flags {
178#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) 178#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
179#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 179#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
180 180
181/* EEPROM byte offsets */
182#define IGB_SFF_8472_SWAP 0x5C
183#define IGB_SFF_8472_COMP 0x5E
184
185/* Bitmasks */
186#define IGB_SFF_ADDRESSING_MODE 0x4
187#define IGB_SFF_8472_UNSUP 0x00
188
181/* wrapper around a pointer to a socket buffer, 189/* wrapper around a pointer to a socket buffer,
182 * so a DMA handle can be stored along with the buffer */ 190 * so a DMA handle can be stored along with the buffer
191 */
183struct igb_tx_buffer { 192struct igb_tx_buffer {
184 union e1000_adv_tx_desc *next_to_watch; 193 union e1000_adv_tx_desc *next_to_watch;
185 unsigned long time_stamp; 194 unsigned long time_stamp;
@@ -298,11 +307,11 @@ enum e1000_ring_flags_t {
298 307
299#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) 308#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
300 309
301#define IGB_RX_DESC(R, i) \ 310#define IGB_RX_DESC(R, i) \
302 (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) 311 (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
303#define IGB_TX_DESC(R, i) \ 312#define IGB_TX_DESC(R, i) \
304 (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) 313 (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
305#define IGB_TX_CTXTDESC(R, i) \ 314#define IGB_TX_CTXTDESC(R, i) \
306 (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) 315 (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
307 316
308/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ 317/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
@@ -461,12 +470,12 @@ struct igb_adapter {
461#define IGB_FLAG_WOL_SUPPORTED (1 << 8) 470#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
462 471
463/* DMA Coalescing defines */ 472/* DMA Coalescing defines */
464#define IGB_MIN_TXPBSIZE 20408 473#define IGB_MIN_TXPBSIZE 20408
465#define IGB_TX_BUF_4096 4096 474#define IGB_TX_BUF_4096 4096
466#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ 475#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
467 476
468#define IGB_82576_TSYNC_SHIFT 19 477#define IGB_82576_TSYNC_SHIFT 19
469#define IGB_TS_HDR_LEN 16 478#define IGB_TS_HDR_LEN 16
470enum e1000_state_t { 479enum e1000_state_t {
471 __IGB_TESTING, 480 __IGB_TESTING,
472 __IGB_RESETTING, 481 __IGB_RESETTING,
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a3830a8ba4c1..48b594701efa 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
40#include <linux/highmem.h> 40#include <linux/highmem.h>
41#include <linux/mdio.h>
41 42
42#include "igb.h" 43#include "igb.h"
43 44
@@ -178,44 +179,67 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
178 179
179 ecmd->port = PORT_TP; 180 ecmd->port = PORT_TP;
180 ecmd->phy_address = hw->phy.addr; 181 ecmd->phy_address = hw->phy.addr;
182 ecmd->transceiver = XCVR_INTERNAL;
181 } else { 183 } else {
182 ecmd->supported = (SUPPORTED_1000baseT_Full | 184 ecmd->supported = (SUPPORTED_1000baseT_Full |
183 SUPPORTED_FIBRE | 185 SUPPORTED_100baseT_Full |
184 SUPPORTED_Autoneg); 186 SUPPORTED_FIBRE |
187 SUPPORTED_Autoneg |
188 SUPPORTED_Pause);
189 if (hw->mac.type == e1000_i354)
190 ecmd->supported |= SUPPORTED_2500baseX_Full;
185 191
186 ecmd->advertising = (ADVERTISED_1000baseT_Full | 192 ecmd->advertising = ADVERTISED_FIBRE;
187 ADVERTISED_FIBRE | 193
188 ADVERTISED_Autoneg | 194 switch (adapter->link_speed) {
189 ADVERTISED_Pause); 195 case SPEED_2500:
196 ecmd->advertising = ADVERTISED_2500baseX_Full;
197 break;
198 case SPEED_1000:
199 ecmd->advertising = ADVERTISED_1000baseT_Full;
200 break;
201 case SPEED_100:
202 ecmd->advertising = ADVERTISED_100baseT_Full;
203 break;
204 default:
205 break;
206 }
207
208 if (hw->mac.autoneg == 1)
209 ecmd->advertising |= ADVERTISED_Autoneg;
190 210
191 ecmd->port = PORT_FIBRE; 211 ecmd->port = PORT_FIBRE;
212 ecmd->transceiver = XCVR_EXTERNAL;
192 } 213 }
193 214
194 ecmd->transceiver = XCVR_INTERNAL;
195
196 status = rd32(E1000_STATUS); 215 status = rd32(E1000_STATUS);
197 216
198 if (status & E1000_STATUS_LU) { 217 if (status & E1000_STATUS_LU) {
199 218 if ((hw->mac.type == e1000_i354) &&
200 if ((status & E1000_STATUS_SPEED_1000) || 219 (status & E1000_STATUS_2P5_SKU) &&
201 hw->phy.media_type != e1000_media_type_copper) 220 !(status & E1000_STATUS_2P5_SKU_OVER))
202 ethtool_cmd_speed_set(ecmd, SPEED_1000); 221 ecmd->speed = SPEED_2500;
222 else if (status & E1000_STATUS_SPEED_1000)
223 ecmd->speed = SPEED_1000;
203 else if (status & E1000_STATUS_SPEED_100) 224 else if (status & E1000_STATUS_SPEED_100)
204 ethtool_cmd_speed_set(ecmd, SPEED_100); 225 ecmd->speed = SPEED_100;
205 else 226 else
206 ethtool_cmd_speed_set(ecmd, SPEED_10); 227 ecmd->speed = SPEED_10;
207
208 if ((status & E1000_STATUS_FD) || 228 if ((status & E1000_STATUS_FD) ||
209 hw->phy.media_type != e1000_media_type_copper) 229 hw->phy.media_type != e1000_media_type_copper)
210 ecmd->duplex = DUPLEX_FULL; 230 ecmd->duplex = DUPLEX_FULL;
211 else 231 else
212 ecmd->duplex = DUPLEX_HALF; 232 ecmd->duplex = DUPLEX_HALF;
213 } else { 233 } else {
214 ethtool_cmd_speed_set(ecmd, -1); 234 ecmd->speed = -1;
215 ecmd->duplex = -1; 235 ecmd->duplex = -1;
216 } 236 }
217 237
218 ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; 238 if ((hw->phy.media_type == e1000_media_type_fiber) ||
239 hw->mac.autoneg)
240 ecmd->autoneg = AUTONEG_ENABLE;
241 else
242 ecmd->autoneg = AUTONEG_DISABLE;
219 243
220 /* MDI-X => 2; MDI =>1; Invalid =>0 */ 244 /* MDI-X => 2; MDI =>1; Invalid =>0 */
221 if (hw->phy.media_type == e1000_media_type_copper) 245 if (hw->phy.media_type == e1000_media_type_copper)
@@ -238,15 +262,15 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
238 struct e1000_hw *hw = &adapter->hw; 262 struct e1000_hw *hw = &adapter->hw;
239 263
240 /* When SoL/IDER sessions are active, autoneg/speed/duplex 264 /* When SoL/IDER sessions are active, autoneg/speed/duplex
241 * cannot be changed */ 265 * cannot be changed
266 */
242 if (igb_check_reset_block(hw)) { 267 if (igb_check_reset_block(hw)) {
243 dev_err(&adapter->pdev->dev, 268 dev_err(&adapter->pdev->dev,
244 "Cannot change link characteristics when SoL/IDER is active.\n"); 269 "Cannot change link characteristics when SoL/IDER is active.\n");
245 return -EINVAL; 270 return -EINVAL;
246 } 271 }
247 272
248 /* 273 /* MDI setting is only allowed when autoneg enabled because
249 * MDI setting is only allowed when autoneg enabled because
250 * some hardware doesn't allow MDI setting when speed or 274 * some hardware doesn't allow MDI setting when speed or
251 * duplex is forced. 275 * duplex is forced.
252 */ 276 */
@@ -266,9 +290,31 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
266 290
267 if (ecmd->autoneg == AUTONEG_ENABLE) { 291 if (ecmd->autoneg == AUTONEG_ENABLE) {
268 hw->mac.autoneg = 1; 292 hw->mac.autoneg = 1;
269 hw->phy.autoneg_advertised = ecmd->advertising | 293 if (hw->phy.media_type == e1000_media_type_fiber) {
270 ADVERTISED_TP | 294 hw->phy.autoneg_advertised = ecmd->advertising |
271 ADVERTISED_Autoneg; 295 ADVERTISED_FIBRE |
296 ADVERTISED_Autoneg;
297 switch (adapter->link_speed) {
298 case SPEED_2500:
299 hw->phy.autoneg_advertised =
300 ADVERTISED_2500baseX_Full;
301 break;
302 case SPEED_1000:
303 hw->phy.autoneg_advertised =
304 ADVERTISED_1000baseT_Full;
305 break;
306 case SPEED_100:
307 hw->phy.autoneg_advertised =
308 ADVERTISED_100baseT_Full;
309 break;
310 default:
311 break;
312 }
313 } else {
314 hw->phy.autoneg_advertised = ecmd->advertising |
315 ADVERTISED_TP |
316 ADVERTISED_Autoneg;
317 }
272 ecmd->advertising = hw->phy.autoneg_advertised; 318 ecmd->advertising = hw->phy.autoneg_advertised;
273 if (adapter->fc_autoneg) 319 if (adapter->fc_autoneg)
274 hw->fc.requested_mode = e1000_fc_default; 320 hw->fc.requested_mode = e1000_fc_default;
@@ -283,8 +329,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
283 329
284 /* MDI-X => 2; MDI => 1; Auto => 3 */ 330 /* MDI-X => 2; MDI => 1; Auto => 3 */
285 if (ecmd->eth_tp_mdix_ctrl) { 331 if (ecmd->eth_tp_mdix_ctrl) {
286 /* 332 /* fix up the value for auto (3 => 0) as zero is mapped
287 * fix up the value for auto (3 => 0) as zero is mapped
288 * internally to auto 333 * internally to auto
289 */ 334 */
290 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) 335 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
@@ -309,8 +354,7 @@ static u32 igb_get_link(struct net_device *netdev)
309 struct igb_adapter *adapter = netdev_priv(netdev); 354 struct igb_adapter *adapter = netdev_priv(netdev);
310 struct e1000_mac_info *mac = &adapter->hw.mac; 355 struct e1000_mac_info *mac = &adapter->hw.mac;
311 356
312 /* 357 /* If the link is not reported up to netdev, interrupts are disabled,
313 * If the link is not reported up to netdev, interrupts are disabled,
314 * and so the physical link state may have changed since we last 358 * and so the physical link state may have changed since we last
315 * looked. Set get_link_status to make sure that the true link 359 * looked. Set get_link_status to make sure that the true link
316 * state is interrogated, rather than pulling a cached and possibly 360 * state is interrogated, rather than pulling a cached and possibly
@@ -430,7 +474,8 @@ static void igb_get_regs(struct net_device *netdev,
430 474
431 /* Interrupt */ 475 /* Interrupt */
432 /* Reading EICS for EICR because they read the 476 /* Reading EICS for EICR because they read the
433 * same but EICS does not clear on read */ 477 * same but EICS does not clear on read
478 */
434 regs_buff[13] = rd32(E1000_EICS); 479 regs_buff[13] = rd32(E1000_EICS);
435 regs_buff[14] = rd32(E1000_EICS); 480 regs_buff[14] = rd32(E1000_EICS);
436 regs_buff[15] = rd32(E1000_EIMS); 481 regs_buff[15] = rd32(E1000_EIMS);
@@ -438,7 +483,8 @@ static void igb_get_regs(struct net_device *netdev,
438 regs_buff[17] = rd32(E1000_EIAC); 483 regs_buff[17] = rd32(E1000_EIAC);
439 regs_buff[18] = rd32(E1000_EIAM); 484 regs_buff[18] = rd32(E1000_EIAM);
440 /* Reading ICS for ICR because they read the 485 /* Reading ICS for ICR because they read the
441 * same but ICS does not clear on read */ 486 * same but ICS does not clear on read
487 */
442 regs_buff[19] = rd32(E1000_ICS); 488 regs_buff[19] = rd32(E1000_ICS);
443 regs_buff[20] = rd32(E1000_ICS); 489 regs_buff[20] = rd32(E1000_ICS);
444 regs_buff[21] = rd32(E1000_IMS); 490 regs_buff[21] = rd32(E1000_IMS);
@@ -688,12 +734,12 @@ static int igb_get_eeprom(struct net_device *netdev,
688 734
689 if (hw->nvm.type == e1000_nvm_eeprom_spi) 735 if (hw->nvm.type == e1000_nvm_eeprom_spi)
690 ret_val = hw->nvm.ops.read(hw, first_word, 736 ret_val = hw->nvm.ops.read(hw, first_word,
691 last_word - first_word + 1, 737 last_word - first_word + 1,
692 eeprom_buff); 738 eeprom_buff);
693 else { 739 else {
694 for (i = 0; i < last_word - first_word + 1; i++) { 740 for (i = 0; i < last_word - first_word + 1; i++) {
695 ret_val = hw->nvm.ops.read(hw, first_word + i, 1, 741 ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
696 &eeprom_buff[i]); 742 &eeprom_buff[i]);
697 if (ret_val) 743 if (ret_val)
698 break; 744 break;
699 } 745 }
@@ -740,15 +786,17 @@ static int igb_set_eeprom(struct net_device *netdev,
740 ptr = (void *)eeprom_buff; 786 ptr = (void *)eeprom_buff;
741 787
742 if (eeprom->offset & 1) { 788 if (eeprom->offset & 1) {
743 /* need read/modify/write of first changed EEPROM word */ 789 /* need read/modify/write of first changed EEPROM word
744 /* only the second byte of the word is being modified */ 790 * only the second byte of the word is being modified
791 */
745 ret_val = hw->nvm.ops.read(hw, first_word, 1, 792 ret_val = hw->nvm.ops.read(hw, first_word, 1,
746 &eeprom_buff[0]); 793 &eeprom_buff[0]);
747 ptr++; 794 ptr++;
748 } 795 }
749 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 796 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
750 /* need read/modify/write of last changed EEPROM word */ 797 /* need read/modify/write of last changed EEPROM word
751 /* only the first byte of the word is being modified */ 798 * only the first byte of the word is being modified
799 */
752 ret_val = hw->nvm.ops.read(hw, last_word, 1, 800 ret_val = hw->nvm.ops.read(hw, last_word, 1,
753 &eeprom_buff[last_word - first_word]); 801 &eeprom_buff[last_word - first_word]);
754 } 802 }
@@ -763,10 +811,11 @@ static int igb_set_eeprom(struct net_device *netdev,
763 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); 811 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
764 812
765 ret_val = hw->nvm.ops.write(hw, first_word, 813 ret_val = hw->nvm.ops.write(hw, first_word,
766 last_word - first_word + 1, eeprom_buff); 814 last_word - first_word + 1, eeprom_buff);
767 815
768 /* Update the checksum over the first part of the EEPROM if needed 816 /* Update the checksum over the first part of the EEPROM if needed
769 * and flush shadow RAM for 82573 controllers */ 817 * and flush shadow RAM for 82573 controllers
818 */
770 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) 819 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
771 hw->nvm.ops.update(hw); 820 hw->nvm.ops.update(hw);
772 821
@@ -783,8 +832,7 @@ static void igb_get_drvinfo(struct net_device *netdev,
783 strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); 832 strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
784 strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); 833 strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
785 834
786 /* 835 /* EEPROM image version # is reported as firmware version # for
787 * EEPROM image version # is reported as firmware version # for
788 * 82575 controllers 836 * 82575 controllers
789 */ 837 */
790 strlcpy(drvinfo->fw_version, adapter->fw_version, 838 strlcpy(drvinfo->fw_version, adapter->fw_version,
@@ -847,9 +895,11 @@ static int igb_set_ringparam(struct net_device *netdev,
847 } 895 }
848 896
849 if (adapter->num_tx_queues > adapter->num_rx_queues) 897 if (adapter->num_tx_queues > adapter->num_rx_queues)
850 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); 898 temp_ring = vmalloc(adapter->num_tx_queues *
899 sizeof(struct igb_ring));
851 else 900 else
852 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); 901 temp_ring = vmalloc(adapter->num_rx_queues *
902 sizeof(struct igb_ring));
853 903
854 if (!temp_ring) { 904 if (!temp_ring) {
855 err = -ENOMEM; 905 err = -ENOMEM;
@@ -858,10 +908,9 @@ static int igb_set_ringparam(struct net_device *netdev,
858 908
859 igb_down(adapter); 909 igb_down(adapter);
860 910
861 /* 911 /* We can't just free everything and then setup again,
862 * We can't just free everything and then setup again,
863 * because the ISRs in MSI-X mode get passed pointers 912 * because the ISRs in MSI-X mode get passed pointers
864 * to the tx and rx ring structs. 913 * to the Tx and Rx ring structs.
865 */ 914 */
866 if (new_tx_count != adapter->tx_ring_count) { 915 if (new_tx_count != adapter->tx_ring_count) {
867 for (i = 0; i < adapter->num_tx_queues; i++) { 916 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -1199,6 +1248,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1199 1248
1200 switch (adapter->hw.mac.type) { 1249 switch (adapter->hw.mac.type) {
1201 case e1000_i350: 1250 case e1000_i350:
1251 case e1000_i354:
1202 test = reg_test_i350; 1252 test = reg_test_i350;
1203 toggle = 0x7FEFF3FF; 1253 toggle = 0x7FEFF3FF;
1204 break; 1254 break;
@@ -1361,6 +1411,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1361 ics_mask = 0x77DCFED5; 1411 ics_mask = 0x77DCFED5;
1362 break; 1412 break;
1363 case e1000_i350: 1413 case e1000_i350:
1414 case e1000_i354:
1364 case e1000_i210: 1415 case e1000_i210:
1365 case e1000_i211: 1416 case e1000_i211:
1366 ics_mask = 0x77DCFED5; 1417 ics_mask = 0x77DCFED5;
@@ -1723,8 +1774,8 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
1723} 1774}
1724 1775
1725static int igb_clean_test_rings(struct igb_ring *rx_ring, 1776static int igb_clean_test_rings(struct igb_ring *rx_ring,
1726 struct igb_ring *tx_ring, 1777 struct igb_ring *tx_ring,
1727 unsigned int size) 1778 unsigned int size)
1728{ 1779{
1729 union e1000_adv_rx_desc *rx_desc; 1780 union e1000_adv_rx_desc *rx_desc;
1730 struct igb_rx_buffer *rx_buffer_info; 1781 struct igb_rx_buffer *rx_buffer_info;
@@ -1737,7 +1788,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1737 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); 1788 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
1738 1789
1739 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { 1790 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
1740 /* check rx buffer */ 1791 /* check Rx buffer */
1741 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1792 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1742 1793
1743 /* sync Rx buffer for CPU read */ 1794 /* sync Rx buffer for CPU read */
@@ -1756,11 +1807,11 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1756 IGB_RX_BUFSZ, 1807 IGB_RX_BUFSZ,
1757 DMA_FROM_DEVICE); 1808 DMA_FROM_DEVICE);
1758 1809
1759 /* unmap buffer on tx side */ 1810 /* unmap buffer on Tx side */
1760 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1811 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1761 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1812 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1762 1813
1763 /* increment rx/tx next to clean counters */ 1814 /* increment Rx/Tx next to clean counters */
1764 rx_ntc++; 1815 rx_ntc++;
1765 if (rx_ntc == rx_ring->count) 1816 if (rx_ntc == rx_ring->count)
1766 rx_ntc = 0; 1817 rx_ntc = 0;
@@ -1801,8 +1852,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1801 igb_create_lbtest_frame(skb, size); 1852 igb_create_lbtest_frame(skb, size);
1802 skb_put(skb, size); 1853 skb_put(skb, size);
1803 1854
1804 /* 1855 /* Calculate the loop count based on the largest descriptor ring
1805 * Calculate the loop count based on the largest descriptor ring
1806 * The idea is to wrap the largest ring a number of times using 64 1856 * The idea is to wrap the largest ring a number of times using 64
1807 * send/receive pairs during each loop 1857 * send/receive pairs during each loop
1808 */ 1858 */
@@ -1829,7 +1879,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1829 break; 1879 break;
1830 } 1880 }
1831 1881
1832 /* allow 200 milliseconds for packets to go from tx to rx */ 1882 /* allow 200 milliseconds for packets to go from Tx to Rx */
1833 msleep(200); 1883 msleep(200);
1834 1884
1835 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); 1885 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
@@ -1848,13 +1898,21 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1848static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) 1898static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1849{ 1899{
1850 /* PHY loopback cannot be performed if SoL/IDER 1900 /* PHY loopback cannot be performed if SoL/IDER
1851 * sessions are active */ 1901 * sessions are active
1902 */
1852 if (igb_check_reset_block(&adapter->hw)) { 1903 if (igb_check_reset_block(&adapter->hw)) {
1853 dev_err(&adapter->pdev->dev, 1904 dev_err(&adapter->pdev->dev,
1854 "Cannot do PHY loopback test when SoL/IDER is active.\n"); 1905 "Cannot do PHY loopback test when SoL/IDER is active.\n");
1855 *data = 0; 1906 *data = 0;
1856 goto out; 1907 goto out;
1857 } 1908 }
1909
1910 if (adapter->hw.mac.type == e1000_i354) {
1911 dev_info(&adapter->pdev->dev,
1912 "Loopback test not supported on i354.\n");
1913 *data = 0;
1914 goto out;
1915 }
1858 *data = igb_setup_desc_rings(adapter); 1916 *data = igb_setup_desc_rings(adapter);
1859 if (*data) 1917 if (*data)
1860 goto out; 1918 goto out;
@@ -1879,7 +1937,8 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1879 hw->mac.serdes_has_link = false; 1937 hw->mac.serdes_has_link = false;
1880 1938
1881 /* On some blade server designs, link establishment 1939 /* On some blade server designs, link establishment
1882 * could take as long as 2-3 minutes */ 1940 * could take as long as 2-3 minutes
1941 */
1883 do { 1942 do {
1884 hw->mac.ops.check_for_link(&adapter->hw); 1943 hw->mac.ops.check_for_link(&adapter->hw);
1885 if (hw->mac.serdes_has_link) 1944 if (hw->mac.serdes_has_link)
@@ -1922,7 +1981,8 @@ static void igb_diag_test(struct net_device *netdev,
1922 igb_power_up_link(adapter); 1981 igb_power_up_link(adapter);
1923 1982
1924 /* Link test performed before hardware reset so autoneg doesn't 1983 /* Link test performed before hardware reset so autoneg doesn't
1925 * interfere with test result */ 1984 * interfere with test result
1985 */
1926 if (igb_link_test(adapter, &data[4])) 1986 if (igb_link_test(adapter, &data[4]))
1927 eth_test->flags |= ETH_TEST_FL_FAILED; 1987 eth_test->flags |= ETH_TEST_FL_FAILED;
1928 1988
@@ -1987,8 +2047,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1987 struct igb_adapter *adapter = netdev_priv(netdev); 2047 struct igb_adapter *adapter = netdev_priv(netdev);
1988 2048
1989 wol->supported = WAKE_UCAST | WAKE_MCAST | 2049 wol->supported = WAKE_UCAST | WAKE_MCAST |
1990 WAKE_BCAST | WAKE_MAGIC | 2050 WAKE_BCAST | WAKE_MAGIC |
1991 WAKE_PHY; 2051 WAKE_PHY;
1992 wol->wolopts = 0; 2052 wol->wolopts = 0;
1993 2053
1994 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) 2054 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
@@ -2263,7 +2323,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2263 sprintf(p, "rx_queue_%u_alloc_failed", i); 2323 sprintf(p, "rx_queue_%u_alloc_failed", i);
2264 p += ETH_GSTRING_LEN; 2324 p += ETH_GSTRING_LEN;
2265 } 2325 }
2266/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ 2326 /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2267 break; 2327 break;
2268 } 2328 }
2269} 2329}
@@ -2283,6 +2343,7 @@ static int igb_get_ts_info(struct net_device *dev,
2283 case e1000_82576: 2343 case e1000_82576:
2284 case e1000_82580: 2344 case e1000_82580:
2285 case e1000_i350: 2345 case e1000_i350:
2346 case e1000_i354:
2286 case e1000_i210: 2347 case e1000_i210:
2287 case e1000_i211: 2348 case e1000_i211:
2288 info->so_timestamping = 2349 info->so_timestamping =
@@ -2362,7 +2423,7 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
2362} 2423}
2363 2424
2364static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 2425static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2365 u32 *rule_locs) 2426 u32 *rule_locs)
2366{ 2427{
2367 struct igb_adapter *adapter = netdev_priv(dev); 2428 struct igb_adapter *adapter = netdev_priv(dev);
2368 int ret = -EOPNOTSUPP; 2429 int ret = -EOPNOTSUPP;
@@ -2506,7 +2567,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2506{ 2567{
2507 struct igb_adapter *adapter = netdev_priv(netdev); 2568 struct igb_adapter *adapter = netdev_priv(netdev);
2508 struct e1000_hw *hw = &adapter->hw; 2569 struct e1000_hw *hw = &adapter->hw;
2509 u32 ipcnfg, eeer; 2570 u32 ipcnfg, eeer, ret_val;
2571 u16 phy_data;
2510 2572
2511 if ((hw->mac.type < e1000_i350) || 2573 if ((hw->mac.type < e1000_i350) ||
2512 (hw->phy.media_type != e1000_media_type_copper)) 2574 (hw->phy.media_type != e1000_media_type_copper))
@@ -2525,6 +2587,32 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2525 if (ipcnfg & E1000_IPCNFG_EEE_100M_AN) 2587 if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
2526 edata->advertised |= ADVERTISED_100baseT_Full; 2588 edata->advertised |= ADVERTISED_100baseT_Full;
2527 2589
2590 /* EEE Link Partner Advertised */
2591 switch (hw->mac.type) {
2592 case e1000_i350:
2593 ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
2594 &phy_data);
2595 if (ret_val)
2596 return -ENODATA;
2597
2598 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2599
2600 break;
2601 case e1000_i210:
2602 case e1000_i211:
2603 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
2604 E1000_EEE_LP_ADV_DEV_I210,
2605 &phy_data);
2606 if (ret_val)
2607 return -ENODATA;
2608
2609 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2610
2611 break;
2612 default:
2613 break;
2614 }
2615
2528 if (eeer & E1000_EEER_EEE_NEG) 2616 if (eeer & E1000_EEER_EEE_NEG)
2529 edata->eee_active = true; 2617 edata->eee_active = true;
2530 2618
@@ -2600,6 +2688,85 @@ static int igb_set_eee(struct net_device *netdev,
2600 return 0; 2688 return 0;
2601} 2689}
2602 2690
2691static int igb_get_module_info(struct net_device *netdev,
2692 struct ethtool_modinfo *modinfo)
2693{
2694 struct igb_adapter *adapter = netdev_priv(netdev);
2695 struct e1000_hw *hw = &adapter->hw;
2696 u32 status = E1000_SUCCESS;
2697 u16 sff8472_rev, addr_mode;
2698 bool page_swap = false;
2699
2700 if ((hw->phy.media_type == e1000_media_type_copper) ||
2701 (hw->phy.media_type == e1000_media_type_unknown))
2702 return -EOPNOTSUPP;
2703
2704 /* Check whether we support SFF-8472 or not */
2705 status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
2706 if (status != E1000_SUCCESS)
2707 return -EIO;
2708
2709 /* addressing mode is not supported */
2710 status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
2711 if (status != E1000_SUCCESS)
2712 return -EIO;
2713
2714 /* addressing mode is not supported */
2715 if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
2716 hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
2717 page_swap = true;
2718 }
2719
2720 if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
2721 /* We have an SFP, but it does not support SFF-8472 */
2722 modinfo->type = ETH_MODULE_SFF_8079;
2723 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
2724 } else {
2725 /* We have an SFP which supports a revision of SFF-8472 */
2726 modinfo->type = ETH_MODULE_SFF_8472;
2727 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2728 }
2729
2730 return 0;
2731}
2732
2733static int igb_get_module_eeprom(struct net_device *netdev,
2734 struct ethtool_eeprom *ee, u8 *data)
2735{
2736 struct igb_adapter *adapter = netdev_priv(netdev);
2737 struct e1000_hw *hw = &adapter->hw;
2738 u32 status = E1000_SUCCESS;
2739 u16 *dataword;
2740 u16 first_word, last_word;
2741 int i = 0;
2742
2743 if (ee->len == 0)
2744 return -EINVAL;
2745
2746 first_word = ee->offset >> 1;
2747 last_word = (ee->offset + ee->len - 1) >> 1;
2748
2749 dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1),
2750 GFP_KERNEL);
2751 if (!dataword)
2752 return -ENOMEM;
2753
2754 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
2755 for (i = 0; i < last_word - first_word + 1; i++) {
2756 status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
2757 if (status != E1000_SUCCESS)
2758 /* Error occurred while reading module */
2759 return -EIO;
2760
2761 be16_to_cpus(&dataword[i]);
2762 }
2763
2764 memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len);
2765 kfree(dataword);
2766
2767 return 0;
2768}
2769
2603static int igb_ethtool_begin(struct net_device *netdev) 2770static int igb_ethtool_begin(struct net_device *netdev)
2604{ 2771{
2605 struct igb_adapter *adapter = netdev_priv(netdev); 2772 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2614,36 +2781,38 @@ static void igb_ethtool_complete(struct net_device *netdev)
2614} 2781}
2615 2782
2616static const struct ethtool_ops igb_ethtool_ops = { 2783static const struct ethtool_ops igb_ethtool_ops = {
2617 .get_settings = igb_get_settings, 2784 .get_settings = igb_get_settings,
2618 .set_settings = igb_set_settings, 2785 .set_settings = igb_set_settings,
2619 .get_drvinfo = igb_get_drvinfo, 2786 .get_drvinfo = igb_get_drvinfo,
2620 .get_regs_len = igb_get_regs_len, 2787 .get_regs_len = igb_get_regs_len,
2621 .get_regs = igb_get_regs, 2788 .get_regs = igb_get_regs,
2622 .get_wol = igb_get_wol, 2789 .get_wol = igb_get_wol,
2623 .set_wol = igb_set_wol, 2790 .set_wol = igb_set_wol,
2624 .get_msglevel = igb_get_msglevel, 2791 .get_msglevel = igb_get_msglevel,
2625 .set_msglevel = igb_set_msglevel, 2792 .set_msglevel = igb_set_msglevel,
2626 .nway_reset = igb_nway_reset, 2793 .nway_reset = igb_nway_reset,
2627 .get_link = igb_get_link, 2794 .get_link = igb_get_link,
2628 .get_eeprom_len = igb_get_eeprom_len, 2795 .get_eeprom_len = igb_get_eeprom_len,
2629 .get_eeprom = igb_get_eeprom, 2796 .get_eeprom = igb_get_eeprom,
2630 .set_eeprom = igb_set_eeprom, 2797 .set_eeprom = igb_set_eeprom,
2631 .get_ringparam = igb_get_ringparam, 2798 .get_ringparam = igb_get_ringparam,
2632 .set_ringparam = igb_set_ringparam, 2799 .set_ringparam = igb_set_ringparam,
2633 .get_pauseparam = igb_get_pauseparam, 2800 .get_pauseparam = igb_get_pauseparam,
2634 .set_pauseparam = igb_set_pauseparam, 2801 .set_pauseparam = igb_set_pauseparam,
2635 .self_test = igb_diag_test, 2802 .self_test = igb_diag_test,
2636 .get_strings = igb_get_strings, 2803 .get_strings = igb_get_strings,
2637 .set_phys_id = igb_set_phys_id, 2804 .set_phys_id = igb_set_phys_id,
2638 .get_sset_count = igb_get_sset_count, 2805 .get_sset_count = igb_get_sset_count,
2639 .get_ethtool_stats = igb_get_ethtool_stats, 2806 .get_ethtool_stats = igb_get_ethtool_stats,
2640 .get_coalesce = igb_get_coalesce, 2807 .get_coalesce = igb_get_coalesce,
2641 .set_coalesce = igb_set_coalesce, 2808 .set_coalesce = igb_set_coalesce,
2642 .get_ts_info = igb_get_ts_info, 2809 .get_ts_info = igb_get_ts_info,
2643 .get_rxnfc = igb_get_rxnfc, 2810 .get_rxnfc = igb_get_rxnfc,
2644 .set_rxnfc = igb_set_rxnfc, 2811 .set_rxnfc = igb_set_rxnfc,
2645 .get_eee = igb_get_eee, 2812 .get_eee = igb_get_eee,
2646 .set_eee = igb_set_eee, 2813 .set_eee = igb_set_eee,
2814 .get_module_info = igb_get_module_info,
2815 .get_module_eeprom = igb_get_module_eeprom,
2647 .begin = igb_ethtool_begin, 2816 .begin = igb_ethtool_begin,
2648 .complete = igb_ethtool_complete, 2817 .complete = igb_ethtool_complete,
2649}; 2818};
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 0478a1abe541..58f1ce967aeb 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -45,21 +45,21 @@ static struct i2c_board_info i350_sensor_info = {
45 45
46/* hwmon callback functions */ 46/* hwmon callback functions */
47static ssize_t igb_hwmon_show_location(struct device *dev, 47static ssize_t igb_hwmon_show_location(struct device *dev,
48 struct device_attribute *attr, 48 struct device_attribute *attr,
49 char *buf) 49 char *buf)
50{ 50{
51 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, 51 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
52 dev_attr); 52 dev_attr);
53 return sprintf(buf, "loc%u\n", 53 return sprintf(buf, "loc%u\n",
54 igb_attr->sensor->location); 54 igb_attr->sensor->location);
55} 55}
56 56
57static ssize_t igb_hwmon_show_temp(struct device *dev, 57static ssize_t igb_hwmon_show_temp(struct device *dev,
58 struct device_attribute *attr, 58 struct device_attribute *attr,
59 char *buf) 59 char *buf)
60{ 60{
61 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, 61 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
62 dev_attr); 62 dev_attr);
63 unsigned int value; 63 unsigned int value;
64 64
65 /* reset the temp field */ 65 /* reset the temp field */
@@ -74,11 +74,11 @@ static ssize_t igb_hwmon_show_temp(struct device *dev,
74} 74}
75 75
76static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, 76static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
77 struct device_attribute *attr, 77 struct device_attribute *attr,
78 char *buf) 78 char *buf)
79{ 79{
80 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, 80 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
81 dev_attr); 81 dev_attr);
82 unsigned int value = igb_attr->sensor->caution_thresh; 82 unsigned int value = igb_attr->sensor->caution_thresh;
83 83
84 /* display millidegree */ 84 /* display millidegree */
@@ -88,11 +88,11 @@ static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
88} 88}
89 89
90static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, 90static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
91 struct device_attribute *attr, 91 struct device_attribute *attr,
92 char *buf) 92 char *buf)
93{ 93{
94 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, 94 struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
95 dev_attr); 95 dev_attr);
96 unsigned int value = igb_attr->sensor->max_op_thresh; 96 unsigned int value = igb_attr->sensor->max_op_thresh;
97 97
98 /* display millidegree */ 98 /* display millidegree */
@@ -111,7 +111,8 @@ static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
111 * the data structures we need to get the data to display. 111 * the data structures we need to get the data to display.
112 */ 112 */
113static int igb_add_hwmon_attr(struct igb_adapter *adapter, 113static int igb_add_hwmon_attr(struct igb_adapter *adapter,
114 unsigned int offset, int type) { 114 unsigned int offset, int type)
115{
115 int rc; 116 int rc;
116 unsigned int n_attr; 117 unsigned int n_attr;
117 struct hwmon_attr *igb_attr; 118 struct hwmon_attr *igb_attr;
@@ -217,7 +218,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
217 */ 218 */
218 n_attrs = E1000_MAX_SENSORS * 4; 219 n_attrs = E1000_MAX_SENSORS * 4;
219 igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), 220 igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
220 GFP_KERNEL); 221 GFP_KERNEL);
221 if (!igb_hwmon->hwmon_list) { 222 if (!igb_hwmon->hwmon_list) {
222 rc = -ENOMEM; 223 rc = -ENOMEM;
223 goto err; 224 goto err;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8496adfc6a68..38590252be64 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -77,6 +77,9 @@ static const struct e1000_info *igb_info_tbl[] = {
77}; 77};
78 78
79static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { 79static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, 83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, 84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, 85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
@@ -169,6 +172,8 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
169static int igb_ndo_set_vf_vlan(struct net_device *netdev, 172static int igb_ndo_set_vf_vlan(struct net_device *netdev,
170 int vf, u16 vlan, u8 qos); 173 int vf, u16 vlan, u8 qos);
171static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 174static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
175static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
176 bool setting);
172static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, 177static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
173 struct ifla_vf_info *ivi); 178 struct ifla_vf_info *ivi);
174static void igb_check_vf_rate_limit(struct igb_adapter *); 179static void igb_check_vf_rate_limit(struct igb_adapter *);
@@ -292,9 +297,7 @@ static const struct igb_reg_info igb_reg_info_tbl[] = {
292 {} 297 {}
293}; 298};
294 299
295/* 300/* igb_regdump - register printout routine */
296 * igb_regdump - register printout routine
297 */
298static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) 301static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
299{ 302{
300 int n = 0; 303 int n = 0;
@@ -360,9 +363,7 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
360 regs[2], regs[3]); 363 regs[2], regs[3]);
361} 364}
362 365
363/* 366/* igb_dump - Print registers, Tx-rings and Rx-rings */
364 * igb_dump - Print registers, tx-rings and rx-rings
365 */
366static void igb_dump(struct igb_adapter *adapter) 367static void igb_dump(struct igb_adapter *adapter)
367{ 368{
368 struct net_device *netdev = adapter->netdev; 369 struct net_device *netdev = adapter->netdev;
@@ -569,12 +570,13 @@ exit:
569 return; 570 return;
570} 571}
571 572
572/* igb_get_i2c_data - Reads the I2C SDA data bit 573/**
574 * igb_get_i2c_data - Reads the I2C SDA data bit
573 * @hw: pointer to hardware structure 575 * @hw: pointer to hardware structure
574 * @i2cctl: Current value of I2CCTL register 576 * @i2cctl: Current value of I2CCTL register
575 * 577 *
576 * Returns the I2C data bit value 578 * Returns the I2C data bit value
577 */ 579 **/
578static int igb_get_i2c_data(void *data) 580static int igb_get_i2c_data(void *data)
579{ 581{
580 struct igb_adapter *adapter = (struct igb_adapter *)data; 582 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -584,12 +586,13 @@ static int igb_get_i2c_data(void *data)
584 return ((i2cctl & E1000_I2C_DATA_IN) != 0); 586 return ((i2cctl & E1000_I2C_DATA_IN) != 0);
585} 587}
586 588
587/* igb_set_i2c_data - Sets the I2C data bit 589/**
590 * igb_set_i2c_data - Sets the I2C data bit
588 * @data: pointer to hardware structure 591 * @data: pointer to hardware structure
589 * @state: I2C data value (0 or 1) to set 592 * @state: I2C data value (0 or 1) to set
590 * 593 *
591 * Sets the I2C data bit 594 * Sets the I2C data bit
592 */ 595 **/
593static void igb_set_i2c_data(void *data, int state) 596static void igb_set_i2c_data(void *data, int state)
594{ 597{
595 struct igb_adapter *adapter = (struct igb_adapter *)data; 598 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -608,12 +611,13 @@ static void igb_set_i2c_data(void *data, int state)
608 611
609} 612}
610 613
611/* igb_set_i2c_clk - Sets the I2C SCL clock 614/**
615 * igb_set_i2c_clk - Sets the I2C SCL clock
612 * @data: pointer to hardware structure 616 * @data: pointer to hardware structure
613 * @state: state to set clock 617 * @state: state to set clock
614 * 618 *
615 * Sets the I2C clock line to state 619 * Sets the I2C clock line to state
616 */ 620 **/
617static void igb_set_i2c_clk(void *data, int state) 621static void igb_set_i2c_clk(void *data, int state)
618{ 622{
619 struct igb_adapter *adapter = (struct igb_adapter *)data; 623 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -631,11 +635,12 @@ static void igb_set_i2c_clk(void *data, int state)
631 wrfl(); 635 wrfl();
632} 636}
633 637
634/* igb_get_i2c_clk - Gets the I2C SCL clock state 638/**
639 * igb_get_i2c_clk - Gets the I2C SCL clock state
635 * @data: pointer to hardware structure 640 * @data: pointer to hardware structure
636 * 641 *
637 * Gets the I2C clock state 642 * Gets the I2C clock state
638 */ 643 **/
639static int igb_get_i2c_clk(void *data) 644static int igb_get_i2c_clk(void *data)
640{ 645{
641 struct igb_adapter *adapter = (struct igb_adapter *)data; 646 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -655,8 +660,10 @@ static const struct i2c_algo_bit_data igb_i2c_algo = {
655}; 660};
656 661
657/** 662/**
658 * igb_get_hw_dev - return device 663 * igb_get_hw_dev - return device
659 * used by hardware layer to print debugging information 664 * @hw: pointer to hardware structure
665 *
666 * used by hardware layer to print debugging information
660 **/ 667 **/
661struct net_device *igb_get_hw_dev(struct e1000_hw *hw) 668struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
662{ 669{
@@ -665,10 +672,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
665} 672}
666 673
667/** 674/**
668 * igb_init_module - Driver Registration Routine 675 * igb_init_module - Driver Registration Routine
669 * 676 *
670 * igb_init_module is the first routine called when the driver is 677 * igb_init_module is the first routine called when the driver is
671 * loaded. All it does is register with the PCI subsystem. 678 * loaded. All it does is register with the PCI subsystem.
672 **/ 679 **/
673static int __init igb_init_module(void) 680static int __init igb_init_module(void)
674{ 681{
@@ -688,10 +695,10 @@ static int __init igb_init_module(void)
688module_init(igb_init_module); 695module_init(igb_init_module);
689 696
690/** 697/**
691 * igb_exit_module - Driver Exit Cleanup Routine 698 * igb_exit_module - Driver Exit Cleanup Routine
692 * 699 *
693 * igb_exit_module is called just before the driver is removed 700 * igb_exit_module is called just before the driver is removed
694 * from memory. 701 * from memory.
695 **/ 702 **/
696static void __exit igb_exit_module(void) 703static void __exit igb_exit_module(void)
697{ 704{
@@ -705,11 +712,11 @@ module_exit(igb_exit_module);
705 712
706#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) 713#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
707/** 714/**
708 * igb_cache_ring_register - Descriptor ring to register mapping 715 * igb_cache_ring_register - Descriptor ring to register mapping
709 * @adapter: board private structure to initialize 716 * @adapter: board private structure to initialize
710 * 717 *
711 * Once we know the feature-set enabled for the device, we'll cache 718 * Once we know the feature-set enabled for the device, we'll cache
712 * the register offset the descriptor ring is assigned to. 719 * the register offset the descriptor ring is assigned to.
713 **/ 720 **/
714static void igb_cache_ring_register(struct igb_adapter *adapter) 721static void igb_cache_ring_register(struct igb_adapter *adapter)
715{ 722{
@@ -726,11 +733,12 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
726 if (adapter->vfs_allocated_count) { 733 if (adapter->vfs_allocated_count) {
727 for (; i < adapter->rss_queues; i++) 734 for (; i < adapter->rss_queues; i++)
728 adapter->rx_ring[i]->reg_idx = rbase_offset + 735 adapter->rx_ring[i]->reg_idx = rbase_offset +
729 Q_IDX_82576(i); 736 Q_IDX_82576(i);
730 } 737 }
731 case e1000_82575: 738 case e1000_82575:
732 case e1000_82580: 739 case e1000_82580:
733 case e1000_i350: 740 case e1000_i350:
741 case e1000_i354:
734 case e1000_i210: 742 case e1000_i210:
735 case e1000_i211: 743 case e1000_i211:
736 default: 744 default:
@@ -785,9 +793,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
785 switch (hw->mac.type) { 793 switch (hw->mac.type) {
786 case e1000_82575: 794 case e1000_82575:
787 /* The 82575 assigns vectors using a bitmask, which matches the 795 /* The 82575 assigns vectors using a bitmask, which matches the
788 bitmask for the EICR/EIMS/EIMC registers. To assign one 796 * bitmask for the EICR/EIMS/EIMC registers. To assign one
789 or more queues to a vector, we write the appropriate bits 797 * or more queues to a vector, we write the appropriate bits
790 into the MSIXBM register for that vector. */ 798 * into the MSIXBM register for that vector.
799 */
791 if (rx_queue > IGB_N0_QUEUE) 800 if (rx_queue > IGB_N0_QUEUE)
792 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 801 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
793 if (tx_queue > IGB_N0_QUEUE) 802 if (tx_queue > IGB_N0_QUEUE)
@@ -798,8 +807,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
798 q_vector->eims_value = msixbm; 807 q_vector->eims_value = msixbm;
799 break; 808 break;
800 case e1000_82576: 809 case e1000_82576:
801 /* 810 /* 82576 uses a table that essentially consists of 2 columns
802 * 82576 uses a table that essentially consists of 2 columns
803 * with 8 rows. The ordering is column-major so we use the 811 * with 8 rows. The ordering is column-major so we use the
804 * lower 3 bits as the row index, and the 4th bit as the 812 * lower 3 bits as the row index, and the 4th bit as the
805 * column offset. 813 * column offset.
@@ -816,10 +824,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
816 break; 824 break;
817 case e1000_82580: 825 case e1000_82580:
818 case e1000_i350: 826 case e1000_i350:
827 case e1000_i354:
819 case e1000_i210: 828 case e1000_i210:
820 case e1000_i211: 829 case e1000_i211:
821 /* 830 /* On 82580 and newer adapters the scheme is similar to 82576
822 * On 82580 and newer adapters the scheme is similar to 82576
823 * however instead of ordering column-major we have things 831 * however instead of ordering column-major we have things
824 * ordered row-major. So we traverse the table by using 832 * ordered row-major. So we traverse the table by using
825 * bit 0 as the column offset, and the remaining bits as the 833 * bit 0 as the column offset, and the remaining bits as the
@@ -848,10 +856,11 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
848} 856}
849 857
850/** 858/**
851 * igb_configure_msix - Configure MSI-X hardware 859 * igb_configure_msix - Configure MSI-X hardware
860 * @adapter: board private structure to initialize
852 * 861 *
853 * igb_configure_msix sets up the hardware to properly 862 * igb_configure_msix sets up the hardware to properly
854 * generate MSI-X interrupts. 863 * generate MSI-X interrupts.
855 **/ 864 **/
856static void igb_configure_msix(struct igb_adapter *adapter) 865static void igb_configure_msix(struct igb_adapter *adapter)
857{ 866{
@@ -875,8 +884,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
875 wr32(E1000_CTRL_EXT, tmp); 884 wr32(E1000_CTRL_EXT, tmp);
876 885
877 /* enable msix_other interrupt */ 886 /* enable msix_other interrupt */
878 array_wr32(E1000_MSIXBM(0), vector++, 887 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
879 E1000_EIMS_OTHER);
880 adapter->eims_other = E1000_EIMS_OTHER; 888 adapter->eims_other = E1000_EIMS_OTHER;
881 889
882 break; 890 break;
@@ -884,13 +892,15 @@ static void igb_configure_msix(struct igb_adapter *adapter)
884 case e1000_82576: 892 case e1000_82576:
885 case e1000_82580: 893 case e1000_82580:
886 case e1000_i350: 894 case e1000_i350:
895 case e1000_i354:
887 case e1000_i210: 896 case e1000_i210:
888 case e1000_i211: 897 case e1000_i211:
889 /* Turn on MSI-X capability first, or our settings 898 /* Turn on MSI-X capability first, or our settings
890 * won't stick. And it will take days to debug. */ 899 * won't stick. And it will take days to debug.
900 */
891 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | 901 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
892 E1000_GPIE_PBA | E1000_GPIE_EIAME | 902 E1000_GPIE_PBA | E1000_GPIE_EIAME |
893 E1000_GPIE_NSICR); 903 E1000_GPIE_NSICR);
894 904
895 /* enable msix_other interrupt */ 905 /* enable msix_other interrupt */
896 adapter->eims_other = 1 << vector; 906 adapter->eims_other = 1 << vector;
@@ -912,10 +922,11 @@ static void igb_configure_msix(struct igb_adapter *adapter)
912} 922}
913 923
914/** 924/**
915 * igb_request_msix - Initialize MSI-X interrupts 925 * igb_request_msix - Initialize MSI-X interrupts
926 * @adapter: board private structure to initialize
916 * 927 *
917 * igb_request_msix allocates MSI-X vectors and requests interrupts from the 928 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
918 * kernel. 929 * kernel.
919 **/ 930 **/
920static int igb_request_msix(struct igb_adapter *adapter) 931static int igb_request_msix(struct igb_adapter *adapter)
921{ 932{
@@ -924,7 +935,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
924 int i, err = 0, vector = 0, free_vector = 0; 935 int i, err = 0, vector = 0, free_vector = 0;
925 936
926 err = request_irq(adapter->msix_entries[vector].vector, 937 err = request_irq(adapter->msix_entries[vector].vector,
927 igb_msix_other, 0, netdev->name, adapter); 938 igb_msix_other, 0, netdev->name, adapter);
928 if (err) 939 if (err)
929 goto err_out; 940 goto err_out;
930 941
@@ -948,8 +959,8 @@ static int igb_request_msix(struct igb_adapter *adapter)
948 sprintf(q_vector->name, "%s-unused", netdev->name); 959 sprintf(q_vector->name, "%s-unused", netdev->name);
949 960
950 err = request_irq(adapter->msix_entries[vector].vector, 961 err = request_irq(adapter->msix_entries[vector].vector,
951 igb_msix_ring, 0, q_vector->name, 962 igb_msix_ring, 0, q_vector->name,
952 q_vector); 963 q_vector);
953 if (err) 964 if (err)
954 goto err_free; 965 goto err_free;
955 } 966 }
@@ -982,13 +993,13 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
982} 993}
983 994
984/** 995/**
985 * igb_free_q_vector - Free memory allocated for specific interrupt vector 996 * igb_free_q_vector - Free memory allocated for specific interrupt vector
986 * @adapter: board private structure to initialize 997 * @adapter: board private structure to initialize
987 * @v_idx: Index of vector to be freed 998 * @v_idx: Index of vector to be freed
988 * 999 *
989 * This function frees the memory allocated to the q_vector. In addition if 1000 * This function frees the memory allocated to the q_vector. In addition if
990 * NAPI is enabled it will delete any references to the NAPI struct prior 1001 * NAPI is enabled it will delete any references to the NAPI struct prior
991 * to freeing the q_vector. 1002 * to freeing the q_vector.
992 **/ 1003 **/
993static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) 1004static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
994{ 1005{
@@ -1003,20 +1014,19 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1003 adapter->q_vector[v_idx] = NULL; 1014 adapter->q_vector[v_idx] = NULL;
1004 netif_napi_del(&q_vector->napi); 1015 netif_napi_del(&q_vector->napi);
1005 1016
1006 /* 1017 /* ixgbe_get_stats64() might access the rings on this vector,
1007 * ixgbe_get_stats64() might access the rings on this vector,
1008 * we must wait a grace period before freeing it. 1018 * we must wait a grace period before freeing it.
1009 */ 1019 */
1010 kfree_rcu(q_vector, rcu); 1020 kfree_rcu(q_vector, rcu);
1011} 1021}
1012 1022
1013/** 1023/**
1014 * igb_free_q_vectors - Free memory allocated for interrupt vectors 1024 * igb_free_q_vectors - Free memory allocated for interrupt vectors
1015 * @adapter: board private structure to initialize 1025 * @adapter: board private structure to initialize
1016 * 1026 *
1017 * This function frees the memory allocated to the q_vectors. In addition if 1027 * This function frees the memory allocated to the q_vectors. In addition if
1018 * NAPI is enabled it will delete any references to the NAPI struct prior 1028 * NAPI is enabled it will delete any references to the NAPI struct prior
1019 * to freeing the q_vector. 1029 * to freeing the q_vector.
1020 **/ 1030 **/
1021static void igb_free_q_vectors(struct igb_adapter *adapter) 1031static void igb_free_q_vectors(struct igb_adapter *adapter)
1022{ 1032{
@@ -1031,10 +1041,11 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
1031} 1041}
1032 1042
1033/** 1043/**
1034 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts 1044 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1045 * @adapter: board private structure to initialize
1035 * 1046 *
1036 * This function resets the device so that it has 0 rx queues, tx queues, and 1047 * This function resets the device so that it has 0 Rx queues, Tx queues, and
1037 * MSI-X interrupts allocated. 1048 * MSI-X interrupts allocated.
1038 */ 1049 */
1039static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) 1050static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1040{ 1051{
@@ -1043,10 +1054,12 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1043} 1054}
1044 1055
1045/** 1056/**
1046 * igb_set_interrupt_capability - set MSI or MSI-X if supported 1057 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1058 * @adapter: board private structure to initialize
1059 * @msix: boolean value of MSIX capability
1047 * 1060 *
1048 * Attempt to configure interrupts using the best available 1061 * Attempt to configure interrupts using the best available
1049 * capabilities of the hardware and kernel. 1062 * capabilities of the hardware and kernel.
1050 **/ 1063 **/
1051static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) 1064static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1052{ 1065{
@@ -1063,10 +1076,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1063 else 1076 else
1064 adapter->num_tx_queues = adapter->rss_queues; 1077 adapter->num_tx_queues = adapter->rss_queues;
1065 1078
1066 /* start with one vector for every rx queue */ 1079 /* start with one vector for every Rx queue */
1067 numvecs = adapter->num_rx_queues; 1080 numvecs = adapter->num_rx_queues;
1068 1081
1069 /* if tx handler is separate add 1 for every tx queue */ 1082 /* if Tx handler is separate add 1 for every Tx queue */
1070 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) 1083 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1071 numvecs += adapter->num_tx_queues; 1084 numvecs += adapter->num_tx_queues;
1072 1085
@@ -1128,16 +1141,16 @@ static void igb_add_ring(struct igb_ring *ring,
1128} 1141}
1129 1142
1130/** 1143/**
1131 * igb_alloc_q_vector - Allocate memory for a single interrupt vector 1144 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1132 * @adapter: board private structure to initialize 1145 * @adapter: board private structure to initialize
1133 * @v_count: q_vectors allocated on adapter, used for ring interleaving 1146 * @v_count: q_vectors allocated on adapter, used for ring interleaving
1134 * @v_idx: index of vector in adapter struct 1147 * @v_idx: index of vector in adapter struct
1135 * @txr_count: total number of Tx rings to allocate 1148 * @txr_count: total number of Tx rings to allocate
1136 * @txr_idx: index of first Tx ring to allocate 1149 * @txr_idx: index of first Tx ring to allocate
1137 * @rxr_count: total number of Rx rings to allocate 1150 * @rxr_count: total number of Rx rings to allocate
1138 * @rxr_idx: index of first Rx ring to allocate 1151 * @rxr_idx: index of first Rx ring to allocate
1139 * 1152 *
1140 * We allocate one q_vector. If allocation fails we return -ENOMEM. 1153 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1141 **/ 1154 **/
1142static int igb_alloc_q_vector(struct igb_adapter *adapter, 1155static int igb_alloc_q_vector(struct igb_adapter *adapter,
1143 int v_count, int v_idx, 1156 int v_count, int v_idx,
@@ -1179,6 +1192,17 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1179 /* initialize pointer to rings */ 1192 /* initialize pointer to rings */
1180 ring = q_vector->ring; 1193 ring = q_vector->ring;
1181 1194
1195 /* intialize ITR */
1196 if (rxr_count) {
1197 /* rx or rx/tx vector */
1198 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1199 q_vector->itr_val = adapter->rx_itr_setting;
1200 } else {
1201 /* tx only vector */
1202 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1203 q_vector->itr_val = adapter->tx_itr_setting;
1204 }
1205
1182 if (txr_count) { 1206 if (txr_count) {
1183 /* assign generic ring traits */ 1207 /* assign generic ring traits */
1184 ring->dev = &adapter->pdev->dev; 1208 ring->dev = &adapter->pdev->dev;
@@ -1221,9 +1245,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1221 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); 1245 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1222 1246
1223 /* 1247 /*
1224 * On i350, i210, and i211, loopback VLAN packets 1248 * On i350, i354, i210, and i211, loopback VLAN packets
1225 * have the tag byte-swapped. 1249 * have the tag byte-swapped.
1226 * */ 1250 */
1227 if (adapter->hw.mac.type >= e1000_i350) 1251 if (adapter->hw.mac.type >= e1000_i350)
1228 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); 1252 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1229 1253
@@ -1240,11 +1264,11 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1240 1264
1241 1265
1242/** 1266/**
1243 * igb_alloc_q_vectors - Allocate memory for interrupt vectors 1267 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1244 * @adapter: board private structure to initialize 1268 * @adapter: board private structure to initialize
1245 * 1269 *
1246 * We allocate one q_vector per queue interrupt. If allocation fails we 1270 * We allocate one q_vector per queue interrupt. If allocation fails we
1247 * return -ENOMEM. 1271 * return -ENOMEM.
1248 **/ 1272 **/
1249static int igb_alloc_q_vectors(struct igb_adapter *adapter) 1273static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1250{ 1274{
@@ -1298,9 +1322,11 @@ err_out:
1298} 1322}
1299 1323
1300/** 1324/**
1301 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 1325 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1326 * @adapter: board private structure to initialize
1327 * @msix: boolean value of MSIX capability
1302 * 1328 *
1303 * This function initializes the interrupts and allocates all of the queues. 1329 * This function initializes the interrupts and allocates all of the queues.
1304 **/ 1330 **/
1305static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) 1331static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1306{ 1332{
@@ -1325,10 +1351,11 @@ err_alloc_q_vectors:
1325} 1351}
1326 1352
1327/** 1353/**
1328 * igb_request_irq - initialize interrupts 1354 * igb_request_irq - initialize interrupts
1355 * @adapter: board private structure to initialize
1329 * 1356 *
1330 * Attempts to configure interrupts using the best available 1357 * Attempts to configure interrupts using the best available
1331 * capabilities of the hardware and kernel. 1358 * capabilities of the hardware and kernel.
1332 **/ 1359 **/
1333static int igb_request_irq(struct igb_adapter *adapter) 1360static int igb_request_irq(struct igb_adapter *adapter)
1334{ 1361{
@@ -1394,15 +1421,14 @@ static void igb_free_irq(struct igb_adapter *adapter)
1394} 1421}
1395 1422
1396/** 1423/**
1397 * igb_irq_disable - Mask off interrupt generation on the NIC 1424 * igb_irq_disable - Mask off interrupt generation on the NIC
1398 * @adapter: board private structure 1425 * @adapter: board private structure
1399 **/ 1426 **/
1400static void igb_irq_disable(struct igb_adapter *adapter) 1427static void igb_irq_disable(struct igb_adapter *adapter)
1401{ 1428{
1402 struct e1000_hw *hw = &adapter->hw; 1429 struct e1000_hw *hw = &adapter->hw;
1403 1430
1404 /* 1431 /* we need to be careful when disabling interrupts. The VFs are also
1405 * we need to be careful when disabling interrupts. The VFs are also
1406 * mapped into these registers and so clearing the bits can cause 1432 * mapped into these registers and so clearing the bits can cause
1407 * issues on the VF drivers so we only need to clear what we set 1433 * issues on the VF drivers so we only need to clear what we set
1408 */ 1434 */
@@ -1427,8 +1453,8 @@ static void igb_irq_disable(struct igb_adapter *adapter)
1427} 1453}
1428 1454
1429/** 1455/**
1430 * igb_irq_enable - Enable default interrupt generation settings 1456 * igb_irq_enable - Enable default interrupt generation settings
1431 * @adapter: board private structure 1457 * @adapter: board private structure
1432 **/ 1458 **/
1433static void igb_irq_enable(struct igb_adapter *adapter) 1459static void igb_irq_enable(struct igb_adapter *adapter)
1434{ 1460{
@@ -1477,13 +1503,12 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
1477} 1503}
1478 1504
1479/** 1505/**
1480 * igb_release_hw_control - release control of the h/w to f/w 1506 * igb_release_hw_control - release control of the h/w to f/w
1481 * @adapter: address of board private structure 1507 * @adapter: address of board private structure
1482 *
1483 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1484 * For ASF and Pass Through versions of f/w this means that the
1485 * driver is no longer loaded.
1486 * 1508 *
1509 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1510 * For ASF and Pass Through versions of f/w this means that the
1511 * driver is no longer loaded.
1487 **/ 1512 **/
1488static void igb_release_hw_control(struct igb_adapter *adapter) 1513static void igb_release_hw_control(struct igb_adapter *adapter)
1489{ 1514{
@@ -1497,13 +1522,12 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
1497} 1522}
1498 1523
1499/** 1524/**
1500 * igb_get_hw_control - get control of the h/w from f/w 1525 * igb_get_hw_control - get control of the h/w from f/w
1501 * @adapter: address of board private structure 1526 * @adapter: address of board private structure
1502 *
1503 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1504 * For ASF and Pass Through versions of f/w this means that
1505 * the driver is loaded.
1506 * 1527 *
1528 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1529 * For ASF and Pass Through versions of f/w this means that
1530 * the driver is loaded.
1507 **/ 1531 **/
1508static void igb_get_hw_control(struct igb_adapter *adapter) 1532static void igb_get_hw_control(struct igb_adapter *adapter)
1509{ 1533{
@@ -1517,8 +1541,8 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
1517} 1541}
1518 1542
1519/** 1543/**
1520 * igb_configure - configure the hardware for RX and TX 1544 * igb_configure - configure the hardware for RX and TX
1521 * @adapter: private board structure 1545 * @adapter: private board structure
1522 **/ 1546 **/
1523static void igb_configure(struct igb_adapter *adapter) 1547static void igb_configure(struct igb_adapter *adapter)
1524{ 1548{
@@ -1541,7 +1565,8 @@ static void igb_configure(struct igb_adapter *adapter)
1541 1565
1542 /* call igb_desc_unused which always leaves 1566 /* call igb_desc_unused which always leaves
1543 * at least 1 descriptor unused to make sure 1567 * at least 1 descriptor unused to make sure
1544 * next_to_use != next_to_clean */ 1568 * next_to_use != next_to_clean
1569 */
1545 for (i = 0; i < adapter->num_rx_queues; i++) { 1570 for (i = 0; i < adapter->num_rx_queues; i++) {
1546 struct igb_ring *ring = adapter->rx_ring[i]; 1571 struct igb_ring *ring = adapter->rx_ring[i];
1547 igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); 1572 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
@@ -1549,8 +1574,8 @@ static void igb_configure(struct igb_adapter *adapter)
1549} 1574}
1550 1575
1551/** 1576/**
1552 * igb_power_up_link - Power up the phy/serdes link 1577 * igb_power_up_link - Power up the phy/serdes link
1553 * @adapter: address of board private structure 1578 * @adapter: address of board private structure
1554 **/ 1579 **/
1555void igb_power_up_link(struct igb_adapter *adapter) 1580void igb_power_up_link(struct igb_adapter *adapter)
1556{ 1581{
@@ -1563,8 +1588,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
1563} 1588}
1564 1589
1565/** 1590/**
1566 * igb_power_down_link - Power down the phy/serdes link 1591 * igb_power_down_link - Power down the phy/serdes link
1567 * @adapter: address of board private structure 1592 * @adapter: address of board private structure
1568 */ 1593 */
1569static void igb_power_down_link(struct igb_adapter *adapter) 1594static void igb_power_down_link(struct igb_adapter *adapter)
1570{ 1595{
@@ -1575,8 +1600,8 @@ static void igb_power_down_link(struct igb_adapter *adapter)
1575} 1600}
1576 1601
1577/** 1602/**
1578 * igb_up - Open the interface and prepare it to handle traffic 1603 * igb_up - Open the interface and prepare it to handle traffic
1579 * @adapter: board private structure 1604 * @adapter: board private structure
1580 **/ 1605 **/
1581int igb_up(struct igb_adapter *adapter) 1606int igb_up(struct igb_adapter *adapter)
1582{ 1607{
@@ -1624,7 +1649,8 @@ void igb_down(struct igb_adapter *adapter)
1624 int i; 1649 int i;
1625 1650
1626 /* signal that we're down so the interrupt handler does not 1651 /* signal that we're down so the interrupt handler does not
1627 * reschedule our watchdog timer */ 1652 * reschedule our watchdog timer
1653 */
1628 set_bit(__IGB_DOWN, &adapter->state); 1654 set_bit(__IGB_DOWN, &adapter->state);
1629 1655
1630 /* disable receives in the hardware */ 1656 /* disable receives in the hardware */
@@ -1694,6 +1720,7 @@ void igb_reset(struct igb_adapter *adapter)
1694 */ 1720 */
1695 switch (mac->type) { 1721 switch (mac->type) {
1696 case e1000_i350: 1722 case e1000_i350:
1723 case e1000_i354:
1697 case e1000_82580: 1724 case e1000_82580:
1698 pba = rd32(E1000_RXPBS); 1725 pba = rd32(E1000_RXPBS);
1699 pba = igb_rxpbs_adjust_82580(pba); 1726 pba = igb_rxpbs_adjust_82580(pba);
@@ -1720,14 +1747,16 @@ void igb_reset(struct igb_adapter *adapter)
1720 * rounded up to the next 1KB and expressed in KB. Likewise, 1747 * rounded up to the next 1KB and expressed in KB. Likewise,
1721 * the Rx FIFO should be large enough to accommodate at least 1748 * the Rx FIFO should be large enough to accommodate at least
1722 * one full receive packet and is similarly rounded up and 1749 * one full receive packet and is similarly rounded up and
1723 * expressed in KB. */ 1750 * expressed in KB.
1751 */
1724 pba = rd32(E1000_PBA); 1752 pba = rd32(E1000_PBA);
1725 /* upper 16 bits has Tx packet buffer allocation size in KB */ 1753 /* upper 16 bits has Tx packet buffer allocation size in KB */
1726 tx_space = pba >> 16; 1754 tx_space = pba >> 16;
1727 /* lower 16 bits has Rx packet buffer allocation size in KB */ 1755 /* lower 16 bits has Rx packet buffer allocation size in KB */
1728 pba &= 0xffff; 1756 pba &= 0xffff;
1729 /* the tx fifo also stores 16 bytes of information about the tx 1757 /* the Tx fifo also stores 16 bytes of information about the Tx
1730 * but don't include ethernet FCS because hardware appends it */ 1758 * but don't include ethernet FCS because hardware appends it
1759 */
1731 min_tx_space = (adapter->max_frame_size + 1760 min_tx_space = (adapter->max_frame_size +
1732 sizeof(union e1000_adv_tx_desc) - 1761 sizeof(union e1000_adv_tx_desc) -
1733 ETH_FCS_LEN) * 2; 1762 ETH_FCS_LEN) * 2;
@@ -1740,13 +1769,15 @@ void igb_reset(struct igb_adapter *adapter)
1740 1769
1741 /* If current Tx allocation is less than the min Tx FIFO size, 1770 /* If current Tx allocation is less than the min Tx FIFO size,
1742 * and the min Tx FIFO size is less than the current Rx FIFO 1771 * and the min Tx FIFO size is less than the current Rx FIFO
1743 * allocation, take space away from current Rx allocation */ 1772 * allocation, take space away from current Rx allocation
1773 */
1744 if (tx_space < min_tx_space && 1774 if (tx_space < min_tx_space &&
1745 ((min_tx_space - tx_space) < pba)) { 1775 ((min_tx_space - tx_space) < pba)) {
1746 pba = pba - (min_tx_space - tx_space); 1776 pba = pba - (min_tx_space - tx_space);
1747 1777
1748 /* if short on rx space, rx wins and must trump tx 1778 /* if short on Rx space, Rx wins and must trump Tx
1749 * adjustment */ 1779 * adjustment
1780 */
1750 if (pba < min_rx_space) 1781 if (pba < min_rx_space)
1751 pba = min_rx_space; 1782 pba = min_rx_space;
1752 } 1783 }
@@ -1758,7 +1789,8 @@ void igb_reset(struct igb_adapter *adapter)
1758 * (or the size used for early receive) above it in the Rx FIFO. 1789 * (or the size used for early receive) above it in the Rx FIFO.
1759 * Set it to the lower of: 1790 * Set it to the lower of:
1760 * - 90% of the Rx FIFO size, or 1791 * - 90% of the Rx FIFO size, or
1761 * - the full Rx FIFO size minus one full frame */ 1792 * - the full Rx FIFO size minus one full frame
1793 */
1762 hwm = min(((pba << 10) * 9 / 10), 1794 hwm = min(((pba << 10) * 9 / 10),
1763 ((pba << 10) - 2 * adapter->max_frame_size)); 1795 ((pba << 10) - 2 * adapter->max_frame_size));
1764 1796
@@ -1789,8 +1821,7 @@ void igb_reset(struct igb_adapter *adapter)
1789 if (hw->mac.ops.init_hw(hw)) 1821 if (hw->mac.ops.init_hw(hw))
1790 dev_err(&pdev->dev, "Hardware Error\n"); 1822 dev_err(&pdev->dev, "Hardware Error\n");
1791 1823
1792 /* 1824 /* Flow control settings reset on hardware reset, so guarantee flow
1793 * Flow control settings reset on hardware reset, so guarantee flow
1794 * control is off when forcing speed. 1825 * control is off when forcing speed.
1795 */ 1826 */
1796 if (!hw->mac.autoneg) 1827 if (!hw->mac.autoneg)
@@ -1826,9 +1857,8 @@ void igb_reset(struct igb_adapter *adapter)
1826static netdev_features_t igb_fix_features(struct net_device *netdev, 1857static netdev_features_t igb_fix_features(struct net_device *netdev,
1827 netdev_features_t features) 1858 netdev_features_t features)
1828{ 1859{
1829 /* 1860 /* Since there is no support for separate Rx/Tx vlan accel
1830 * Since there is no support for separate rx/tx vlan accel 1861 * enable/disable make sure Tx flag is always in same state as Rx.
1831 * enable/disable make sure tx flag is always in same state as rx.
1832 */ 1862 */
1833 if (features & NETIF_F_HW_VLAN_RX) 1863 if (features & NETIF_F_HW_VLAN_RX)
1834 features |= NETIF_F_HW_VLAN_TX; 1864 features |= NETIF_F_HW_VLAN_TX;
@@ -1876,6 +1906,7 @@ static const struct net_device_ops igb_netdev_ops = {
1876 .ndo_set_vf_mac = igb_ndo_set_vf_mac, 1906 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1877 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, 1907 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1878 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, 1908 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1909 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
1879 .ndo_get_vf_config = igb_ndo_get_vf_config, 1910 .ndo_get_vf_config = igb_ndo_get_vf_config,
1880#ifdef CONFIG_NET_POLL_CONTROLLER 1911#ifdef CONFIG_NET_POLL_CONTROLLER
1881 .ndo_poll_controller = igb_netpoll, 1912 .ndo_poll_controller = igb_netpoll,
@@ -1887,7 +1918,6 @@ static const struct net_device_ops igb_netdev_ops = {
1887/** 1918/**
1888 * igb_set_fw_version - Configure version string for ethtool 1919 * igb_set_fw_version - Configure version string for ethtool
1889 * @adapter: adapter struct 1920 * @adapter: adapter struct
1890 *
1891 **/ 1921 **/
1892void igb_set_fw_version(struct igb_adapter *adapter) 1922void igb_set_fw_version(struct igb_adapter *adapter)
1893{ 1923{
@@ -1923,10 +1953,10 @@ void igb_set_fw_version(struct igb_adapter *adapter)
1923 return; 1953 return;
1924} 1954}
1925 1955
1926/* igb_init_i2c - Init I2C interface 1956/**
1957 * igb_init_i2c - Init I2C interface
1927 * @adapter: pointer to adapter structure 1958 * @adapter: pointer to adapter structure
1928 * 1959 **/
1929 */
1930static s32 igb_init_i2c(struct igb_adapter *adapter) 1960static s32 igb_init_i2c(struct igb_adapter *adapter)
1931{ 1961{
1932 s32 status = E1000_SUCCESS; 1962 s32 status = E1000_SUCCESS;
@@ -1951,15 +1981,15 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
1951} 1981}
1952 1982
1953/** 1983/**
1954 * igb_probe - Device Initialization Routine 1984 * igb_probe - Device Initialization Routine
1955 * @pdev: PCI device information struct 1985 * @pdev: PCI device information struct
1956 * @ent: entry in igb_pci_tbl 1986 * @ent: entry in igb_pci_tbl
1957 * 1987 *
1958 * Returns 0 on success, negative on failure 1988 * Returns 0 on success, negative on failure
1959 * 1989 *
1960 * igb_probe initializes an adapter identified by a pci_dev structure. 1990 * igb_probe initializes an adapter identified by a pci_dev structure.
1961 * The OS initialization, configuring of the adapter private structure, 1991 * The OS initialization, configuring of the adapter private structure,
1962 * and a hardware reset occur. 1992 * and a hardware reset occur.
1963 **/ 1993 **/
1964static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1994static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1965{ 1995{
@@ -1996,18 +2026,19 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1996 } else { 2026 } else {
1997 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2027 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1998 if (err) { 2028 if (err) {
1999 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2029 err = dma_set_coherent_mask(&pdev->dev,
2030 DMA_BIT_MASK(32));
2000 if (err) { 2031 if (err) {
2001 dev_err(&pdev->dev, "No usable DMA " 2032 dev_err(&pdev->dev,
2002 "configuration, aborting\n"); 2033 "No usable DMA configuration, aborting\n");
2003 goto err_dma; 2034 goto err_dma;
2004 } 2035 }
2005 } 2036 }
2006 } 2037 }
2007 2038
2008 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 2039 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
2009 IORESOURCE_MEM), 2040 IORESOURCE_MEM),
2010 igb_driver_name); 2041 igb_driver_name);
2011 if (err) 2042 if (err)
2012 goto err_pci_reg; 2043 goto err_pci_reg;
2013 2044
@@ -2085,8 +2116,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2085 dev_info(&pdev->dev, 2116 dev_info(&pdev->dev,
2086 "PHY reset is blocked due to SOL/IDER session.\n"); 2117 "PHY reset is blocked due to SOL/IDER session.\n");
2087 2118
2088 /* 2119 /* features is initialized to 0 in allocation, it might have bits
2089 * features is initialized to 0 in allocation, it might have bits
2090 * set by igb_sw_init so we should use an or instead of an 2120 * set by igb_sw_init so we should use an or instead of an
2091 * assignment. 2121 * assignment.
2092 */ 2122 */
@@ -2130,11 +2160,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2130 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); 2160 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
2131 2161
2132 /* before reading the NVM, reset the controller to put the device in a 2162 /* before reading the NVM, reset the controller to put the device in a
2133 * known good starting state */ 2163 * known good starting state
2164 */
2134 hw->mac.ops.reset_hw(hw); 2165 hw->mac.ops.reset_hw(hw);
2135 2166
2136 /* 2167 /* make sure the NVM is good , i211 parts have special NVM that
2137 * make sure the NVM is good , i211 parts have special NVM that
2138 * doesn't contain a checksum 2168 * doesn't contain a checksum
2139 */ 2169 */
2140 if (hw->mac.type != e1000_i211) { 2170 if (hw->mac.type != e1000_i211) {
@@ -2161,9 +2191,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2161 igb_set_fw_version(adapter); 2191 igb_set_fw_version(adapter);
2162 2192
2163 setup_timer(&adapter->watchdog_timer, igb_watchdog, 2193 setup_timer(&adapter->watchdog_timer, igb_watchdog,
2164 (unsigned long) adapter); 2194 (unsigned long) adapter);
2165 setup_timer(&adapter->phy_info_timer, igb_update_phy_info, 2195 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
2166 (unsigned long) adapter); 2196 (unsigned long) adapter);
2167 2197
2168 INIT_WORK(&adapter->reset_task, igb_reset_task); 2198 INIT_WORK(&adapter->reset_task, igb_reset_task);
2169 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); 2199 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
@@ -2185,8 +2215,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2185 /* Check the NVM for wake support on non-port A ports */ 2215 /* Check the NVM for wake support on non-port A ports */
2186 if (hw->mac.type >= e1000_82580) 2216 if (hw->mac.type >= e1000_82580)
2187 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 2217 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2188 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 2218 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2189 &eeprom_data); 2219 &eeprom_data);
2190 else if (hw->bus.func == 1) 2220 else if (hw->bus.func == 1)
2191 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 2221 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2192 2222
@@ -2195,7 +2225,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2195 2225
2196 /* now that we have the eeprom settings, apply the special cases where 2226 /* now that we have the eeprom settings, apply the special cases where
2197 * the eeprom may be wrong or the board simply won't support wake on 2227 * the eeprom may be wrong or the board simply won't support wake on
2198 * lan on a particular port */ 2228 * lan on a particular port
2229 */
2199 switch (pdev->device) { 2230 switch (pdev->device) {
2200 case E1000_DEV_ID_82575GB_QUAD_COPPER: 2231 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2201 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 2232 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
@@ -2204,7 +2235,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2204 case E1000_DEV_ID_82576_FIBER: 2235 case E1000_DEV_ID_82576_FIBER:
2205 case E1000_DEV_ID_82576_SERDES: 2236 case E1000_DEV_ID_82576_SERDES:
2206 /* Wake events only supported on port A for dual fiber 2237 /* Wake events only supported on port A for dual fiber
2207 * regardless of eeprom setting */ 2238 * regardless of eeprom setting
2239 */
2208 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 2240 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2209 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 2241 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2210 break; 2242 break;
@@ -2274,8 +2306,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2274 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { 2306 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
2275 u16 ets_word; 2307 u16 ets_word;
2276 2308
2277 /* 2309 /* Read the NVM to determine if this i350 device supports an
2278 * Read the NVM to determine if this i350 device supports an
2279 * external thermal sensor. 2310 * external thermal sensor.
2280 */ 2311 */
2281 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); 2312 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
@@ -2294,17 +2325,20 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2294 igb_ptp_init(adapter); 2325 igb_ptp_init(adapter);
2295 2326
2296 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2327 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2297 /* print bus type/speed/width info */ 2328 /* print bus type/speed/width info, not applicable to i354 */
2298 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 2329 if (hw->mac.type != e1000_i354) {
2299 netdev->name, 2330 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
2300 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : 2331 netdev->name,
2301 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : 2332 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
2302 "unknown"), 2333 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
2303 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 2334 "unknown"),
2304 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 2335 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
2305 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : 2336 "Width x4" :
2306 "unknown"), 2337 (hw->bus.width == e1000_bus_width_pcie_x2) ?
2307 netdev->dev_addr); 2338 "Width x2" :
2339 (hw->bus.width == e1000_bus_width_pcie_x1) ?
2340 "Width x1" : "unknown"), netdev->dev_addr);
2341 }
2308 2342
2309 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH); 2343 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2310 if (ret_val) 2344 if (ret_val)
@@ -2321,6 +2355,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2321 case e1000_i211: 2355 case e1000_i211:
2322 igb_set_eee_i350(hw); 2356 igb_set_eee_i350(hw);
2323 break; 2357 break;
2358 case e1000_i354:
2359 if (hw->phy.media_type == e1000_media_type_copper) {
2360 if ((rd32(E1000_CTRL_EXT) &
2361 E1000_CTRL_EXT_LINK_MODE_SGMII))
2362 igb_set_eee_i354(hw);
2363 }
2364 break;
2324 default: 2365 default:
2325 break; 2366 break;
2326 } 2367 }
@@ -2344,7 +2385,7 @@ err_ioremap:
2344 free_netdev(netdev); 2385 free_netdev(netdev);
2345err_alloc_etherdev: 2386err_alloc_etherdev:
2346 pci_release_selected_regions(pdev, 2387 pci_release_selected_regions(pdev,
2347 pci_select_bars(pdev, IORESOURCE_MEM)); 2388 pci_select_bars(pdev, IORESOURCE_MEM));
2348err_pci_reg: 2389err_pci_reg:
2349err_dma: 2390err_dma:
2350 pci_disable_device(pdev); 2391 pci_disable_device(pdev);
@@ -2444,26 +2485,24 @@ out:
2444} 2485}
2445 2486
2446#endif 2487#endif
2447/* 2488/**
2448 * igb_remove_i2c - Cleanup I2C interface 2489 * igb_remove_i2c - Cleanup I2C interface
2449 * @adapter: pointer to adapter structure 2490 * @adapter: pointer to adapter structure
2450 * 2491 **/
2451 */
2452static void igb_remove_i2c(struct igb_adapter *adapter) 2492static void igb_remove_i2c(struct igb_adapter *adapter)
2453{ 2493{
2454
2455 /* free the adapter bus structure */ 2494 /* free the adapter bus structure */
2456 i2c_del_adapter(&adapter->i2c_adap); 2495 i2c_del_adapter(&adapter->i2c_adap);
2457} 2496}
2458 2497
2459/** 2498/**
2460 * igb_remove - Device Removal Routine 2499 * igb_remove - Device Removal Routine
2461 * @pdev: PCI device information struct 2500 * @pdev: PCI device information struct
2462 * 2501 *
2463 * igb_remove is called by the PCI subsystem to alert the driver 2502 * igb_remove is called by the PCI subsystem to alert the driver
2464 * that it should release a PCI device. The could be caused by a 2503 * that it should release a PCI device. The could be caused by a
2465 * Hot-Plug event, or because the driver is going to be removed from 2504 * Hot-Plug event, or because the driver is going to be removed from
2466 * memory. 2505 * memory.
2467 **/ 2506 **/
2468static void igb_remove(struct pci_dev *pdev) 2507static void igb_remove(struct pci_dev *pdev)
2469{ 2508{
@@ -2477,8 +2516,7 @@ static void igb_remove(struct pci_dev *pdev)
2477#endif 2516#endif
2478 igb_remove_i2c(adapter); 2517 igb_remove_i2c(adapter);
2479 igb_ptp_stop(adapter); 2518 igb_ptp_stop(adapter);
2480 /* 2519 /* The watchdog timer may be rescheduled, so explicitly
2481 * The watchdog timer may be rescheduled, so explicitly
2482 * disable watchdog from being rescheduled. 2520 * disable watchdog from being rescheduled.
2483 */ 2521 */
2484 set_bit(__IGB_DOWN, &adapter->state); 2522 set_bit(__IGB_DOWN, &adapter->state);
@@ -2498,7 +2536,8 @@ static void igb_remove(struct pci_dev *pdev)
2498#endif 2536#endif
2499 2537
2500 /* Release control of h/w to f/w. If f/w is AMT enabled, this 2538 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2501 * would have already happened in close and is redundant. */ 2539 * would have already happened in close and is redundant.
2540 */
2502 igb_release_hw_control(adapter); 2541 igb_release_hw_control(adapter);
2503 2542
2504 unregister_netdev(netdev); 2543 unregister_netdev(netdev);
@@ -2513,7 +2552,7 @@ static void igb_remove(struct pci_dev *pdev)
2513 if (hw->flash_address) 2552 if (hw->flash_address)
2514 iounmap(hw->flash_address); 2553 iounmap(hw->flash_address);
2515 pci_release_selected_regions(pdev, 2554 pci_release_selected_regions(pdev,
2516 pci_select_bars(pdev, IORESOURCE_MEM)); 2555 pci_select_bars(pdev, IORESOURCE_MEM));
2517 2556
2518 kfree(adapter->shadow_vfta); 2557 kfree(adapter->shadow_vfta);
2519 free_netdev(netdev); 2558 free_netdev(netdev);
@@ -2524,13 +2563,13 @@ static void igb_remove(struct pci_dev *pdev)
2524} 2563}
2525 2564
2526/** 2565/**
2527 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space 2566 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2528 * @adapter: board private structure to initialize 2567 * @adapter: board private structure to initialize
2529 * 2568 *
2530 * This function initializes the vf specific data storage and then attempts to 2569 * This function initializes the vf specific data storage and then attempts to
2531 * allocate the VFs. The reason for ordering it this way is because it is much 2570 * allocate the VFs. The reason for ordering it this way is because it is much
2532 * mor expensive time wise to disable SR-IOV than it is to allocate and free 2571 * mor expensive time wise to disable SR-IOV than it is to allocate and free
2533 * the memory for the VFs. 2572 * the memory for the VFs.
2534 **/ 2573 **/
2535static void igb_probe_vfs(struct igb_adapter *adapter) 2574static void igb_probe_vfs(struct igb_adapter *adapter)
2536{ 2575{
@@ -2576,6 +2615,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2576 } 2615 }
2577 /* fall through */ 2616 /* fall through */
2578 case e1000_82580: 2617 case e1000_82580:
2618 case e1000_i354:
2579 default: 2619 default:
2580 max_rss_queues = IGB_MAX_RX_QUEUES; 2620 max_rss_queues = IGB_MAX_RX_QUEUES;
2581 break; 2621 break;
@@ -2590,8 +2630,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2590 /* Device supports enough interrupts without queue pairing. */ 2630 /* Device supports enough interrupts without queue pairing. */
2591 break; 2631 break;
2592 case e1000_82576: 2632 case e1000_82576:
2593 /* 2633 /* If VFs are going to be allocated with RSS queues then we
2594 * If VFs are going to be allocated with RSS queues then we
2595 * should pair the queues in order to conserve interrupts due 2634 * should pair the queues in order to conserve interrupts due
2596 * to limited supply. 2635 * to limited supply.
2597 */ 2636 */
@@ -2601,10 +2640,10 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2601 /* fall through */ 2640 /* fall through */
2602 case e1000_82580: 2641 case e1000_82580:
2603 case e1000_i350: 2642 case e1000_i350:
2643 case e1000_i354:
2604 case e1000_i210: 2644 case e1000_i210:
2605 default: 2645 default:
2606 /* 2646 /* If rss_queues > half of max_rss_queues, pair the queues in
2607 * If rss_queues > half of max_rss_queues, pair the queues in
2608 * order to conserve interrupts due to limited supply. 2647 * order to conserve interrupts due to limited supply.
2609 */ 2648 */
2610 if (adapter->rss_queues > (max_rss_queues / 2)) 2649 if (adapter->rss_queues > (max_rss_queues / 2))
@@ -2614,12 +2653,12 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2614} 2653}
2615 2654
2616/** 2655/**
2617 * igb_sw_init - Initialize general software structures (struct igb_adapter) 2656 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2618 * @adapter: board private structure to initialize 2657 * @adapter: board private structure to initialize
2619 * 2658 *
2620 * igb_sw_init initializes the Adapter private data structure. 2659 * igb_sw_init initializes the Adapter private data structure.
2621 * Fields are initialized based on PCI device information and 2660 * Fields are initialized based on PCI device information and
2622 * OS network device settings (MTU size). 2661 * OS network device settings (MTU size).
2623 **/ 2662 **/
2624static int igb_sw_init(struct igb_adapter *adapter) 2663static int igb_sw_init(struct igb_adapter *adapter)
2625{ 2664{
@@ -2689,16 +2728,16 @@ static int igb_sw_init(struct igb_adapter *adapter)
2689} 2728}
2690 2729
2691/** 2730/**
2692 * igb_open - Called when a network interface is made active 2731 * igb_open - Called when a network interface is made active
2693 * @netdev: network interface device structure 2732 * @netdev: network interface device structure
2694 * 2733 *
2695 * Returns 0 on success, negative value on failure 2734 * Returns 0 on success, negative value on failure
2696 * 2735 *
2697 * The open entry point is called when a network interface is made 2736 * The open entry point is called when a network interface is made
2698 * active by the system (IFF_UP). At this point all resources needed 2737 * active by the system (IFF_UP). At this point all resources needed
2699 * for transmit and receive operations are allocated, the interrupt 2738 * for transmit and receive operations are allocated, the interrupt
2700 * handler is registered with the OS, the watchdog timer is started, 2739 * handler is registered with the OS, the watchdog timer is started,
2701 * and the stack is notified that the interface is ready. 2740 * and the stack is notified that the interface is ready.
2702 **/ 2741 **/
2703static int __igb_open(struct net_device *netdev, bool resuming) 2742static int __igb_open(struct net_device *netdev, bool resuming)
2704{ 2743{
@@ -2734,7 +2773,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
2734 /* before we allocate an interrupt, we must be ready to handle it. 2773 /* before we allocate an interrupt, we must be ready to handle it.
2735 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 2774 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2736 * as soon as we call pci_request_irq, so we have to setup our 2775 * as soon as we call pci_request_irq, so we have to setup our
2737 * clean_rx handler before we do so. */ 2776 * clean_rx handler before we do so.
2777 */
2738 igb_configure(adapter); 2778 igb_configure(adapter);
2739 2779
2740 err = igb_request_irq(adapter); 2780 err = igb_request_irq(adapter);
@@ -2803,15 +2843,15 @@ static int igb_open(struct net_device *netdev)
2803} 2843}
2804 2844
2805/** 2845/**
2806 * igb_close - Disables a network interface 2846 * igb_close - Disables a network interface
2807 * @netdev: network interface device structure 2847 * @netdev: network interface device structure
2808 * 2848 *
2809 * Returns 0, this is not allowed to fail 2849 * Returns 0, this is not allowed to fail
2810 * 2850 *
2811 * The close entry point is called when an interface is de-activated 2851 * The close entry point is called when an interface is de-activated
2812 * by the OS. The hardware is still under the driver's control, but 2852 * by the OS. The hardware is still under the driver's control, but
2813 * needs to be disabled. A global MAC reset is issued to stop the 2853 * needs to be disabled. A global MAC reset is issued to stop the
2814 * hardware, and all transmit and receive resources are freed. 2854 * hardware, and all transmit and receive resources are freed.
2815 **/ 2855 **/
2816static int __igb_close(struct net_device *netdev, bool suspending) 2856static int __igb_close(struct net_device *netdev, bool suspending)
2817{ 2857{
@@ -2840,10 +2880,10 @@ static int igb_close(struct net_device *netdev)
2840} 2880}
2841 2881
2842/** 2882/**
2843 * igb_setup_tx_resources - allocate Tx resources (Descriptors) 2883 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2844 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2884 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2845 * 2885 *
2846 * Return 0 on success, negative on failure 2886 * Return 0 on success, negative on failure
2847 **/ 2887 **/
2848int igb_setup_tx_resources(struct igb_ring *tx_ring) 2888int igb_setup_tx_resources(struct igb_ring *tx_ring)
2849{ 2889{
@@ -2878,11 +2918,11 @@ err:
2878} 2918}
2879 2919
2880/** 2920/**
2881 * igb_setup_all_tx_resources - wrapper to allocate Tx resources 2921 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2882 * (Descriptors) for all queues 2922 * (Descriptors) for all queues
2883 * @adapter: board private structure 2923 * @adapter: board private structure
2884 * 2924 *
2885 * Return 0 on success, negative on failure 2925 * Return 0 on success, negative on failure
2886 **/ 2926 **/
2887static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 2927static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2888{ 2928{
@@ -2904,8 +2944,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2904} 2944}
2905 2945
2906/** 2946/**
2907 * igb_setup_tctl - configure the transmit control registers 2947 * igb_setup_tctl - configure the transmit control registers
2908 * @adapter: Board private structure 2948 * @adapter: Board private structure
2909 **/ 2949 **/
2910void igb_setup_tctl(struct igb_adapter *adapter) 2950void igb_setup_tctl(struct igb_adapter *adapter)
2911{ 2951{
@@ -2930,11 +2970,11 @@ void igb_setup_tctl(struct igb_adapter *adapter)
2930} 2970}
2931 2971
2932/** 2972/**
2933 * igb_configure_tx_ring - Configure transmit ring after Reset 2973 * igb_configure_tx_ring - Configure transmit ring after Reset
2934 * @adapter: board private structure 2974 * @adapter: board private structure
2935 * @ring: tx ring to configure 2975 * @ring: tx ring to configure
2936 * 2976 *
2937 * Configure a transmit ring after a reset. 2977 * Configure a transmit ring after a reset.
2938 **/ 2978 **/
2939void igb_configure_tx_ring(struct igb_adapter *adapter, 2979void igb_configure_tx_ring(struct igb_adapter *adapter,
2940 struct igb_ring *ring) 2980 struct igb_ring *ring)
@@ -2950,9 +2990,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
2950 mdelay(10); 2990 mdelay(10);
2951 2991
2952 wr32(E1000_TDLEN(reg_idx), 2992 wr32(E1000_TDLEN(reg_idx),
2953 ring->count * sizeof(union e1000_adv_tx_desc)); 2993 ring->count * sizeof(union e1000_adv_tx_desc));
2954 wr32(E1000_TDBAL(reg_idx), 2994 wr32(E1000_TDBAL(reg_idx),
2955 tdba & 0x00000000ffffffffULL); 2995 tdba & 0x00000000ffffffffULL);
2956 wr32(E1000_TDBAH(reg_idx), tdba >> 32); 2996 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2957 2997
2958 ring->tail = hw->hw_addr + E1000_TDT(reg_idx); 2998 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
@@ -2968,10 +3008,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
2968} 3008}
2969 3009
2970/** 3010/**
2971 * igb_configure_tx - Configure transmit Unit after Reset 3011 * igb_configure_tx - Configure transmit Unit after Reset
2972 * @adapter: board private structure 3012 * @adapter: board private structure
2973 * 3013 *
2974 * Configure the Tx unit of the MAC after a reset. 3014 * Configure the Tx unit of the MAC after a reset.
2975 **/ 3015 **/
2976static void igb_configure_tx(struct igb_adapter *adapter) 3016static void igb_configure_tx(struct igb_adapter *adapter)
2977{ 3017{
@@ -2982,10 +3022,10 @@ static void igb_configure_tx(struct igb_adapter *adapter)
2982} 3022}
2983 3023
2984/** 3024/**
2985 * igb_setup_rx_resources - allocate Rx resources (Descriptors) 3025 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2986 * @rx_ring: rx descriptor ring (for a specific queue) to setup 3026 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
2987 * 3027 *
2988 * Returns 0 on success, negative on failure 3028 * Returns 0 on success, negative on failure
2989 **/ 3029 **/
2990int igb_setup_rx_resources(struct igb_ring *rx_ring) 3030int igb_setup_rx_resources(struct igb_ring *rx_ring)
2991{ 3031{
@@ -3021,11 +3061,11 @@ err:
3021} 3061}
3022 3062
3023/** 3063/**
3024 * igb_setup_all_rx_resources - wrapper to allocate Rx resources 3064 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
3025 * (Descriptors) for all queues 3065 * (Descriptors) for all queues
3026 * @adapter: board private structure 3066 * @adapter: board private structure
3027 * 3067 *
3028 * Return 0 on success, negative on failure 3068 * Return 0 on success, negative on failure
3029 **/ 3069 **/
3030static int igb_setup_all_rx_resources(struct igb_adapter *adapter) 3070static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3031{ 3071{
@@ -3047,8 +3087,8 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3047} 3087}
3048 3088
3049/** 3089/**
3050 * igb_setup_mrqc - configure the multiple receive queue control registers 3090 * igb_setup_mrqc - configure the multiple receive queue control registers
3051 * @adapter: Board private structure 3091 * @adapter: Board private structure
3052 **/ 3092 **/
3053static void igb_setup_mrqc(struct igb_adapter *adapter) 3093static void igb_setup_mrqc(struct igb_adapter *adapter)
3054{ 3094{
@@ -3081,8 +3121,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3081 break; 3121 break;
3082 } 3122 }
3083 3123
3084 /* 3124 /* Populate the indirection table 4 entries at a time. To do this
3085 * Populate the indirection table 4 entries at a time. To do this
3086 * we are generating the results for n and n+2 and then interleaving 3125 * we are generating the results for n and n+2 and then interleaving
3087 * those with the results with n+1 and n+3. 3126 * those with the results with n+1 and n+3.
3088 */ 3127 */
@@ -3098,8 +3137,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3098 wr32(E1000_RETA(j), reta); 3137 wr32(E1000_RETA(j), reta);
3099 } 3138 }
3100 3139
3101 /* 3140 /* Disable raw packet checksumming so that RSS hash is placed in
3102 * Disable raw packet checksumming so that RSS hash is placed in
3103 * descriptor on writeback. No need to enable TCP/UDP/IP checksum 3141 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
3104 * offloads as they are enabled by default 3142 * offloads as they are enabled by default
3105 */ 3143 */
@@ -3129,7 +3167,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3129 3167
3130 /* If VMDq is enabled then we set the appropriate mode for that, else 3168 /* If VMDq is enabled then we set the appropriate mode for that, else
3131 * we default to RSS so that an RSS hash is calculated per packet even 3169 * we default to RSS so that an RSS hash is calculated per packet even
3132 * if we are only using one queue */ 3170 * if we are only using one queue
3171 */
3133 if (adapter->vfs_allocated_count) { 3172 if (adapter->vfs_allocated_count) {
3134 if (hw->mac.type > e1000_82575) { 3173 if (hw->mac.type > e1000_82575) {
3135 /* Set the default pool for the PF's first queue */ 3174 /* Set the default pool for the PF's first queue */
@@ -3154,8 +3193,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3154} 3193}
3155 3194
3156/** 3195/**
3157 * igb_setup_rctl - configure the receive control registers 3196 * igb_setup_rctl - configure the receive control registers
3158 * @adapter: Board private structure 3197 * @adapter: Board private structure
3159 **/ 3198 **/
3160void igb_setup_rctl(struct igb_adapter *adapter) 3199void igb_setup_rctl(struct igb_adapter *adapter)
3161{ 3200{
@@ -3170,8 +3209,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
3170 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | 3209 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
3171 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3210 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3172 3211
3173 /* 3212 /* enable stripping of CRC. It's unlikely this will break BMC
3174 * enable stripping of CRC. It's unlikely this will break BMC
3175 * redirection as it did with e1000. Newer features require 3213 * redirection as it did with e1000. Newer features require
3176 * that the HW strips the CRC. 3214 * that the HW strips the CRC.
3177 */ 3215 */
@@ -3198,7 +3236,8 @@ void igb_setup_rctl(struct igb_adapter *adapter)
3198 /* This is useful for sniffing bad packets. */ 3236 /* This is useful for sniffing bad packets. */
3199 if (adapter->netdev->features & NETIF_F_RXALL) { 3237 if (adapter->netdev->features & NETIF_F_RXALL) {
3200 /* UPE and MPE will be handled by normal PROMISC logic 3238 /* UPE and MPE will be handled by normal PROMISC logic
3201 * in e1000e_set_rx_mode */ 3239 * in e1000e_set_rx_mode
3240 */
3202 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3241 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3203 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3242 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3204 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3243 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -3221,7 +3260,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3221 u32 vmolr; 3260 u32 vmolr;
3222 3261
3223 /* if it isn't the PF check to see if VFs are enabled and 3262 /* if it isn't the PF check to see if VFs are enabled and
3224 * increase the size to support vlan tags */ 3263 * increase the size to support vlan tags
3264 */
3225 if (vfn < adapter->vfs_allocated_count && 3265 if (vfn < adapter->vfs_allocated_count &&
3226 adapter->vf_data[vfn].vlans_enabled) 3266 adapter->vf_data[vfn].vlans_enabled)
3227 size += VLAN_TAG_SIZE; 3267 size += VLAN_TAG_SIZE;
@@ -3235,10 +3275,10 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3235} 3275}
3236 3276
3237/** 3277/**
3238 * igb_rlpml_set - set maximum receive packet size 3278 * igb_rlpml_set - set maximum receive packet size
3239 * @adapter: board private structure 3279 * @adapter: board private structure
3240 * 3280 *
3241 * Configure maximum receivable packet size. 3281 * Configure maximum receivable packet size.
3242 **/ 3282 **/
3243static void igb_rlpml_set(struct igb_adapter *adapter) 3283static void igb_rlpml_set(struct igb_adapter *adapter)
3244{ 3284{
@@ -3248,8 +3288,7 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
3248 3288
3249 if (pf_id) { 3289 if (pf_id) {
3250 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); 3290 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
3251 /* 3291 /* If we're in VMDQ or SR-IOV mode, then set global RLPML
3252 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3253 * to our max jumbo frame size, in case we need to enable 3292 * to our max jumbo frame size, in case we need to enable
3254 * jumbo frames on one of the rings later. 3293 * jumbo frames on one of the rings later.
3255 * This will not pass over-length frames into the default 3294 * This will not pass over-length frames into the default
@@ -3267,17 +3306,16 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
3267 struct e1000_hw *hw = &adapter->hw; 3306 struct e1000_hw *hw = &adapter->hw;
3268 u32 vmolr; 3307 u32 vmolr;
3269 3308
3270 /* 3309 /* This register exists only on 82576 and newer so if we are older then
3271 * This register exists only on 82576 and newer so if we are older then
3272 * we should exit and do nothing 3310 * we should exit and do nothing
3273 */ 3311 */
3274 if (hw->mac.type < e1000_82576) 3312 if (hw->mac.type < e1000_82576)
3275 return; 3313 return;
3276 3314
3277 vmolr = rd32(E1000_VMOLR(vfn)); 3315 vmolr = rd32(E1000_VMOLR(vfn));
3278 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ 3316 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3279 if (aupe) 3317 if (aupe)
3280 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ 3318 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3281 else 3319 else
3282 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ 3320 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
3283 3321
@@ -3286,25 +3324,24 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
3286 3324
3287 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) 3325 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
3288 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ 3326 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3289 /* 3327 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
3290 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3291 * multicast packets 3328 * multicast packets
3292 */ 3329 */
3293 if (vfn <= adapter->vfs_allocated_count) 3330 if (vfn <= adapter->vfs_allocated_count)
3294 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ 3331 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3295 3332
3296 wr32(E1000_VMOLR(vfn), vmolr); 3333 wr32(E1000_VMOLR(vfn), vmolr);
3297} 3334}
3298 3335
3299/** 3336/**
3300 * igb_configure_rx_ring - Configure a receive ring after Reset 3337 * igb_configure_rx_ring - Configure a receive ring after Reset
3301 * @adapter: board private structure 3338 * @adapter: board private structure
3302 * @ring: receive ring to be configured 3339 * @ring: receive ring to be configured
3303 * 3340 *
3304 * Configure the Rx unit of the MAC after a reset. 3341 * Configure the Rx unit of the MAC after a reset.
3305 **/ 3342 **/
3306void igb_configure_rx_ring(struct igb_adapter *adapter, 3343void igb_configure_rx_ring(struct igb_adapter *adapter,
3307 struct igb_ring *ring) 3344 struct igb_ring *ring)
3308{ 3345{
3309 struct e1000_hw *hw = &adapter->hw; 3346 struct e1000_hw *hw = &adapter->hw;
3310 u64 rdba = ring->dma; 3347 u64 rdba = ring->dma;
@@ -3319,7 +3356,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3319 rdba & 0x00000000ffffffffULL); 3356 rdba & 0x00000000ffffffffULL);
3320 wr32(E1000_RDBAH(reg_idx), rdba >> 32); 3357 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3321 wr32(E1000_RDLEN(reg_idx), 3358 wr32(E1000_RDLEN(reg_idx),
3322 ring->count * sizeof(union e1000_adv_rx_desc)); 3359 ring->count * sizeof(union e1000_adv_rx_desc));
3323 3360
3324 /* initialize head and tail */ 3361 /* initialize head and tail */
3325 ring->tail = hw->hw_addr + E1000_RDT(reg_idx); 3362 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
@@ -3365,10 +3402,10 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
3365} 3402}
3366 3403
3367/** 3404/**
3368 * igb_configure_rx - Configure receive Unit after Reset 3405 * igb_configure_rx - Configure receive Unit after Reset
3369 * @adapter: board private structure 3406 * @adapter: board private structure
3370 * 3407 *
3371 * Configure the Rx unit of the MAC after a reset. 3408 * Configure the Rx unit of the MAC after a reset.
3372 **/ 3409 **/
3373static void igb_configure_rx(struct igb_adapter *adapter) 3410static void igb_configure_rx(struct igb_adapter *adapter)
3374{ 3411{
@@ -3379,10 +3416,11 @@ static void igb_configure_rx(struct igb_adapter *adapter)
3379 3416
3380 /* set the correct pool for the PF default MAC address in entry 0 */ 3417 /* set the correct pool for the PF default MAC address in entry 0 */
3381 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, 3418 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3382 adapter->vfs_allocated_count); 3419 adapter->vfs_allocated_count);
3383 3420
3384 /* Setup the HW Rx Head and Tail Descriptor Pointers and 3421 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3385 * the Base and Length of the Rx Descriptor Ring */ 3422 * the Base and Length of the Rx Descriptor Ring
3423 */
3386 for (i = 0; i < adapter->num_rx_queues; i++) { 3424 for (i = 0; i < adapter->num_rx_queues; i++) {
3387 struct igb_ring *rx_ring = adapter->rx_ring[i]; 3425 struct igb_ring *rx_ring = adapter->rx_ring[i];
3388 igb_set_rx_buffer_len(adapter, rx_ring); 3426 igb_set_rx_buffer_len(adapter, rx_ring);
@@ -3391,10 +3429,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
3391} 3429}
3392 3430
3393/** 3431/**
3394 * igb_free_tx_resources - Free Tx Resources per Queue 3432 * igb_free_tx_resources - Free Tx Resources per Queue
3395 * @tx_ring: Tx descriptor ring for a specific queue 3433 * @tx_ring: Tx descriptor ring for a specific queue
3396 * 3434 *
3397 * Free all transmit software resources 3435 * Free all transmit software resources
3398 **/ 3436 **/
3399void igb_free_tx_resources(struct igb_ring *tx_ring) 3437void igb_free_tx_resources(struct igb_ring *tx_ring)
3400{ 3438{
@@ -3414,10 +3452,10 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
3414} 3452}
3415 3453
3416/** 3454/**
3417 * igb_free_all_tx_resources - Free Tx Resources for All Queues 3455 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3418 * @adapter: board private structure 3456 * @adapter: board private structure
3419 * 3457 *
3420 * Free all transmit software resources 3458 * Free all transmit software resources
3421 **/ 3459 **/
3422static void igb_free_all_tx_resources(struct igb_adapter *adapter) 3460static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3423{ 3461{
@@ -3450,8 +3488,8 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3450} 3488}
3451 3489
3452/** 3490/**
3453 * igb_clean_tx_ring - Free Tx Buffers 3491 * igb_clean_tx_ring - Free Tx Buffers
3454 * @tx_ring: ring to be cleaned 3492 * @tx_ring: ring to be cleaned
3455 **/ 3493 **/
3456static void igb_clean_tx_ring(struct igb_ring *tx_ring) 3494static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3457{ 3495{
@@ -3481,8 +3519,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3481} 3519}
3482 3520
3483/** 3521/**
3484 * igb_clean_all_tx_rings - Free Tx Buffers for all queues 3522 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3485 * @adapter: board private structure 3523 * @adapter: board private structure
3486 **/ 3524 **/
3487static void igb_clean_all_tx_rings(struct igb_adapter *adapter) 3525static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3488{ 3526{
@@ -3493,10 +3531,10 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3493} 3531}
3494 3532
3495/** 3533/**
3496 * igb_free_rx_resources - Free Rx Resources 3534 * igb_free_rx_resources - Free Rx Resources
3497 * @rx_ring: ring to clean the resources from 3535 * @rx_ring: ring to clean the resources from
3498 * 3536 *
3499 * Free all receive software resources 3537 * Free all receive software resources
3500 **/ 3538 **/
3501void igb_free_rx_resources(struct igb_ring *rx_ring) 3539void igb_free_rx_resources(struct igb_ring *rx_ring)
3502{ 3540{
@@ -3516,10 +3554,10 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
3516} 3554}
3517 3555
3518/** 3556/**
3519 * igb_free_all_rx_resources - Free Rx Resources for All Queues 3557 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3520 * @adapter: board private structure 3558 * @adapter: board private structure
3521 * 3559 *
3522 * Free all receive software resources 3560 * Free all receive software resources
3523 **/ 3561 **/
3524static void igb_free_all_rx_resources(struct igb_adapter *adapter) 3562static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3525{ 3563{
@@ -3530,8 +3568,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3530} 3568}
3531 3569
3532/** 3570/**
3533 * igb_clean_rx_ring - Free Rx Buffers per Queue 3571 * igb_clean_rx_ring - Free Rx Buffers per Queue
3534 * @rx_ring: ring to free buffers from 3572 * @rx_ring: ring to free buffers from
3535 **/ 3573 **/
3536static void igb_clean_rx_ring(struct igb_ring *rx_ring) 3574static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3537{ 3575{
@@ -3573,8 +3611,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3573} 3611}
3574 3612
3575/** 3613/**
3576 * igb_clean_all_rx_rings - Free Rx Buffers for all queues 3614 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3577 * @adapter: board private structure 3615 * @adapter: board private structure
3578 **/ 3616 **/
3579static void igb_clean_all_rx_rings(struct igb_adapter *adapter) 3617static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3580{ 3618{
@@ -3585,11 +3623,11 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3585} 3623}
3586 3624
3587/** 3625/**
3588 * igb_set_mac - Change the Ethernet Address of the NIC 3626 * igb_set_mac - Change the Ethernet Address of the NIC
3589 * @netdev: network interface device structure 3627 * @netdev: network interface device structure
3590 * @p: pointer to an address structure 3628 * @p: pointer to an address structure
3591 * 3629 *
3592 * Returns 0 on success, negative on failure 3630 * Returns 0 on success, negative on failure
3593 **/ 3631 **/
3594static int igb_set_mac(struct net_device *netdev, void *p) 3632static int igb_set_mac(struct net_device *netdev, void *p)
3595{ 3633{
@@ -3605,19 +3643,19 @@ static int igb_set_mac(struct net_device *netdev, void *p)
3605 3643
3606 /* set the correct pool for the new PF MAC address in entry 0 */ 3644 /* set the correct pool for the new PF MAC address in entry 0 */
3607 igb_rar_set_qsel(adapter, hw->mac.addr, 0, 3645 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3608 adapter->vfs_allocated_count); 3646 adapter->vfs_allocated_count);
3609 3647
3610 return 0; 3648 return 0;
3611} 3649}
3612 3650
3613/** 3651/**
3614 * igb_write_mc_addr_list - write multicast addresses to MTA 3652 * igb_write_mc_addr_list - write multicast addresses to MTA
3615 * @netdev: network interface device structure 3653 * @netdev: network interface device structure
3616 * 3654 *
3617 * Writes multicast address list to the MTA hash table. 3655 * Writes multicast address list to the MTA hash table.
3618 * Returns: -ENOMEM on failure 3656 * Returns: -ENOMEM on failure
3619 * 0 on no addresses written 3657 * 0 on no addresses written
3620 * X on writing X addresses to MTA 3658 * X on writing X addresses to MTA
3621 **/ 3659 **/
3622static int igb_write_mc_addr_list(struct net_device *netdev) 3660static int igb_write_mc_addr_list(struct net_device *netdev)
3623{ 3661{
@@ -3650,13 +3688,13 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
3650} 3688}
3651 3689
3652/** 3690/**
3653 * igb_write_uc_addr_list - write unicast addresses to RAR table 3691 * igb_write_uc_addr_list - write unicast addresses to RAR table
3654 * @netdev: network interface device structure 3692 * @netdev: network interface device structure
3655 * 3693 *
3656 * Writes unicast address list to the RAR table. 3694 * Writes unicast address list to the RAR table.
3657 * Returns: -ENOMEM on failure/insufficient address space 3695 * Returns: -ENOMEM on failure/insufficient address space
3658 * 0 on no addresses written 3696 * 0 on no addresses written
3659 * X on writing X addresses to the RAR table 3697 * X on writing X addresses to the RAR table
3660 **/ 3698 **/
3661static int igb_write_uc_addr_list(struct net_device *netdev) 3699static int igb_write_uc_addr_list(struct net_device *netdev)
3662{ 3700{
@@ -3677,8 +3715,8 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
3677 if (!rar_entries) 3715 if (!rar_entries)
3678 break; 3716 break;
3679 igb_rar_set_qsel(adapter, ha->addr, 3717 igb_rar_set_qsel(adapter, ha->addr,
3680 rar_entries--, 3718 rar_entries--,
3681 vfn); 3719 vfn);
3682 count++; 3720 count++;
3683 } 3721 }
3684 } 3722 }
@@ -3693,13 +3731,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
3693} 3731}
3694 3732
3695/** 3733/**
3696 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 3734 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3697 * @netdev: network interface device structure 3735 * @netdev: network interface device structure
3698 * 3736 *
3699 * The set_rx_mode entry point is called whenever the unicast or multicast 3737 * The set_rx_mode entry point is called whenever the unicast or multicast
3700 * address lists or the network interface flags are updated. This routine is 3738 * address lists or the network interface flags are updated. This routine is
3701 * responsible for configuring the hardware for proper unicast, multicast, 3739 * responsible for configuring the hardware for proper unicast, multicast,
3702 * promiscuous mode, and all-multi behavior. 3740 * promiscuous mode, and all-multi behavior.
3703 **/ 3741 **/
3704static void igb_set_rx_mode(struct net_device *netdev) 3742static void igb_set_rx_mode(struct net_device *netdev)
3705{ 3743{
@@ -3723,8 +3761,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3723 rctl |= E1000_RCTL_MPE; 3761 rctl |= E1000_RCTL_MPE;
3724 vmolr |= E1000_VMOLR_MPME; 3762 vmolr |= E1000_VMOLR_MPME;
3725 } else { 3763 } else {
3726 /* 3764 /* Write addresses to the MTA, if the attempt fails
3727 * Write addresses to the MTA, if the attempt fails
3728 * then we should just turn on promiscuous mode so 3765 * then we should just turn on promiscuous mode so
3729 * that we can at least receive multicast traffic 3766 * that we can at least receive multicast traffic
3730 */ 3767 */
@@ -3736,8 +3773,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3736 vmolr |= E1000_VMOLR_ROMPE; 3773 vmolr |= E1000_VMOLR_ROMPE;
3737 } 3774 }
3738 } 3775 }
3739 /* 3776 /* Write addresses to available RAR registers, if there is not
3740 * Write addresses to available RAR registers, if there is not
3741 * sufficient space to store all the addresses then enable 3777 * sufficient space to store all the addresses then enable
3742 * unicast promiscuous mode 3778 * unicast promiscuous mode
3743 */ 3779 */
@@ -3750,8 +3786,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3750 } 3786 }
3751 wr32(E1000_RCTL, rctl); 3787 wr32(E1000_RCTL, rctl);
3752 3788
3753 /* 3789 /* In order to support SR-IOV and eventually VMDq it is necessary to set
3754 * In order to support SR-IOV and eventually VMDq it is necessary to set
3755 * the VMOLR to enable the appropriate modes. Without this workaround 3790 * the VMOLR to enable the appropriate modes. Without this workaround
3756 * we will have issues with VLAN tag stripping not being done for frames 3791 * we will have issues with VLAN tag stripping not being done for frames
3757 * that are only arriving because we are the default pool 3792 * that are only arriving because we are the default pool
@@ -3760,7 +3795,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3760 return; 3795 return;
3761 3796
3762 vmolr |= rd32(E1000_VMOLR(vfn)) & 3797 vmolr |= rd32(E1000_VMOLR(vfn)) &
3763 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); 3798 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3764 wr32(E1000_VMOLR(vfn), vmolr); 3799 wr32(E1000_VMOLR(vfn), vmolr);
3765 igb_restore_vf_multicasts(adapter); 3800 igb_restore_vf_multicasts(adapter);
3766} 3801}
@@ -3805,7 +3840,8 @@ static void igb_spoof_check(struct igb_adapter *adapter)
3805} 3840}
3806 3841
3807/* Need to wait a few seconds after link up to get diagnostic information from 3842/* Need to wait a few seconds after link up to get diagnostic information from
3808 * the phy */ 3843 * the phy
3844 */
3809static void igb_update_phy_info(unsigned long data) 3845static void igb_update_phy_info(unsigned long data)
3810{ 3846{
3811 struct igb_adapter *adapter = (struct igb_adapter *) data; 3847 struct igb_adapter *adapter = (struct igb_adapter *) data;
@@ -3813,8 +3849,8 @@ static void igb_update_phy_info(unsigned long data)
3813} 3849}
3814 3850
3815/** 3851/**
3816 * igb_has_link - check shared code for link and determine up/down 3852 * igb_has_link - check shared code for link and determine up/down
3817 * @adapter: pointer to driver private info 3853 * @adapter: pointer to driver private info
3818 **/ 3854 **/
3819bool igb_has_link(struct igb_adapter *adapter) 3855bool igb_has_link(struct igb_adapter *adapter)
3820{ 3856{
@@ -3859,17 +3895,16 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3859 ctrl_ext = rd32(E1000_CTRL_EXT); 3895 ctrl_ext = rd32(E1000_CTRL_EXT);
3860 3896
3861 if ((hw->phy.media_type == e1000_media_type_copper) && 3897 if ((hw->phy.media_type == e1000_media_type_copper) &&
3862 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) { 3898 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
3863 ret = !!(thstat & event); 3899 ret = !!(thstat & event);
3864 }
3865 } 3900 }
3866 3901
3867 return ret; 3902 return ret;
3868} 3903}
3869 3904
3870/** 3905/**
3871 * igb_watchdog - Timer Call-back 3906 * igb_watchdog - Timer Call-back
3872 * @data: pointer to adapter cast into an unsigned long 3907 * @data: pointer to adapter cast into an unsigned long
3873 **/ 3908 **/
3874static void igb_watchdog(unsigned long data) 3909static void igb_watchdog(unsigned long data)
3875{ 3910{
@@ -3881,8 +3916,8 @@ static void igb_watchdog(unsigned long data)
3881static void igb_watchdog_task(struct work_struct *work) 3916static void igb_watchdog_task(struct work_struct *work)
3882{ 3917{
3883 struct igb_adapter *adapter = container_of(work, 3918 struct igb_adapter *adapter = container_of(work,
3884 struct igb_adapter, 3919 struct igb_adapter,
3885 watchdog_task); 3920 watchdog_task);
3886 struct e1000_hw *hw = &adapter->hw; 3921 struct e1000_hw *hw = &adapter->hw;
3887 struct net_device *netdev = adapter->netdev; 3922 struct net_device *netdev = adapter->netdev;
3888 u32 link; 3923 u32 link;
@@ -3896,8 +3931,8 @@ static void igb_watchdog_task(struct work_struct *work)
3896 if (!netif_carrier_ok(netdev)) { 3931 if (!netif_carrier_ok(netdev)) {
3897 u32 ctrl; 3932 u32 ctrl;
3898 hw->mac.ops.get_speed_and_duplex(hw, 3933 hw->mac.ops.get_speed_and_duplex(hw,
3899 &adapter->link_speed, 3934 &adapter->link_speed,
3900 &adapter->link_duplex); 3935 &adapter->link_duplex);
3901 3936
3902 ctrl = rd32(E1000_CTRL); 3937 ctrl = rd32(E1000_CTRL);
3903 /* Links status message must follow this format */ 3938 /* Links status message must follow this format */
@@ -3980,7 +4015,8 @@ static void igb_watchdog_task(struct work_struct *work)
3980 /* We've lost link, so the controller stops DMA, 4015 /* We've lost link, so the controller stops DMA,
3981 * but we've got queued Tx work that's never going 4016 * but we've got queued Tx work that's never going
3982 * to get done, so reset controller to flush Tx. 4017 * to get done, so reset controller to flush Tx.
3983 * (Do the reset outside of interrupt context). */ 4018 * (Do the reset outside of interrupt context).
4019 */
3984 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { 4020 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3985 adapter->tx_timeout_count++; 4021 adapter->tx_timeout_count++;
3986 schedule_work(&adapter->reset_task); 4022 schedule_work(&adapter->reset_task);
@@ -3993,7 +4029,7 @@ static void igb_watchdog_task(struct work_struct *work)
3993 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 4029 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
3994 } 4030 }
3995 4031
3996 /* Cause software interrupt to ensure rx ring is cleaned */ 4032 /* Cause software interrupt to ensure Rx ring is cleaned */
3997 if (adapter->msix_entries) { 4033 if (adapter->msix_entries) {
3998 u32 eics = 0; 4034 u32 eics = 0;
3999 for (i = 0; i < adapter->num_q_vectors; i++) 4035 for (i = 0; i < adapter->num_q_vectors; i++)
@@ -4020,20 +4056,20 @@ enum latency_range {
4020}; 4056};
4021 4057
4022/** 4058/**
4023 * igb_update_ring_itr - update the dynamic ITR value based on packet size 4059 * igb_update_ring_itr - update the dynamic ITR value based on packet size
4060 * @q_vector: pointer to q_vector
4024 * 4061 *
4025 * Stores a new ITR value based on strictly on packet size. This 4062 * Stores a new ITR value based on strictly on packet size. This
4026 * algorithm is less sophisticated than that used in igb_update_itr, 4063 * algorithm is less sophisticated than that used in igb_update_itr,
4027 * due to the difficulty of synchronizing statistics across multiple 4064 * due to the difficulty of synchronizing statistics across multiple
4028 * receive rings. The divisors and thresholds used by this function 4065 * receive rings. The divisors and thresholds used by this function
4029 * were determined based on theoretical maximum wire speed and testing 4066 * were determined based on theoretical maximum wire speed and testing
4030 * data, in order to minimize response time while increasing bulk 4067 * data, in order to minimize response time while increasing bulk
4031 * throughput. 4068 * throughput.
4032 * This functionality is controlled by the InterruptThrottleRate module 4069 * This functionality is controlled by the InterruptThrottleRate module
4033 * parameter (see igb_param.c) 4070 * parameter (see igb_param.c)
4034 * NOTE: This function is called only when operating in a multiqueue 4071 * NOTE: This function is called only when operating in a multiqueue
4035 * receive environment. 4072 * receive environment.
4036 * @q_vector: pointer to q_vector
4037 **/ 4073 **/
4038static void igb_update_ring_itr(struct igb_q_vector *q_vector) 4074static void igb_update_ring_itr(struct igb_q_vector *q_vector)
4039{ 4075{
@@ -4094,20 +4130,21 @@ clear_counts:
4094} 4130}
4095 4131
4096/** 4132/**
4097 * igb_update_itr - update the dynamic ITR value based on statistics 4133 * igb_update_itr - update the dynamic ITR value based on statistics
4098 * Stores a new ITR value based on packets and byte 4134 * @q_vector: pointer to q_vector
4099 * counts during the last interrupt. The advantage of per interrupt 4135 * @ring_container: ring info to update the itr for
4100 * computation is faster updates and more accurate ITR for the current 4136 *
4101 * traffic pattern. Constants in this function were computed 4137 * Stores a new ITR value based on packets and byte
4102 * based on theoretical maximum wire speed and thresholds were set based 4138 * counts during the last interrupt. The advantage of per interrupt
4103 * on testing data as well as attempting to minimize response time 4139 * computation is faster updates and more accurate ITR for the current
4104 * while increasing bulk throughput. 4140 * traffic pattern. Constants in this function were computed
4105 * this functionality is controlled by the InterruptThrottleRate module 4141 * based on theoretical maximum wire speed and thresholds were set based
4106 * parameter (see igb_param.c) 4142 * on testing data as well as attempting to minimize response time
4107 * NOTE: These calculations are only valid when operating in a single- 4143 * while increasing bulk throughput.
4108 * queue environment. 4144 * this functionality is controlled by the InterruptThrottleRate module
4109 * @q_vector: pointer to q_vector 4145 * parameter (see igb_param.c)
4110 * @ring_container: ring info to update the itr for 4146 * NOTE: These calculations are only valid when operating in a single-
4147 * queue environment.
4111 **/ 4148 **/
4112static void igb_update_itr(struct igb_q_vector *q_vector, 4149static void igb_update_itr(struct igb_q_vector *q_vector,
4113 struct igb_ring_container *ring_container) 4150 struct igb_ring_container *ring_container)
@@ -4205,12 +4242,12 @@ set_itr_now:
4205 if (new_itr != q_vector->itr_val) { 4242 if (new_itr != q_vector->itr_val) {
4206 /* this attempts to bias the interrupt rate towards Bulk 4243 /* this attempts to bias the interrupt rate towards Bulk
4207 * by adding intermediate steps when interrupt rate is 4244 * by adding intermediate steps when interrupt rate is
4208 * increasing */ 4245 * increasing
4246 */
4209 new_itr = new_itr > q_vector->itr_val ? 4247 new_itr = new_itr > q_vector->itr_val ?
4210 max((new_itr * q_vector->itr_val) / 4248 max((new_itr * q_vector->itr_val) /
4211 (new_itr + (q_vector->itr_val >> 2)), 4249 (new_itr + (q_vector->itr_val >> 2)),
4212 new_itr) : 4250 new_itr) : new_itr;
4213 new_itr;
4214 /* Don't write the value here; it resets the adapter's 4251 /* Don't write the value here; it resets the adapter's
4215 * internal timer, and causes us to delay far longer than 4252 * internal timer, and causes us to delay far longer than
4216 * we should between interrupts. Instead, we write the ITR 4253 * we should between interrupts. Instead, we write the ITR
@@ -4337,8 +4374,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4337 default: 4374 default:
4338 if (unlikely(net_ratelimit())) { 4375 if (unlikely(net_ratelimit())) {
4339 dev_warn(tx_ring->dev, 4376 dev_warn(tx_ring->dev,
4340 "partial checksum but proto=%x!\n", 4377 "partial checksum but proto=%x!\n",
4341 first->protocol); 4378 first->protocol);
4342 } 4379 }
4343 break; 4380 break;
4344 } 4381 }
@@ -4361,8 +4398,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4361 default: 4398 default:
4362 if (unlikely(net_ratelimit())) { 4399 if (unlikely(net_ratelimit())) {
4363 dev_warn(tx_ring->dev, 4400 dev_warn(tx_ring->dev,
4364 "partial checksum but l4 proto=%x!\n", 4401 "partial checksum but l4 proto=%x!\n",
4365 l4_hdr); 4402 l4_hdr);
4366 } 4403 }
4367 break; 4404 break;
4368 } 4405 }
@@ -4514,8 +4551,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4514 /* set the timestamp */ 4551 /* set the timestamp */
4515 first->time_stamp = jiffies; 4552 first->time_stamp = jiffies;
4516 4553
4517 /* 4554 /* Force memory writes to complete before letting h/w know there
4518 * Force memory writes to complete before letting h/w know there
4519 * are new descriptors to fetch. (Only applicable for weak-ordered 4555 * are new descriptors to fetch. (Only applicable for weak-ordered
4520 * memory model archs, such as IA-64). 4556 * memory model archs, such as IA-64).
4521 * 4557 *
@@ -4536,7 +4572,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4536 writel(i, tx_ring->tail); 4572 writel(i, tx_ring->tail);
4537 4573
4538 /* we need this if more than one processor can write to our tail 4574 /* we need this if more than one processor can write to our tail
4539 * at a time, it syncronizes IO on IA64/Altix systems */ 4575 * at a time, it synchronizes IO on IA64/Altix systems
4576 */
4540 mmiowb(); 4577 mmiowb();
4541 4578
4542 return; 4579 return;
@@ -4566,11 +4603,13 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4566 4603
4567 /* Herbert's original patch had: 4604 /* Herbert's original patch had:
4568 * smp_mb__after_netif_stop_queue(); 4605 * smp_mb__after_netif_stop_queue();
4569 * but since that doesn't exist yet, just open code it. */ 4606 * but since that doesn't exist yet, just open code it.
4607 */
4570 smp_mb(); 4608 smp_mb();
4571 4609
4572 /* We need to check again in a case another CPU has just 4610 /* We need to check again in a case another CPU has just
4573 * made room available. */ 4611 * made room available.
4612 */
4574 if (igb_desc_unused(tx_ring) < size) 4613 if (igb_desc_unused(tx_ring) < size)
4575 return -EBUSY; 4614 return -EBUSY;
4576 4615
@@ -4594,7 +4633,6 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4594netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, 4633netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4595 struct igb_ring *tx_ring) 4634 struct igb_ring *tx_ring)
4596{ 4635{
4597 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4598 struct igb_tx_buffer *first; 4636 struct igb_tx_buffer *first;
4599 int tso; 4637 int tso;
4600 u32 tx_flags = 0; 4638 u32 tx_flags = 0;
@@ -4629,15 +4667,18 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4629 4667
4630 skb_tx_timestamp(skb); 4668 skb_tx_timestamp(skb);
4631 4669
4632 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4670 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4633 !(adapter->ptp_tx_skb))) { 4671 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4634 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4635 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4636 4672
4637 adapter->ptp_tx_skb = skb_get(skb); 4673 if (!(adapter->ptp_tx_skb)) {
4638 adapter->ptp_tx_start = jiffies; 4674 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4639 if (adapter->hw.mac.type == e1000_82576) 4675 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4640 schedule_work(&adapter->ptp_tx_work); 4676
4677 adapter->ptp_tx_skb = skb_get(skb);
4678 adapter->ptp_tx_start = jiffies;
4679 if (adapter->hw.mac.type == e1000_82576)
4680 schedule_work(&adapter->ptp_tx_work);
4681 }
4641 } 4682 }
4642 4683
4643 if (vlan_tx_tag_present(skb)) { 4684 if (vlan_tx_tag_present(skb)) {
@@ -4694,8 +4735,7 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4694 return NETDEV_TX_OK; 4735 return NETDEV_TX_OK;
4695 } 4736 }
4696 4737
4697 /* 4738 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
4698 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4699 * in order to meet this minimum size requirement. 4739 * in order to meet this minimum size requirement.
4700 */ 4740 */
4701 if (unlikely(skb->len < 17)) { 4741 if (unlikely(skb->len < 17)) {
@@ -4709,8 +4749,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4709} 4749}
4710 4750
4711/** 4751/**
4712 * igb_tx_timeout - Respond to a Tx Hang 4752 * igb_tx_timeout - Respond to a Tx Hang
4713 * @netdev: network interface device structure 4753 * @netdev: network interface device structure
4714 **/ 4754 **/
4715static void igb_tx_timeout(struct net_device *netdev) 4755static void igb_tx_timeout(struct net_device *netdev)
4716{ 4756{
@@ -4739,13 +4779,12 @@ static void igb_reset_task(struct work_struct *work)
4739} 4779}
4740 4780
4741/** 4781/**
4742 * igb_get_stats64 - Get System Network Statistics 4782 * igb_get_stats64 - Get System Network Statistics
4743 * @netdev: network interface device structure 4783 * @netdev: network interface device structure
4744 * @stats: rtnl_link_stats64 pointer 4784 * @stats: rtnl_link_stats64 pointer
4745 *
4746 **/ 4785 **/
4747static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, 4786static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4748 struct rtnl_link_stats64 *stats) 4787 struct rtnl_link_stats64 *stats)
4749{ 4788{
4750 struct igb_adapter *adapter = netdev_priv(netdev); 4789 struct igb_adapter *adapter = netdev_priv(netdev);
4751 4790
@@ -4758,11 +4797,11 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4758} 4797}
4759 4798
4760/** 4799/**
4761 * igb_change_mtu - Change the Maximum Transfer Unit 4800 * igb_change_mtu - Change the Maximum Transfer Unit
4762 * @netdev: network interface device structure 4801 * @netdev: network interface device structure
4763 * @new_mtu: new value for maximum frame size 4802 * @new_mtu: new value for maximum frame size
4764 * 4803 *
4765 * Returns 0 on success, negative on failure 4804 * Returns 0 on success, negative on failure
4766 **/ 4805 **/
4767static int igb_change_mtu(struct net_device *netdev, int new_mtu) 4806static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4768{ 4807{
@@ -4805,10 +4844,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4805} 4844}
4806 4845
4807/** 4846/**
4808 * igb_update_stats - Update the board statistics counters 4847 * igb_update_stats - Update the board statistics counters
4809 * @adapter: board private structure 4848 * @adapter: board private structure
4810 **/ 4849 **/
4811
4812void igb_update_stats(struct igb_adapter *adapter, 4850void igb_update_stats(struct igb_adapter *adapter,
4813 struct rtnl_link_stats64 *net_stats) 4851 struct rtnl_link_stats64 *net_stats)
4814{ 4852{
@@ -4823,8 +4861,7 @@ void igb_update_stats(struct igb_adapter *adapter,
4823 4861
4824#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 4862#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4825 4863
4826 /* 4864 /* Prevent stats update while adapter is being reset, or if the pci
4827 * Prevent stats update while adapter is being reset, or if the pci
4828 * connection is down. 4865 * connection is down.
4829 */ 4866 */
4830 if (adapter->link_speed == 0) 4867 if (adapter->link_speed == 0)
@@ -4958,7 +4995,8 @@ void igb_update_stats(struct igb_adapter *adapter,
4958 /* Rx Errors */ 4995 /* Rx Errors */
4959 4996
4960 /* RLEC on some newer hardware can be incorrect so build 4997 /* RLEC on some newer hardware can be incorrect so build
4961 * our own version based on RUC and ROC */ 4998 * our own version based on RUC and ROC
4999 */
4962 net_stats->rx_errors = adapter->stats.rxerrc + 5000 net_stats->rx_errors = adapter->stats.rxerrc +
4963 adapter->stats.crcerrs + adapter->stats.algnerrc + 5001 adapter->stats.crcerrs + adapter->stats.algnerrc +
4964 adapter->stats.ruc + adapter->stats.roc + 5002 adapter->stats.ruc + adapter->stats.roc +
@@ -5017,7 +5055,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
5017 adapter->stats.doosync++; 5055 adapter->stats.doosync++;
5018 /* The DMA Out of Sync is also indication of a spoof event 5056 /* The DMA Out of Sync is also indication of a spoof event
5019 * in IOV mode. Check the Wrong VM Behavior register to 5057 * in IOV mode. Check the Wrong VM Behavior register to
5020 * see if it is really a spoof event. */ 5058 * see if it is really a spoof event.
5059 */
5021 igb_check_wvbr(adapter); 5060 igb_check_wvbr(adapter);
5022 } 5061 }
5023 5062
@@ -5091,8 +5130,7 @@ static void igb_update_tx_dca(struct igb_adapter *adapter,
5091 if (hw->mac.type != e1000_82575) 5130 if (hw->mac.type != e1000_82575)
5092 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; 5131 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
5093 5132
5094 /* 5133 /* We can enable relaxed ordering for reads, but not writes when
5095 * We can enable relaxed ordering for reads, but not writes when
5096 * DCA is enabled. This is due to a known issue in some chipsets 5134 * DCA is enabled. This is due to a known issue in some chipsets
5097 * which will cause the DCA tag to be cleared. 5135 * which will cause the DCA tag to be cleared.
5098 */ 5136 */
@@ -5113,8 +5151,7 @@ static void igb_update_rx_dca(struct igb_adapter *adapter,
5113 if (hw->mac.type != e1000_82575) 5151 if (hw->mac.type != e1000_82575)
5114 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; 5152 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
5115 5153
5116 /* 5154 /* We can enable relaxed ordering for reads, but not writes when
5117 * We can enable relaxed ordering for reads, but not writes when
5118 * DCA is enabled. This is due to a known issue in some chipsets 5155 * DCA is enabled. This is due to a known issue in some chipsets
5119 * which will cause the DCA tag to be cleared. 5156 * which will cause the DCA tag to be cleared.
5120 */ 5157 */
@@ -5183,7 +5220,8 @@ static int __igb_notify_dca(struct device *dev, void *data)
5183 case DCA_PROVIDER_REMOVE: 5220 case DCA_PROVIDER_REMOVE:
5184 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 5221 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
5185 /* without this a class_device is left 5222 /* without this a class_device is left
5186 * hanging around in the sysfs model */ 5223 * hanging around in the sysfs model
5224 */
5187 dca_remove_requester(dev); 5225 dca_remove_requester(dev);
5188 dev_info(&pdev->dev, "DCA disabled\n"); 5226 dev_info(&pdev->dev, "DCA disabled\n");
5189 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 5227 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
@@ -5196,12 +5234,12 @@ static int __igb_notify_dca(struct device *dev, void *data)
5196} 5234}
5197 5235
5198static int igb_notify_dca(struct notifier_block *nb, unsigned long event, 5236static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5199 void *p) 5237 void *p)
5200{ 5238{
5201 int ret_val; 5239 int ret_val;
5202 5240
5203 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, 5241 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
5204 __igb_notify_dca); 5242 __igb_notify_dca);
5205 5243
5206 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 5244 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5207} 5245}
@@ -5215,6 +5253,9 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5215 eth_zero_addr(mac_addr); 5253 eth_zero_addr(mac_addr);
5216 igb_set_vf_mac(adapter, vf, mac_addr); 5254 igb_set_vf_mac(adapter, vf, mac_addr);
5217 5255
5256 /* By default spoof check is enabled for all VFs */
5257 adapter->vf_data[vf].spoofchk_enabled = true;
5258
5218 return 0; 5259 return 0;
5219} 5260}
5220 5261
@@ -5273,7 +5314,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5273 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 5314 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5274 5315
5275 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | 5316 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
5276 IGB_VF_FLAG_MULTI_PROMISC); 5317 IGB_VF_FLAG_MULTI_PROMISC);
5277 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); 5318 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5278 5319
5279 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { 5320 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
@@ -5281,8 +5322,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5281 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; 5322 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
5282 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; 5323 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5283 } else { 5324 } else {
5284 /* 5325 /* if we have hashes and we are clearing a multicast promisc
5285 * if we have hashes and we are clearing a multicast promisc
5286 * flag we need to write the hashes to the MTA as this step 5326 * flag we need to write the hashes to the MTA as this step
5287 * was previously skipped 5327 * was previously skipped
5288 */ 5328 */
@@ -5303,7 +5343,6 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5303 return -EINVAL; 5343 return -EINVAL;
5304 5344
5305 return 0; 5345 return 0;
5306
5307} 5346}
5308 5347
5309static int igb_set_vf_multicasts(struct igb_adapter *adapter, 5348static int igb_set_vf_multicasts(struct igb_adapter *adapter,
@@ -5510,22 +5549,20 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5510 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); 5549 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5511 if (test_bit(__IGB_DOWN, &adapter->state)) { 5550 if (test_bit(__IGB_DOWN, &adapter->state)) {
5512 dev_warn(&adapter->pdev->dev, 5551 dev_warn(&adapter->pdev->dev,
5513 "The VF VLAN has been set," 5552 "The VF VLAN has been set, but the PF device is not up.\n");
5514 " but the PF device is not up.\n");
5515 dev_warn(&adapter->pdev->dev, 5553 dev_warn(&adapter->pdev->dev,
5516 "Bring the PF device up before" 5554 "Bring the PF device up before attempting to use the VF device.\n");
5517 " attempting to use the VF device.\n");
5518 } 5555 }
5519 } else { 5556 } else {
5520 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, 5557 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5521 false, vf); 5558 false, vf);
5522 igb_set_vmvir(adapter, vlan, vf); 5559 igb_set_vmvir(adapter, vlan, vf);
5523 igb_set_vmolr(adapter, vf, true); 5560 igb_set_vmolr(adapter, vf, true);
5524 adapter->vf_data[vf].pf_vlan = 0; 5561 adapter->vf_data[vf].pf_vlan = 0;
5525 adapter->vf_data[vf].pf_qos = 0; 5562 adapter->vf_data[vf].pf_qos = 0;
5526 } 5563 }
5527out: 5564out:
5528 return err; 5565 return err;
5529} 5566}
5530 5567
5531static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) 5568static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
@@ -5603,8 +5640,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
5603 5640
5604static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 5641static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5605{ 5642{
5606 /* 5643 /* The VF MAC Address is stored in a packed array of bytes
5607 * The VF MAC Address is stored in a packed array of bytes
5608 * starting at the second 32 bit word of the msg array 5644 * starting at the second 32 bit word of the msg array
5609 */ 5645 */
5610 unsigned char *addr = (char *)&msg[1]; 5646 unsigned char *addr = (char *)&msg[1];
@@ -5653,11 +5689,9 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5653 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 5689 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
5654 return; 5690 return;
5655 5691
5656 /* 5692 /* until the vf completes a reset it should not be
5657 * until the vf completes a reset it should not be
5658 * allowed to start any configuration. 5693 * allowed to start any configuration.
5659 */ 5694 */
5660
5661 if (msgbuf[0] == E1000_VF_RESET) { 5695 if (msgbuf[0] == E1000_VF_RESET) {
5662 igb_vf_reset_msg(adapter, vf); 5696 igb_vf_reset_msg(adapter, vf);
5663 return; 5697 return;
@@ -5677,9 +5711,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5677 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 5711 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5678 else 5712 else
5679 dev_warn(&pdev->dev, 5713 dev_warn(&pdev->dev,
5680 "VF %d attempted to override administratively " 5714 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
5681 "set MAC address\nReload the VF driver to " 5715 vf);
5682 "resume operations\n", vf);
5683 break; 5716 break;
5684 case E1000_VF_SET_PROMISC: 5717 case E1000_VF_SET_PROMISC:
5685 retval = igb_set_vf_promisc(adapter, msgbuf, vf); 5718 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -5694,9 +5727,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5694 retval = -1; 5727 retval = -1;
5695 if (vf_data->pf_vlan) 5728 if (vf_data->pf_vlan)
5696 dev_warn(&pdev->dev, 5729 dev_warn(&pdev->dev,
5697 "VF %d attempted to override administratively " 5730 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
5698 "set VLAN tag\nReload the VF driver to " 5731 vf);
5699 "resume operations\n", vf);
5700 else 5732 else
5701 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 5733 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
5702 break; 5734 break;
@@ -5765,9 +5797,9 @@ static void igb_set_uta(struct igb_adapter *adapter)
5765} 5797}
5766 5798
5767/** 5799/**
5768 * igb_intr_msi - Interrupt Handler 5800 * igb_intr_msi - Interrupt Handler
5769 * @irq: interrupt number 5801 * @irq: interrupt number
5770 * @data: pointer to a network interface device structure 5802 * @data: pointer to a network interface device structure
5771 **/ 5803 **/
5772static irqreturn_t igb_intr_msi(int irq, void *data) 5804static irqreturn_t igb_intr_msi(int irq, void *data)
5773{ 5805{
@@ -5810,9 +5842,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
5810} 5842}
5811 5843
5812/** 5844/**
5813 * igb_intr - Legacy Interrupt Handler 5845 * igb_intr - Legacy Interrupt Handler
5814 * @irq: interrupt number 5846 * @irq: interrupt number
5815 * @data: pointer to a network interface device structure 5847 * @data: pointer to a network interface device structure
5816 **/ 5848 **/
5817static irqreturn_t igb_intr(int irq, void *data) 5849static irqreturn_t igb_intr(int irq, void *data)
5818{ 5850{
@@ -5820,11 +5852,13 @@ static irqreturn_t igb_intr(int irq, void *data)
5820 struct igb_q_vector *q_vector = adapter->q_vector[0]; 5852 struct igb_q_vector *q_vector = adapter->q_vector[0];
5821 struct e1000_hw *hw = &adapter->hw; 5853 struct e1000_hw *hw = &adapter->hw;
5822 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 5854 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5823 * need for the IMC write */ 5855 * need for the IMC write
5856 */
5824 u32 icr = rd32(E1000_ICR); 5857 u32 icr = rd32(E1000_ICR);
5825 5858
5826 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 5859 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5827 * not set, then the adapter didn't send an interrupt */ 5860 * not set, then the adapter didn't send an interrupt
5861 */
5828 if (!(icr & E1000_ICR_INT_ASSERTED)) 5862 if (!(icr & E1000_ICR_INT_ASSERTED))
5829 return IRQ_NONE; 5863 return IRQ_NONE;
5830 5864
@@ -5883,15 +5917,15 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
5883} 5917}
5884 5918
5885/** 5919/**
5886 * igb_poll - NAPI Rx polling callback 5920 * igb_poll - NAPI Rx polling callback
5887 * @napi: napi polling structure 5921 * @napi: napi polling structure
5888 * @budget: count of how many packets we should handle 5922 * @budget: count of how many packets we should handle
5889 **/ 5923 **/
5890static int igb_poll(struct napi_struct *napi, int budget) 5924static int igb_poll(struct napi_struct *napi, int budget)
5891{ 5925{
5892 struct igb_q_vector *q_vector = container_of(napi, 5926 struct igb_q_vector *q_vector = container_of(napi,
5893 struct igb_q_vector, 5927 struct igb_q_vector,
5894 napi); 5928 napi);
5895 bool clean_complete = true; 5929 bool clean_complete = true;
5896 5930
5897#ifdef CONFIG_IGB_DCA 5931#ifdef CONFIG_IGB_DCA
@@ -5916,10 +5950,10 @@ static int igb_poll(struct napi_struct *napi, int budget)
5916} 5950}
5917 5951
5918/** 5952/**
5919 * igb_clean_tx_irq - Reclaim resources after transmit completes 5953 * igb_clean_tx_irq - Reclaim resources after transmit completes
5920 * @q_vector: pointer to q_vector containing needed info 5954 * @q_vector: pointer to q_vector containing needed info
5921 * 5955 *
5922 * returns true if ring is completely cleaned 5956 * returns true if ring is completely cleaned
5923 **/ 5957 **/
5924static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) 5958static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5925{ 5959{
@@ -6025,7 +6059,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6025 struct e1000_hw *hw = &adapter->hw; 6059 struct e1000_hw *hw = &adapter->hw;
6026 6060
6027 /* Detect a transmit hang in hardware, this serializes the 6061 /* Detect a transmit hang in hardware, this serializes the
6028 * check with the clearing of time_stamp and movement of i */ 6062 * check with the clearing of time_stamp and movement of i
6063 */
6029 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 6064 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
6030 if (tx_buffer->next_to_watch && 6065 if (tx_buffer->next_to_watch &&
6031 time_after(jiffies, tx_buffer->time_stamp + 6066 time_after(jiffies, tx_buffer->time_stamp +
@@ -6064,8 +6099,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6064 6099
6065#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 6100#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
6066 if (unlikely(total_packets && 6101 if (unlikely(total_packets &&
6067 netif_carrier_ok(tx_ring->netdev) && 6102 netif_carrier_ok(tx_ring->netdev) &&
6068 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 6103 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
6069 /* Make sure that anybody stopping the queue after this 6104 /* Make sure that anybody stopping the queue after this
6070 * sees the new next_to_clean. 6105 * sees the new next_to_clean.
6071 */ 6106 */
@@ -6086,11 +6121,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6086} 6121}
6087 6122
6088/** 6123/**
6089 * igb_reuse_rx_page - page flip buffer and store it back on the ring 6124 * igb_reuse_rx_page - page flip buffer and store it back on the ring
6090 * @rx_ring: rx descriptor ring to store buffers on 6125 * @rx_ring: rx descriptor ring to store buffers on
6091 * @old_buff: donor buffer to have page reused 6126 * @old_buff: donor buffer to have page reused
6092 * 6127 *
6093 * Synchronizes page for reuse by the adapter 6128 * Synchronizes page for reuse by the adapter
6094 **/ 6129 **/
6095static void igb_reuse_rx_page(struct igb_ring *rx_ring, 6130static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6096 struct igb_rx_buffer *old_buff) 6131 struct igb_rx_buffer *old_buff)
@@ -6150,19 +6185,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6150} 6185}
6151 6186
6152/** 6187/**
6153 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff 6188 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
6154 * @rx_ring: rx descriptor ring to transact packets on 6189 * @rx_ring: rx descriptor ring to transact packets on
6155 * @rx_buffer: buffer containing page to add 6190 * @rx_buffer: buffer containing page to add
6156 * @rx_desc: descriptor containing length of buffer written by hardware 6191 * @rx_desc: descriptor containing length of buffer written by hardware
6157 * @skb: sk_buff to place the data into 6192 * @skb: sk_buff to place the data into
6158 * 6193 *
6159 * This function will add the data contained in rx_buffer->page to the skb. 6194 * This function will add the data contained in rx_buffer->page to the skb.
6160 * This is done either through a direct copy if the data in the buffer is 6195 * This is done either through a direct copy if the data in the buffer is
6161 * less than the skb header size, otherwise it will just attach the page as 6196 * less than the skb header size, otherwise it will just attach the page as
6162 * a frag to the skb. 6197 * a frag to the skb.
6163 * 6198 *
6164 * The function will then update the page offset if necessary and return 6199 * The function will then update the page offset if necessary and return
6165 * true if the buffer can be reused by the adapter. 6200 * true if the buffer can be reused by the adapter.
6166 **/ 6201 **/
6167static bool igb_add_rx_frag(struct igb_ring *rx_ring, 6202static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6168 struct igb_rx_buffer *rx_buffer, 6203 struct igb_rx_buffer *rx_buffer,
@@ -6305,8 +6340,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6305 return NULL; 6340 return NULL;
6306 } 6341 }
6307 6342
6308 /* 6343 /* we will be copying header into skb->data in
6309 * we will be copying header into skb->data in
6310 * pskb_may_pull so it is in our interest to prefetch 6344 * pskb_may_pull so it is in our interest to prefetch
6311 * it now to avoid a possible cache miss 6345 * it now to avoid a possible cache miss
6312 */ 6346 */
@@ -6354,8 +6388,7 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
6354 if (igb_test_staterr(rx_desc, 6388 if (igb_test_staterr(rx_desc,
6355 E1000_RXDEXT_STATERR_TCPE | 6389 E1000_RXDEXT_STATERR_TCPE |
6356 E1000_RXDEXT_STATERR_IPE)) { 6390 E1000_RXDEXT_STATERR_IPE)) {
6357 /* 6391 /* work around errata with sctp packets where the TCPE aka
6358 * work around errata with sctp packets where the TCPE aka
6359 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 6392 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
6360 * packets, (aka let the stack check the crc32c) 6393 * packets, (aka let the stack check the crc32c)
6361 */ 6394 */
@@ -6386,15 +6419,15 @@ static inline void igb_rx_hash(struct igb_ring *ring,
6386} 6419}
6387 6420
6388/** 6421/**
6389 * igb_is_non_eop - process handling of non-EOP buffers 6422 * igb_is_non_eop - process handling of non-EOP buffers
6390 * @rx_ring: Rx ring being processed 6423 * @rx_ring: Rx ring being processed
6391 * @rx_desc: Rx descriptor for current buffer 6424 * @rx_desc: Rx descriptor for current buffer
6392 * @skb: current socket buffer containing buffer in progress 6425 * @skb: current socket buffer containing buffer in progress
6393 * 6426 *
6394 * This function updates next to clean. If the buffer is an EOP buffer 6427 * This function updates next to clean. If the buffer is an EOP buffer
6395 * this function exits returning false, otherwise it will place the 6428 * this function exits returning false, otherwise it will place the
6396 * sk_buff in the next buffer to be chained and return true indicating 6429 * sk_buff in the next buffer to be chained and return true indicating
6397 * that this is in fact a non-EOP buffer. 6430 * that this is in fact a non-EOP buffer.
6398 **/ 6431 **/
6399static bool igb_is_non_eop(struct igb_ring *rx_ring, 6432static bool igb_is_non_eop(struct igb_ring *rx_ring,
6400 union e1000_adv_rx_desc *rx_desc) 6433 union e1000_adv_rx_desc *rx_desc)
@@ -6414,15 +6447,15 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
6414} 6447}
6415 6448
6416/** 6449/**
6417 * igb_get_headlen - determine size of header for LRO/GRO 6450 * igb_get_headlen - determine size of header for LRO/GRO
6418 * @data: pointer to the start of the headers 6451 * @data: pointer to the start of the headers
6419 * @max_len: total length of section to find headers in 6452 * @max_len: total length of section to find headers in
6420 * 6453 *
6421 * This function is meant to determine the length of headers that will 6454 * This function is meant to determine the length of headers that will
6422 * be recognized by hardware for LRO, and GRO offloads. The main 6455 * be recognized by hardware for LRO, and GRO offloads. The main
6423 * motivation of doing this is to only perform one pull for IPv4 TCP 6456 * motivation of doing this is to only perform one pull for IPv4 TCP
6424 * packets so that we can do basic things like calculating the gso_size 6457 * packets so that we can do basic things like calculating the gso_size
6425 * based on the average data per packet. 6458 * based on the average data per packet.
6426 **/ 6459 **/
6427static unsigned int igb_get_headlen(unsigned char *data, 6460static unsigned int igb_get_headlen(unsigned char *data,
6428 unsigned int max_len) 6461 unsigned int max_len)
@@ -6473,7 +6506,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6473 return hdr.network - data; 6506 return hdr.network - data;
6474 6507
6475 /* record next protocol if header is present */ 6508 /* record next protocol if header is present */
6476 if (!hdr.ipv4->frag_off) 6509 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
6477 nexthdr = hdr.ipv4->protocol; 6510 nexthdr = hdr.ipv4->protocol;
6478 } else if (protocol == __constant_htons(ETH_P_IPV6)) { 6511 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
6479 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) 6512 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
@@ -6509,8 +6542,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6509 hdr.network += sizeof(struct udphdr); 6542 hdr.network += sizeof(struct udphdr);
6510 } 6543 }
6511 6544
6512 /* 6545 /* If everything has gone correctly hdr.network should be the
6513 * If everything has gone correctly hdr.network should be the
6514 * data section of the packet and will be the end of the header. 6546 * data section of the packet and will be the end of the header.
6515 * If not then it probably represents the end of the last recognized 6547 * If not then it probably represents the end of the last recognized
6516 * header. 6548 * header.
@@ -6522,17 +6554,17 @@ static unsigned int igb_get_headlen(unsigned char *data,
6522} 6554}
6523 6555
6524/** 6556/**
6525 * igb_pull_tail - igb specific version of skb_pull_tail 6557 * igb_pull_tail - igb specific version of skb_pull_tail
6526 * @rx_ring: rx descriptor ring packet is being transacted on 6558 * @rx_ring: rx descriptor ring packet is being transacted on
6527 * @rx_desc: pointer to the EOP Rx descriptor 6559 * @rx_desc: pointer to the EOP Rx descriptor
6528 * @skb: pointer to current skb being adjusted 6560 * @skb: pointer to current skb being adjusted
6529 * 6561 *
6530 * This function is an igb specific version of __pskb_pull_tail. The 6562 * This function is an igb specific version of __pskb_pull_tail. The
6531 * main difference between this version and the original function is that 6563 * main difference between this version and the original function is that
6532 * this function can make several assumptions about the state of things 6564 * this function can make several assumptions about the state of things
6533 * that allow for significant optimizations versus the standard function. 6565 * that allow for significant optimizations versus the standard function.
6534 * As a result we can do things like drop a frag and maintain an accurate 6566 * As a result we can do things like drop a frag and maintain an accurate
6535 * truesize for the skb. 6567 * truesize for the skb.
6536 */ 6568 */
6537static void igb_pull_tail(struct igb_ring *rx_ring, 6569static void igb_pull_tail(struct igb_ring *rx_ring,
6538 union e1000_adv_rx_desc *rx_desc, 6570 union e1000_adv_rx_desc *rx_desc,
@@ -6542,8 +6574,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6542 unsigned char *va; 6574 unsigned char *va;
6543 unsigned int pull_len; 6575 unsigned int pull_len;
6544 6576
6545 /* 6577 /* it is valid to use page_address instead of kmap since we are
6546 * it is valid to use page_address instead of kmap since we are
6547 * working with pages allocated out of the lomem pool per 6578 * working with pages allocated out of the lomem pool per
6548 * alloc_page(GFP_ATOMIC) 6579 * alloc_page(GFP_ATOMIC)
6549 */ 6580 */
@@ -6563,8 +6594,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6563 va += IGB_TS_HDR_LEN; 6594 va += IGB_TS_HDR_LEN;
6564 } 6595 }
6565 6596
6566 /* 6597 /* we need the header to contain the greater of either ETH_HLEN or
6567 * we need the header to contain the greater of either ETH_HLEN or
6568 * 60 bytes if the skb->len is less than 60 for skb_pad. 6598 * 60 bytes if the skb->len is less than 60 for skb_pad.
6569 */ 6599 */
6570 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); 6600 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
@@ -6580,24 +6610,23 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6580} 6610}
6581 6611
6582/** 6612/**
6583 * igb_cleanup_headers - Correct corrupted or empty headers 6613 * igb_cleanup_headers - Correct corrupted or empty headers
6584 * @rx_ring: rx descriptor ring packet is being transacted on 6614 * @rx_ring: rx descriptor ring packet is being transacted on
6585 * @rx_desc: pointer to the EOP Rx descriptor 6615 * @rx_desc: pointer to the EOP Rx descriptor
6586 * @skb: pointer to current skb being fixed 6616 * @skb: pointer to current skb being fixed
6587 * 6617 *
6588 * Address the case where we are pulling data in on pages only 6618 * Address the case where we are pulling data in on pages only
6589 * and as such no data is present in the skb header. 6619 * and as such no data is present in the skb header.
6590 * 6620 *
6591 * In addition if skb is not at least 60 bytes we need to pad it so that 6621 * In addition if skb is not at least 60 bytes we need to pad it so that
6592 * it is large enough to qualify as a valid Ethernet frame. 6622 * it is large enough to qualify as a valid Ethernet frame.
6593 * 6623 *
6594 * Returns true if an error was encountered and skb was freed. 6624 * Returns true if an error was encountered and skb was freed.
6595 **/ 6625 **/
6596static bool igb_cleanup_headers(struct igb_ring *rx_ring, 6626static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6597 union e1000_adv_rx_desc *rx_desc, 6627 union e1000_adv_rx_desc *rx_desc,
6598 struct sk_buff *skb) 6628 struct sk_buff *skb)
6599{ 6629{
6600
6601 if (unlikely((igb_test_staterr(rx_desc, 6630 if (unlikely((igb_test_staterr(rx_desc,
6602 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { 6631 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
6603 struct net_device *netdev = rx_ring->netdev; 6632 struct net_device *netdev = rx_ring->netdev;
@@ -6624,14 +6653,14 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6624} 6653}
6625 6654
6626/** 6655/**
6627 * igb_process_skb_fields - Populate skb header fields from Rx descriptor 6656 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
6628 * @rx_ring: rx descriptor ring packet is being transacted on 6657 * @rx_ring: rx descriptor ring packet is being transacted on
6629 * @rx_desc: pointer to the EOP Rx descriptor 6658 * @rx_desc: pointer to the EOP Rx descriptor
6630 * @skb: pointer to current skb being populated 6659 * @skb: pointer to current skb being populated
6631 * 6660 *
6632 * This function checks the ring, descriptor, and packet information in 6661 * This function checks the ring, descriptor, and packet information in
6633 * order to populate the hash, checksum, VLAN, timestamp, protocol, and 6662 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
6634 * other fields within the skb. 6663 * other fields within the skb.
6635 **/ 6664 **/
6636static void igb_process_skb_fields(struct igb_ring *rx_ring, 6665static void igb_process_skb_fields(struct igb_ring *rx_ring,
6637 union e1000_adv_rx_desc *rx_desc, 6666 union e1000_adv_rx_desc *rx_desc,
@@ -6762,8 +6791,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6762 /* map page for use */ 6791 /* map page for use */
6763 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 6792 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
6764 6793
6765 /* 6794 /* if mapping failed free memory back to system since
6766 * if mapping failed free memory back to system since
6767 * there isn't much point in holding memory we can't use 6795 * there isn't much point in holding memory we can't use
6768 */ 6796 */
6769 if (dma_mapping_error(rx_ring->dev, dma)) { 6797 if (dma_mapping_error(rx_ring->dev, dma)) {
@@ -6789,8 +6817,8 @@ static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
6789} 6817}
6790 6818
6791/** 6819/**
6792 * igb_alloc_rx_buffers - Replace used receive buffers; packet split 6820 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
6793 * @adapter: address of board private structure 6821 * @adapter: address of board private structure
6794 **/ 6822 **/
6795void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) 6823void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6796{ 6824{
@@ -6810,8 +6838,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6810 if (!igb_alloc_mapped_page(rx_ring, bi)) 6838 if (!igb_alloc_mapped_page(rx_ring, bi))
6811 break; 6839 break;
6812 6840
6813 /* 6841 /* Refresh the desc even if buffer_addrs didn't change
6814 * Refresh the desc even if buffer_addrs didn't change
6815 * because each write-back erases this info. 6842 * because each write-back erases this info.
6816 */ 6843 */
6817 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + 6844 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
@@ -6842,8 +6869,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6842 /* update next to alloc since we have filled the ring */ 6869 /* update next to alloc since we have filled the ring */
6843 rx_ring->next_to_alloc = i; 6870 rx_ring->next_to_alloc = i;
6844 6871
6845 /* 6872 /* Force memory writes to complete before letting h/w
6846 * Force memory writes to complete before letting h/w
6847 * know there are new descriptors to fetch. (Only 6873 * know there are new descriptors to fetch. (Only
6848 * applicable for weak-ordered memory model archs, 6874 * applicable for weak-ordered memory model archs,
6849 * such as IA-64). 6875 * such as IA-64).
@@ -7004,15 +7030,24 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
7004 mac->autoneg = 0; 7030 mac->autoneg = 0;
7005 7031
7006 /* Make sure dplx is at most 1 bit and lsb of speed is not set 7032 /* Make sure dplx is at most 1 bit and lsb of speed is not set
7007 * for the switch() below to work */ 7033 * for the switch() below to work
7034 */
7008 if ((spd & 1) || (dplx & ~1)) 7035 if ((spd & 1) || (dplx & ~1))
7009 goto err_inval; 7036 goto err_inval;
7010 7037
7011 /* Fiber NIC's only allow 1000 Gbps Full duplex */ 7038 /* Fiber NIC's only allow 1000 gbps Full duplex
7012 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && 7039 * and 100Mbps Full duplex for 100baseFx sfp
7013 spd != SPEED_1000 && 7040 */
7014 dplx != DUPLEX_FULL) 7041 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
7015 goto err_inval; 7042 switch (spd + dplx) {
7043 case SPEED_10 + DUPLEX_HALF:
7044 case SPEED_10 + DUPLEX_FULL:
7045 case SPEED_100 + DUPLEX_HALF:
7046 goto err_inval;
7047 default:
7048 break;
7049 }
7050 }
7016 7051
7017 switch (spd + dplx) { 7052 switch (spd + dplx) {
7018 case SPEED_10 + DUPLEX_HALF: 7053 case SPEED_10 + DUPLEX_HALF:
@@ -7111,7 +7146,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
7111 igb_power_up_link(adapter); 7146 igb_power_up_link(adapter);
7112 7147
7113 /* Release control of h/w to f/w. If f/w is AMT enabled, this 7148 /* Release control of h/w to f/w. If f/w is AMT enabled, this
7114 * would have already happened in close and is redundant. */ 7149 * would have already happened in close and is redundant.
7150 */
7115 igb_release_hw_control(adapter); 7151 igb_release_hw_control(adapter);
7116 7152
7117 pci_disable_device(pdev); 7153 pci_disable_device(pdev);
@@ -7173,7 +7209,8 @@ static int igb_resume(struct device *dev)
7173 igb_reset(adapter); 7209 igb_reset(adapter);
7174 7210
7175 /* let the f/w know that the h/w is now under the control of the 7211 /* let the f/w know that the h/w is now under the control of the
7176 * driver. */ 7212 * driver.
7213 */
7177 igb_get_hw_control(adapter); 7214 igb_get_hw_control(adapter);
7178 7215
7179 wr32(E1000_WUS, ~0); 7216 wr32(E1000_WUS, ~0);
@@ -7309,8 +7346,7 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
7309} 7346}
7310 7347
7311#ifdef CONFIG_NET_POLL_CONTROLLER 7348#ifdef CONFIG_NET_POLL_CONTROLLER
7312/* 7349/* Polling 'interrupt' - used by things like netconsole to send skbs
7313 * Polling 'interrupt' - used by things like netconsole to send skbs
7314 * without having to re-enable interrupts. It's not called while 7350 * without having to re-enable interrupts. It's not called while
7315 * the interrupt routine is executing. 7351 * the interrupt routine is executing.
7316 */ 7352 */
@@ -7333,13 +7369,13 @@ static void igb_netpoll(struct net_device *netdev)
7333#endif /* CONFIG_NET_POLL_CONTROLLER */ 7369#endif /* CONFIG_NET_POLL_CONTROLLER */
7334 7370
7335/** 7371/**
7336 * igb_io_error_detected - called when PCI error is detected 7372 * igb_io_error_detected - called when PCI error is detected
7337 * @pdev: Pointer to PCI device 7373 * @pdev: Pointer to PCI device
7338 * @state: The current pci connection state 7374 * @state: The current pci connection state
7339 * 7375 *
7340 * This function is called after a PCI bus error affecting 7376 * This function is called after a PCI bus error affecting
7341 * this device has been detected. 7377 * this device has been detected.
7342 */ 7378 **/
7343static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, 7379static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7344 pci_channel_state_t state) 7380 pci_channel_state_t state)
7345{ 7381{
@@ -7360,12 +7396,12 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7360} 7396}
7361 7397
7362/** 7398/**
7363 * igb_io_slot_reset - called after the pci bus has been reset. 7399 * igb_io_slot_reset - called after the pci bus has been reset.
7364 * @pdev: Pointer to PCI device 7400 * @pdev: Pointer to PCI device
7365 * 7401 *
7366 * Restart the card from scratch, as if from a cold-boot. Implementation 7402 * Restart the card from scratch, as if from a cold-boot. Implementation
7367 * resembles the first-half of the igb_resume routine. 7403 * resembles the first-half of the igb_resume routine.
7368 */ 7404 **/
7369static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) 7405static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7370{ 7406{
7371 struct net_device *netdev = pci_get_drvdata(pdev); 7407 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7393,8 +7429,9 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7393 7429
7394 err = pci_cleanup_aer_uncorrect_error_status(pdev); 7430 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7395 if (err) { 7431 if (err) {
7396 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " 7432 dev_err(&pdev->dev,
7397 "failed 0x%0x\n", err); 7433 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7434 err);
7398 /* non-fatal, continue */ 7435 /* non-fatal, continue */
7399 } 7436 }
7400 7437
@@ -7402,12 +7439,12 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7402} 7439}
7403 7440
7404/** 7441/**
7405 * igb_io_resume - called when traffic can start flowing again. 7442 * igb_io_resume - called when traffic can start flowing again.
7406 * @pdev: Pointer to PCI device 7443 * @pdev: Pointer to PCI device
7407 * 7444 *
7408 * This callback is called when the error recovery driver tells us that 7445 * This callback is called when the error recovery driver tells us that
7409 * its OK to resume normal operation. Implementation resembles the 7446 * its OK to resume normal operation. Implementation resembles the
7410 * second-half of the igb_resume routine. 7447 * second-half of the igb_resume routine.
7411 */ 7448 */
7412static void igb_io_resume(struct pci_dev *pdev) 7449static void igb_io_resume(struct pci_dev *pdev)
7413{ 7450{
@@ -7424,12 +7461,13 @@ static void igb_io_resume(struct pci_dev *pdev)
7424 netif_device_attach(netdev); 7461 netif_device_attach(netdev);
7425 7462
7426 /* let the f/w know that the h/w is now under the control of the 7463 /* let the f/w know that the h/w is now under the control of the
7427 * driver. */ 7464 * driver.
7465 */
7428 igb_get_hw_control(adapter); 7466 igb_get_hw_control(adapter);
7429} 7467}
7430 7468
7431static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, 7469static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7432 u8 qsel) 7470 u8 qsel)
7433{ 7471{
7434 u32 rar_low, rar_high; 7472 u32 rar_low, rar_high;
7435 struct e1000_hw *hw = &adapter->hw; 7473 struct e1000_hw *hw = &adapter->hw;
@@ -7438,7 +7476,7 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7438 * from network order (big endian) to little endian 7476 * from network order (big endian) to little endian
7439 */ 7477 */
7440 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | 7478 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
7441 ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); 7479 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
7442 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 7480 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
7443 7481
7444 /* Indicate to hardware the Address is Valid. */ 7482 /* Indicate to hardware the Address is Valid. */
@@ -7456,11 +7494,12 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7456} 7494}
7457 7495
7458static int igb_set_vf_mac(struct igb_adapter *adapter, 7496static int igb_set_vf_mac(struct igb_adapter *adapter,
7459 int vf, unsigned char *mac_addr) 7497 int vf, unsigned char *mac_addr)
7460{ 7498{
7461 struct e1000_hw *hw = &adapter->hw; 7499 struct e1000_hw *hw = &adapter->hw;
7462 /* VF MAC addresses start at end of receive addresses and moves 7500 /* VF MAC addresses start at end of receive addresses and moves
7463 * torwards the first, as a result a collision should not be possible */ 7501 * towards the first, as a result a collision should not be possible
7502 */
7464 int rar_entry = hw->mac.rar_entry_count - (vf + 1); 7503 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
7465 7504
7466 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 7505 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
@@ -7477,13 +7516,13 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
7477 return -EINVAL; 7516 return -EINVAL;
7478 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; 7517 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
7479 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); 7518 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
7480 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" 7519 dev_info(&adapter->pdev->dev,
7481 " change effective."); 7520 "Reload the VF driver to make this change effective.");
7482 if (test_bit(__IGB_DOWN, &adapter->state)) { 7521 if (test_bit(__IGB_DOWN, &adapter->state)) {
7483 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," 7522 dev_warn(&adapter->pdev->dev,
7484 " but the PF device is not up.\n"); 7523 "The VF MAC address has been set, but the PF device is not up.\n");
7485 dev_warn(&adapter->pdev->dev, "Bring the PF device up before" 7524 dev_warn(&adapter->pdev->dev,
7486 " attempting to use the VF device.\n"); 7525 "Bring the PF device up before attempting to use the VF device.\n");
7487 } 7526 }
7488 return igb_set_vf_mac(adapter, vf, mac); 7527 return igb_set_vf_mac(adapter, vf, mac);
7489} 7528}
@@ -7510,19 +7549,19 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
7510 /* Calculate the rate factor values to set */ 7549 /* Calculate the rate factor values to set */
7511 rf_int = link_speed / tx_rate; 7550 rf_int = link_speed / tx_rate;
7512 rf_dec = (link_speed - (rf_int * tx_rate)); 7551 rf_dec = (link_speed - (rf_int * tx_rate));
7513 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; 7552 rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
7553 tx_rate;
7514 7554
7515 bcnrc_val = E1000_RTTBCNRC_RS_ENA; 7555 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
7516 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) & 7556 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
7517 E1000_RTTBCNRC_RF_INT_MASK); 7557 E1000_RTTBCNRC_RF_INT_MASK);
7518 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); 7558 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
7519 } else { 7559 } else {
7520 bcnrc_val = 0; 7560 bcnrc_val = 0;
7521 } 7561 }
7522 7562
7523 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ 7563 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
7524 /* 7564 /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7525 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7526 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. 7565 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
7527 */ 7566 */
7528 wr32(E1000_RTTBCNRM, 0x14); 7567 wr32(E1000_RTTBCNRM, 0x14);
@@ -7544,8 +7583,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7544 reset_rate = true; 7583 reset_rate = true;
7545 adapter->vf_rate_link_speed = 0; 7584 adapter->vf_rate_link_speed = 0;
7546 dev_info(&adapter->pdev->dev, 7585 dev_info(&adapter->pdev->dev,
7547 "Link speed has been changed. VF Transmit " 7586 "Link speed has been changed. VF Transmit rate is disabled\n");
7548 "rate is disabled\n");
7549 } 7587 }
7550 7588
7551 for (i = 0; i < adapter->vfs_allocated_count; i++) { 7589 for (i = 0; i < adapter->vfs_allocated_count; i++) {
@@ -7553,8 +7591,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7553 adapter->vf_data[i].tx_rate = 0; 7591 adapter->vf_data[i].tx_rate = 0;
7554 7592
7555 igb_set_vf_rate_limit(&adapter->hw, i, 7593 igb_set_vf_rate_limit(&adapter->hw, i,
7556 adapter->vf_data[i].tx_rate, 7594 adapter->vf_data[i].tx_rate,
7557 actual_link_speed); 7595 actual_link_speed);
7558 } 7596 }
7559} 7597}
7560 7598
@@ -7580,6 +7618,33 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
7580 return 0; 7618 return 0;
7581} 7619}
7582 7620
7621static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
7622 bool setting)
7623{
7624 struct igb_adapter *adapter = netdev_priv(netdev);
7625 struct e1000_hw *hw = &adapter->hw;
7626 u32 reg_val, reg_offset;
7627
7628 if (!adapter->vfs_allocated_count)
7629 return -EOPNOTSUPP;
7630
7631 if (vf >= adapter->vfs_allocated_count)
7632 return -EINVAL;
7633
7634 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
7635 reg_val = rd32(reg_offset);
7636 if (setting)
7637 reg_val |= ((1 << vf) |
7638 (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
7639 else
7640 reg_val &= ~((1 << vf) |
7641 (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
7642 wr32(reg_offset, reg_val);
7643
7644 adapter->vf_data[vf].spoofchk_enabled = setting;
7645 return E1000_SUCCESS;
7646}
7647
7583static int igb_ndo_get_vf_config(struct net_device *netdev, 7648static int igb_ndo_get_vf_config(struct net_device *netdev,
7584 int vf, struct ifla_vf_info *ivi) 7649 int vf, struct ifla_vf_info *ivi)
7585{ 7650{
@@ -7591,6 +7656,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
7591 ivi->tx_rate = adapter->vf_data[vf].tx_rate; 7656 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
7592 ivi->vlan = adapter->vf_data[vf].pf_vlan; 7657 ivi->vlan = adapter->vf_data[vf].pf_vlan;
7593 ivi->qos = adapter->vf_data[vf].pf_qos; 7658 ivi->qos = adapter->vf_data[vf].pf_qos;
7659 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
7594 return 0; 7660 return 0;
7595} 7661}
7596 7662
@@ -7603,6 +7669,7 @@ static void igb_vmm_control(struct igb_adapter *adapter)
7603 case e1000_82575: 7669 case e1000_82575:
7604 case e1000_i210: 7670 case e1000_i210:
7605 case e1000_i211: 7671 case e1000_i211:
7672 case e1000_i354:
7606 default: 7673 default:
7607 /* replication is not supported for 82575 */ 7674 /* replication is not supported for 82575 */
7608 return; 7675 return;
@@ -7625,7 +7692,7 @@ static void igb_vmm_control(struct igb_adapter *adapter)
7625 igb_vmdq_set_loopback_pf(hw, true); 7692 igb_vmdq_set_loopback_pf(hw, true);
7626 igb_vmdq_set_replication_pf(hw, true); 7693 igb_vmdq_set_replication_pf(hw, true);
7627 igb_vmdq_set_anti_spoofing_pf(hw, true, 7694 igb_vmdq_set_anti_spoofing_pf(hw, true,
7628 adapter->vfs_allocated_count); 7695 adapter->vfs_allocated_count);
7629 } else { 7696 } else {
7630 igb_vmdq_set_loopback_pf(hw, false); 7697 igb_vmdq_set_loopback_pf(hw, false);
7631 igb_vmdq_set_replication_pf(hw, false); 7698 igb_vmdq_set_replication_pf(hw, false);
@@ -7645,8 +7712,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7645 /* force threshold to 0. */ 7712 /* force threshold to 0. */
7646 wr32(E1000_DMCTXTH, 0); 7713 wr32(E1000_DMCTXTH, 0);
7647 7714
7648 /* 7715 /* DMA Coalescing high water mark needs to be greater
7649 * DMA Coalescing high water mark needs to be greater
7650 * than the Rx threshold. Set hwm to PBA - max frame 7716 * than the Rx threshold. Set hwm to PBA - max frame
7651 * size in 16B units, capping it at PBA - 6KB. 7717 * size in 16B units, capping it at PBA - 6KB.
7652 */ 7718 */
@@ -7659,8 +7725,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7659 & E1000_FCRTC_RTH_COAL_MASK); 7725 & E1000_FCRTC_RTH_COAL_MASK);
7660 wr32(E1000_FCRTC, reg); 7726 wr32(E1000_FCRTC, reg);
7661 7727
7662 /* 7728 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
7663 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
7664 * frame size, capping it at PBA - 10KB. 7729 * frame size, capping it at PBA - 10KB.
7665 */ 7730 */
7666 dmac_thr = pba - adapter->max_frame_size / 512; 7731 dmac_thr = pba - adapter->max_frame_size / 512;
@@ -7678,11 +7743,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7678 reg |= (1000 >> 5); 7743 reg |= (1000 >> 5);
7679 7744
7680 /* Disable BMC-to-OS Watchdog Enable */ 7745 /* Disable BMC-to-OS Watchdog Enable */
7681 reg &= ~E1000_DMACR_DC_BMC2OSW_EN; 7746 if (hw->mac.type != e1000_i354)
7747 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
7748
7682 wr32(E1000_DMACR, reg); 7749 wr32(E1000_DMACR, reg);
7683 7750
7684 /* 7751 /* no lower threshold to disable
7685 * no lower threshold to disable
7686 * coalescing(smart fifb)-UTRESH=0 7752 * coalescing(smart fifb)-UTRESH=0
7687 */ 7753 */
7688 wr32(E1000_DMCRTRH, 0); 7754 wr32(E1000_DMCRTRH, 0);
@@ -7691,15 +7757,13 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7691 7757
7692 wr32(E1000_DMCTLX, reg); 7758 wr32(E1000_DMCTLX, reg);
7693 7759
7694 /* 7760 /* free space in tx packet buffer to wake from
7695 * free space in tx packet buffer to wake from
7696 * DMA coal 7761 * DMA coal
7697 */ 7762 */
7698 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - 7763 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7699 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); 7764 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
7700 7765
7701 /* 7766 /* make low power state decision controlled
7702 * make low power state decision controlled
7703 * by DMA coal 7767 * by DMA coal
7704 */ 7768 */
7705 reg = rd32(E1000_PCIEMISC); 7769 reg = rd32(E1000_PCIEMISC);
@@ -7713,7 +7777,8 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7713 } 7777 }
7714} 7778}
7715 7779
7716/* igb_read_i2c_byte - Reads 8 bit word over I2C 7780/**
7781 * igb_read_i2c_byte - Reads 8 bit word over I2C
7717 * @hw: pointer to hardware structure 7782 * @hw: pointer to hardware structure
7718 * @byte_offset: byte offset to read 7783 * @byte_offset: byte offset to read
7719 * @dev_addr: device address 7784 * @dev_addr: device address
@@ -7721,9 +7786,9 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7721 * 7786 *
7722 * Performs byte read operation over I2C interface at 7787 * Performs byte read operation over I2C interface at
7723 * a specified device address. 7788 * a specified device address.
7724 */ 7789 **/
7725s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 7790s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7726 u8 dev_addr, u8 *data) 7791 u8 dev_addr, u8 *data)
7727{ 7792{
7728 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7793 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7729 struct i2c_client *this_client = adapter->i2c_client; 7794 struct i2c_client *this_client = adapter->i2c_client;
@@ -7750,7 +7815,8 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7750 } 7815 }
7751} 7816}
7752 7817
7753/* igb_write_i2c_byte - Writes 8 bit word over I2C 7818/**
7819 * igb_write_i2c_byte - Writes 8 bit word over I2C
7754 * @hw: pointer to hardware structure 7820 * @hw: pointer to hardware structure
7755 * @byte_offset: byte offset to write 7821 * @byte_offset: byte offset to write
7756 * @dev_addr: device address 7822 * @dev_addr: device address
@@ -7758,9 +7824,9 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7758 * 7824 *
7759 * Performs byte write operation over I2C interface at 7825 * Performs byte write operation over I2C interface at
7760 * a specified device address. 7826 * a specified device address.
7761 */ 7827 **/
7762s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 7828s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7763 u8 dev_addr, u8 data) 7829 u8 dev_addr, u8 data)
7764{ 7830{
7765 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7831 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7766 struct i2c_client *this_client = adapter->i2c_client; 7832 struct i2c_client *this_client = adapter->i2c_client;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0a237507ee85..7e8c477b0ab9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1,5 +1,4 @@
1/* 1/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
2 * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
3 * 2 *
4 * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> 3 * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
5 * 4 *
@@ -27,8 +26,7 @@
27#define INCVALUE_MASK 0x7fffffff 26#define INCVALUE_MASK 0x7fffffff
28#define ISGN 0x80000000 27#define ISGN 0x80000000
29 28
30/* 29/* The 82580 timesync updates the system timer every 8ns by 8ns,
31 * The 82580 timesync updates the system timer every 8ns by 8ns,
32 * and this update value cannot be reprogrammed. 30 * and this update value cannot be reprogrammed.
33 * 31 *
34 * Neither the 82576 nor the 82580 offer registers wide enough to hold 32 * Neither the 82576 nor the 82580 offer registers wide enough to hold
@@ -77,10 +75,7 @@
77#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) 75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
78#define IGB_NBITS_82580 40 76#define IGB_NBITS_82580 40
79 77
80/* 78/* SYSTIM read access for the 82576 */
81 * SYSTIM read access for the 82576
82 */
83
84static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) 79static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
85{ 80{
86 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 81 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
@@ -97,10 +92,7 @@ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
97 return val; 92 return val;
98} 93}
99 94
100/* 95/* SYSTIM read access for the 82580 */
101 * SYSTIM read access for the 82580
102 */
103
104static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) 96static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
105{ 97{
106 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); 98 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
@@ -108,8 +100,7 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
108 u64 val; 100 u64 val;
109 u32 lo, hi, jk; 101 u32 lo, hi, jk;
110 102
111 /* 103 /* The timestamp latches on lowest register read. For the 82580
112 * The timestamp latches on lowest register read. For the 82580
113 * the lowest register is SYSTIMR instead of SYSTIML. However we only 104 * the lowest register is SYSTIMR instead of SYSTIML. However we only
114 * need to provide nanosecond resolution, so we just ignore it. 105 * need to provide nanosecond resolution, so we just ignore it.
115 */ 106 */
@@ -123,17 +114,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
123 return val; 114 return val;
124} 115}
125 116
126/* 117/* SYSTIM read access for I210/I211 */
127 * SYSTIM read access for I210/I211
128 */
129
130static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) 118static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
131{ 119{
132 struct e1000_hw *hw = &adapter->hw; 120 struct e1000_hw *hw = &adapter->hw;
133 u32 sec, nsec, jk; 121 u32 sec, nsec, jk;
134 122
135 /* 123 /* The timestamp latches on lowest register read. For I210/I211, the
136 * The timestamp latches on lowest register read. For I210/I211, the
137 * lowest register is SYSTIMR. Since we only need to provide nanosecond 124 * lowest register is SYSTIMR. Since we only need to provide nanosecond
138 * resolution, we can ignore it. 125 * resolution, we can ignore it.
139 */ 126 */
@@ -150,8 +137,7 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter,
150{ 137{
151 struct e1000_hw *hw = &adapter->hw; 138 struct e1000_hw *hw = &adapter->hw;
152 139
153 /* 140 /* Writing the SYSTIMR register is not necessary as it only provides
154 * Writing the SYSTIMR register is not necessary as it only provides
155 * sub-nanosecond resolution. 141 * sub-nanosecond resolution.
156 */ 142 */
157 wr32(E1000_SYSTIML, ts->tv_nsec); 143 wr32(E1000_SYSTIML, ts->tv_nsec);
@@ -185,6 +171,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
185 switch (adapter->hw.mac.type) { 171 switch (adapter->hw.mac.type) {
186 case e1000_82576: 172 case e1000_82576:
187 case e1000_82580: 173 case e1000_82580:
174 case e1000_i354:
188 case e1000_i350: 175 case e1000_i350:
189 spin_lock_irqsave(&adapter->tmreg_lock, flags); 176 spin_lock_irqsave(&adapter->tmreg_lock, flags);
190 177
@@ -207,10 +194,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
207 } 194 }
208} 195}
209 196
210/* 197/* PTP clock operations */
211 * PTP clock operations
212 */
213
214static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) 198static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
215{ 199{
216 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, 200 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
@@ -387,7 +371,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
387 * 371 *
388 * This work function polls the TSYNCTXCTL valid bit to determine when a 372 * This work function polls the TSYNCTXCTL valid bit to determine when a
389 * timestamp has been taken for the current stored skb. 373 * timestamp has been taken for the current stored skb.
390 */ 374 **/
391void igb_ptp_tx_work(struct work_struct *work) 375void igb_ptp_tx_work(struct work_struct *work)
392{ 376{
393 struct igb_adapter *adapter = container_of(work, struct igb_adapter, 377 struct igb_adapter *adapter = container_of(work, struct igb_adapter,
@@ -437,7 +421,7 @@ static void igb_ptp_overflow_check(struct work_struct *work)
437 * dropped an Rx packet that was timestamped when the ring is full. The 421 * dropped an Rx packet that was timestamped when the ring is full. The
438 * particular error is rare but leaves the device in a state unable to timestamp 422 * particular error is rare but leaves the device in a state unable to timestamp
439 * any future packets. 423 * any future packets.
440 */ 424 **/
441void igb_ptp_rx_hang(struct igb_adapter *adapter) 425void igb_ptp_rx_hang(struct igb_adapter *adapter)
442{ 426{
443 struct e1000_hw *hw = &adapter->hw; 427 struct e1000_hw *hw = &adapter->hw;
@@ -481,7 +465,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
481 * If we were asked to do hardware stamping and such a time stamp is 465 * If we were asked to do hardware stamping and such a time stamp is
482 * available, then it must have been for this skb here because we only 466 * available, then it must have been for this skb here because we only
483 * allow only one such packet into the queue. 467 * allow only one such packet into the queue.
484 */ 468 **/
485void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) 469void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
486{ 470{
487 struct e1000_hw *hw = &adapter->hw; 471 struct e1000_hw *hw = &adapter->hw;
@@ -506,15 +490,14 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
506 * This function is meant to retrieve a timestamp from the first buffer of an 490 * This function is meant to retrieve a timestamp from the first buffer of an
507 * incoming frame. The value is stored in little endian format starting on 491 * incoming frame. The value is stored in little endian format starting on
508 * byte 8. 492 * byte 8.
509 */ 493 **/
510void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, 494void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
511 unsigned char *va, 495 unsigned char *va,
512 struct sk_buff *skb) 496 struct sk_buff *skb)
513{ 497{
514 __le64 *regval = (__le64 *)va; 498 __le64 *regval = (__le64 *)va;
515 499
516 /* 500 /* The timestamp is recorded in little endian format.
517 * The timestamp is recorded in little endian format.
518 * DWORD: 0 1 2 3 501 * DWORD: 0 1 2 3
519 * Field: Reserved Reserved SYSTIML SYSTIMH 502 * Field: Reserved Reserved SYSTIML SYSTIMH
520 */ 503 */
@@ -529,7 +512,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
529 * 512 *
530 * This function is meant to retrieve a timestamp from the internal registers 513 * This function is meant to retrieve a timestamp from the internal registers
531 * of the adapter and store it in the skb. 514 * of the adapter and store it in the skb.
532 */ 515 **/
533void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, 516void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
534 struct sk_buff *skb) 517 struct sk_buff *skb)
535{ 518{
@@ -537,8 +520,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
537 struct e1000_hw *hw = &adapter->hw; 520 struct e1000_hw *hw = &adapter->hw;
538 u64 regval; 521 u64 regval;
539 522
540 /* 523 /* If this bit is set, then the RX registers contain the time stamp. No
541 * If this bit is set, then the RX registers contain the time stamp. No
542 * other packet will be time stamped until we read these registers, so 524 * other packet will be time stamped until we read these registers, so
543 * read the registers to make them available again. Because only one 525 * read the registers to make them available again. Because only one
544 * packet can be time stamped at a time, we know that the register 526 * packet can be time stamped at a time, we know that the register
@@ -574,7 +556,6 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
574 * type has to be specified. Matching the kind of event packet is 556 * type has to be specified. Matching the kind of event packet is
575 * not supported, with the exception of "all V2 events regardless of 557 * not supported, with the exception of "all V2 events regardless of
576 * level 2 or 4". 558 * level 2 or 4".
577 *
578 **/ 559 **/
579int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, 560int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
580 struct ifreq *ifr, int cmd) 561 struct ifreq *ifr, int cmd)
@@ -655,10 +636,9 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
655 return 0; 636 return 0;
656 } 637 }
657 638
658 /* 639 /* Per-packet timestamping only works if all packets are
659 * Per-packet timestamping only works if all packets are
660 * timestamped, so enable timestamping in all packets as 640 * timestamped, so enable timestamping in all packets as
661 * long as one rx filter was configured. 641 * long as one Rx filter was configured.
662 */ 642 */
663 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { 643 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
664 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 644 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
@@ -756,6 +736,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
756 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); 736 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
757 break; 737 break;
758 case e1000_82580: 738 case e1000_82580:
739 case e1000_i354:
759 case e1000_i350: 740 case e1000_i350:
760 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); 741 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
761 adapter->ptp_caps.owner = THIS_MODULE; 742 adapter->ptp_caps.owner = THIS_MODULE;
@@ -844,6 +825,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
844 switch (adapter->hw.mac.type) { 825 switch (adapter->hw.mac.type) {
845 case e1000_82576: 826 case e1000_82576:
846 case e1000_82580: 827 case e1000_82580:
828 case e1000_i354:
847 case e1000_i350: 829 case e1000_i350:
848 cancel_delayed_work_sync(&adapter->ptp_overflow_work); 830 cancel_delayed_work_sync(&adapter->ptp_overflow_work);
849 break; 831 break;
@@ -888,6 +870,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
888 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); 870 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
889 break; 871 break;
890 case e1000_82580: 872 case e1000_82580:
873 case e1000_i354:
891 case e1000_i350: 874 case e1000_i350:
892 case e1000_i210: 875 case e1000_i210:
893 case e1000_i211: 876 case e1000_i211:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a8e10cff7a89..ca932387a80f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -740,6 +740,11 @@ extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
740extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); 740extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
741extern void ixgbe_dbg_init(void); 741extern void ixgbe_dbg_init(void);
742extern void ixgbe_dbg_exit(void); 742extern void ixgbe_dbg_exit(void);
743#else
744static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
745static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
746static inline void ixgbe_dbg_init(void) {}
747static inline void ixgbe_dbg_exit(void) {}
743#endif /* CONFIG_DEBUG_FS */ 748#endif /* CONFIG_DEBUG_FS */
744static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) 749static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
745{ 750{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6bd1dd13682c..c022f9c417a6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5123,14 +5123,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5123 5123
5124 netif_device_detach(netdev); 5124 netif_device_detach(netdev);
5125 5125
5126 rtnl_lock();
5126 if (netif_running(netdev)) { 5127 if (netif_running(netdev)) {
5127 rtnl_lock();
5128 ixgbe_down(adapter); 5128 ixgbe_down(adapter);
5129 ixgbe_free_irq(adapter); 5129 ixgbe_free_irq(adapter);
5130 ixgbe_free_all_tx_resources(adapter); 5130 ixgbe_free_all_tx_resources(adapter);
5131 ixgbe_free_all_rx_resources(adapter); 5131 ixgbe_free_all_rx_resources(adapter);
5132 rtnl_unlock();
5133 } 5132 }
5133 rtnl_unlock();
5134 5134
5135 ixgbe_clear_interrupt_scheme(adapter); 5135 ixgbe_clear_interrupt_scheme(adapter);
5136 5136
@@ -7206,6 +7206,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
7206 case IXGBE_SUBDEV_ID_82599_SFP: 7206 case IXGBE_SUBDEV_ID_82599_SFP:
7207 case IXGBE_SUBDEV_ID_82599_RNDC: 7207 case IXGBE_SUBDEV_ID_82599_RNDC:
7208 case IXGBE_SUBDEV_ID_82599_ECNA_DP: 7208 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
7209 case IXGBE_SUBDEV_ID_82599_LOM_SFP:
7209 is_wol_supported = 1; 7210 is_wol_supported = 1;
7210 break; 7211 break;
7211 } 7212 }
@@ -7625,9 +7626,7 @@ skip_sriov:
7625 e_err(probe, "failed to allocate sysfs resources\n"); 7626 e_err(probe, "failed to allocate sysfs resources\n");
7626#endif /* CONFIG_IXGBE_HWMON */ 7627#endif /* CONFIG_IXGBE_HWMON */
7627 7628
7628#ifdef CONFIG_DEBUG_FS
7629 ixgbe_dbg_adapter_init(adapter); 7629 ixgbe_dbg_adapter_init(adapter);
7630#endif /* CONFIG_DEBUG_FS */
7631 7630
7632 /* Need link setup for MNG FW, else wait for IXGBE_UP */ 7631 /* Need link setup for MNG FW, else wait for IXGBE_UP */
7633 if (hw->mng_fw_enabled && hw->mac.ops.setup_link) 7632 if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
@@ -7669,9 +7668,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
7669 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 7668 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7670 struct net_device *netdev = adapter->netdev; 7669 struct net_device *netdev = adapter->netdev;
7671 7670
7672#ifdef CONFIG_DEBUG_FS
7673 ixgbe_dbg_adapter_exit(adapter); 7671 ixgbe_dbg_adapter_exit(adapter);
7674#endif /*CONFIG_DEBUG_FS */
7675 7672
7676 set_bit(__IXGBE_DOWN, &adapter->state); 7673 set_bit(__IXGBE_DOWN, &adapter->state);
7677 cancel_work_sync(&adapter->service_task); 7674 cancel_work_sync(&adapter->service_task);
@@ -7934,15 +7931,11 @@ static int __init ixgbe_init_module(void)
7934 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); 7931 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
7935 pr_info("%s\n", ixgbe_copyright); 7932 pr_info("%s\n", ixgbe_copyright);
7936 7933
7937#ifdef CONFIG_DEBUG_FS
7938 ixgbe_dbg_init(); 7934 ixgbe_dbg_init();
7939#endif /* CONFIG_DEBUG_FS */
7940 7935
7941 ret = pci_register_driver(&ixgbe_driver); 7936 ret = pci_register_driver(&ixgbe_driver);
7942 if (ret) { 7937 if (ret) {
7943#ifdef CONFIG_DEBUG_FS
7944 ixgbe_dbg_exit(); 7938 ixgbe_dbg_exit();
7945#endif /* CONFIG_DEBUG_FS */
7946 return ret; 7939 return ret;
7947 } 7940 }
7948 7941
@@ -7968,9 +7961,7 @@ static void __exit ixgbe_exit_module(void)
7968#endif 7961#endif
7969 pci_unregister_driver(&ixgbe_driver); 7962 pci_unregister_driver(&ixgbe_driver);
7970 7963
7971#ifdef CONFIG_DEBUG_FS
7972 ixgbe_dbg_exit(); 7964 ixgbe_dbg_exit();
7973#endif /* CONFIG_DEBUG_FS */
7974 7965
7975 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 7966 rcu_barrier(); /* Wait for completion of call_rcu()'s */
7976} 7967}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 155a793705be..402f1a2ada3e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -57,6 +57,7 @@
57#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 57#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
58#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 58#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
59#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 59#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
60#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 61#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
61#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D 62#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
62#define IXGBE_DEV_ID_82599EN_SFP 0x1557 63#define IXGBE_DEV_ID_82599EN_SFP 0x1557