author     Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2013-02-23 02:29:56 -0500
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2013-04-18 19:40:25 -0400
commit     b980ac18c95f3251038da7a3826370aff05a7434 (patch)
tree       938b3acb07b97963db1006e61933a02b77ac3dca /drivers
parent     c8268921d443bd5c0c9b8fd7193d00533638ec03 (diff)
igb: Fix code comments and whitespace
Aligns the multi-line code comments with the desired style for the
networking tree. Also cleans up whitespace issues found during the
comment cleanup (i.e. removes unnecessary blank lines, uses tabs
where possible, properly wraps lines, and keeps strings on a
single line).
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
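For reference, the networking tree's preferred block-comment style differs from the kernel-wide default only in the opening line; a minimal illustration of the transformation this patch applies throughout:

```c
/*
 * Kernel-wide default style: the opening line of the block carries
 * no text. The networking tree rejects this form for its drivers.
 */

/* Networking tree style: the comment text begins on the same line as
 * the opening marker, which is exactly the change applied across igb
 * in this patch, saving one line per comment block.
 */
```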
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c     88
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h   30
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h        50
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c      26
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c      105
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h       17
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c       11
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.h       52
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c       25
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c      258
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h      45
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h            124
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c    137
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c       29
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c      1139
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c         57
16 files changed, 1010 insertions, 1183 deletions
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 3867ba192486..9d83058f2075 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -451,8 +451,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) | |||
451 | } | 451 | } |
452 | 452 | ||
453 | /* Set media type */ | 453 | /* Set media type */ |
454 | /* | 454 | /* The 82575 uses bits 22:23 for link mode. The mode can be changed |
455 | * The 82575 uses bits 22:23 for link mode. The mode can be changed | ||
456 | * based on the EEPROM. We cannot rely upon device ID. There | 455 | * based on the EEPROM. We cannot rely upon device ID. There |
457 | * is no distinguishable difference between fiber and internal | 456 | * is no distinguishable difference between fiber and internal |
458 | * SerDes mode on the 82575. There can be an external PHY attached | 457 | * SerDes mode on the 82575. There can be an external PHY attached |
@@ -621,8 +620,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) | |||
621 | u32 ctrl_ext; | 620 | u32 ctrl_ext; |
622 | u32 mdic; | 621 | u32 mdic; |
623 | 622 | ||
624 | /* | 623 | /* For SGMII PHYs, we try the list of possible addresses until |
625 | * For SGMII PHYs, we try the list of possible addresses until | ||
626 | * we find one that works. For non-SGMII PHYs | 624 | * we find one that works. For non-SGMII PHYs |
627 | * (e.g. integrated copper PHYs), an address of 1 should | 625 | * (e.g. integrated copper PHYs), an address of 1 should |
628 | * work. The result of this function should mean phy->phy_addr | 626 | * work. The result of this function should mean phy->phy_addr |
@@ -665,8 +663,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) | |||
665 | wrfl(); | 663 | wrfl(); |
666 | msleep(300); | 664 | msleep(300); |
667 | 665 | ||
668 | /* | 666 | /* The address field in the I2CCMD register is 3 bits and 0 is invalid. |
669 | * The address field in the I2CCMD register is 3 bits and 0 is invalid. | ||
670 | * Therefore, we need to test 1-7 | 667 | * Therefore, we need to test 1-7 |
671 | */ | 668 | */ |
672 | for (phy->addr = 1; phy->addr < 8; phy->addr++) { | 669 | for (phy->addr = 1; phy->addr < 8; phy->addr++) { |
@@ -674,8 +671,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) | |||
674 | if (ret_val == 0) { | 671 | if (ret_val == 0) { |
675 | hw_dbg("Vendor ID 0x%08X read at address %u\n", | 672 | hw_dbg("Vendor ID 0x%08X read at address %u\n", |
676 | phy_id, phy->addr); | 673 | phy_id, phy->addr); |
677 | /* | 674 | /* At the time of this writing, The M88 part is |
678 | * At the time of this writing, The M88 part is | ||
679 | * the only supported SGMII PHY product. | 675 | * the only supported SGMII PHY product. |
680 | */ | 676 | */ |
681 | if (phy_id == M88_VENDOR) | 677 | if (phy_id == M88_VENDOR) |
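The two hunks above describe the SGMII PHY probe: the I2CCMD address field is only 3 bits wide with 0 invalid, so candidate PHYs can only live at addresses 1-7, and the M88 part is the only supported SGMII PHY product. A minimal user-space sketch of that probe loop, where `read_phy_vendor_id` is a hypothetical stand-in for the driver's I2CCMD transaction:

```c
#include <stdint.h>
#include <stdio.h>

#define M88_VENDOR 0x0141	/* Marvell vendor ID, as in e1000_defines.h */

/* Hypothetical stand-in for the real I2CCMD read: pretend a Marvell
 * PHY answers at address 3. */
static int read_phy_vendor_id(uint8_t addr, uint32_t *phy_id)
{
	if (addr == 3) {
		*phy_id = M88_VENDOR;
		return 0;
	}
	return -1;
}

/* The I2CCMD address field is 3 bits and 0 is invalid, so test 1-7. */
static int probe_sgmii_phy(uint8_t *found_addr)
{
	for (uint8_t addr = 1; addr < 8; addr++) {
		uint32_t phy_id;

		if (read_phy_vendor_id(addr, &phy_id) == 0 &&
		    phy_id == M88_VENDOR) {
			*found_addr = addr;
			return 0;
		}
	}
	return -1;	/* no supported SGMII PHY found */
}

int main(void)
{
	uint8_t addr;

	if (probe_sgmii_phy(&addr) == 0)
		printf("SGMII PHY found at address %u\n", addr);
	return 0;
}
```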
@@ -711,15 +707,13 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) | |||
711 | { | 707 | { |
712 | s32 ret_val; | 708 | s32 ret_val; |
713 | 709 | ||
714 | /* | 710 | /* This isn't a true "hard" reset, but is the only reset |
715 | * This isn't a true "hard" reset, but is the only reset | ||
716 | * available to us at this time. | 711 | * available to us at this time. |
717 | */ | 712 | */ |
718 | 713 | ||
719 | hw_dbg("Soft resetting SGMII attached PHY...\n"); | 714 | hw_dbg("Soft resetting SGMII attached PHY...\n"); |
720 | 715 | ||
721 | /* | 716 | /* SFP documentation requires the following to configure the SPF module |
722 | * SFP documentation requires the following to configure the SPF module | ||
723 | * to work on SGMII. No further documentation is given. | 717 | * to work on SGMII. No further documentation is given. |
724 | */ | 718 | */ |
725 | ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); | 719 | ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); |
@@ -774,8 +768,7 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) | |||
774 | data &= ~IGP02E1000_PM_D0_LPLU; | 768 | data &= ~IGP02E1000_PM_D0_LPLU; |
775 | ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, | 769 | ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
776 | data); | 770 | data); |
777 | /* | 771 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used |
778 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
779 | * during Dx states where the power conservation is most | 772 | * during Dx states where the power conservation is most |
780 | * important. During driver activity we should enable | 773 | * important. During driver activity we should enable |
781 | * SmartSpeed, so performance is maintained. | 774 | * SmartSpeed, so performance is maintained. |
@@ -838,8 +831,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) | |||
838 | } else { | 831 | } else { |
839 | data &= ~E1000_82580_PM_D0_LPLU; | 832 | data &= ~E1000_82580_PM_D0_LPLU; |
840 | 833 | ||
841 | /* | 834 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used |
842 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
843 | * during Dx states where the power conservation is most | 835 | * during Dx states where the power conservation is most |
844 | * important. During driver activity we should enable | 836 | * important. During driver activity we should enable |
845 | * SmartSpeed, so performance is maintained. | 837 | * SmartSpeed, so performance is maintained. |
@@ -877,8 +869,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) | |||
877 | 869 | ||
878 | if (!active) { | 870 | if (!active) { |
879 | data &= ~E1000_82580_PM_D3_LPLU; | 871 | data &= ~E1000_82580_PM_D3_LPLU; |
880 | /* | 872 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used |
881 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
882 | * during Dx states where the power conservation is most | 873 | * during Dx states where the power conservation is most |
883 | * important. During driver activity we should enable | 874 | * important. During driver activity we should enable |
884 | * SmartSpeed, so performance is maintained. | 875 | * SmartSpeed, so performance is maintained. |
@@ -964,8 +955,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) | |||
964 | if (!(swfw_sync & (fwmask | swmask))) | 955 | if (!(swfw_sync & (fwmask | swmask))) |
965 | break; | 956 | break; |
966 | 957 | ||
967 | /* | 958 | /* Firmware currently using resource (fwmask) |
968 | * Firmware currently using resource (fwmask) | ||
969 | * or other software thread using resource (swmask) | 959 | * or other software thread using resource (swmask) |
970 | */ | 960 | */ |
971 | igb_put_hw_semaphore(hw); | 961 | igb_put_hw_semaphore(hw); |
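The hunk above is part of the software/firmware synchronization acquire loop: the resource is free only when neither the firmware bits (fwmask) nor another software thread's bits (swmask) are set, and on contention the arbitration semaphore is released before retrying. A sketch of that pattern against a simulated register; all helper names here are hypothetical stand-ins, not the driver's API:

```c
#include <stdbool.h>
#include <stdint.h>

static uint32_t swfw_sync_reg;	/* simulated SW_FW_SYNC register */

static bool get_hw_semaphore(void) { return true; }	/* stub */
static void put_hw_semaphore(void) { }			/* stub */
static void delay_ms(unsigned int ms) { (void)ms; }	/* stub */

/* Retry while firmware (fwmask) or another software thread (swmask)
 * currently owns the resource. */
static int acquire_swfw_sync(uint32_t swmask, uint32_t fwmask)
{
	for (int i = 0; i < 200; i++) {
		if (!get_hw_semaphore())
			return -1;

		if (!(swfw_sync_reg & (fwmask | swmask))) {
			swfw_sync_reg |= swmask;	/* claim the resource */
			put_hw_semaphore();
			return 0;
		}

		/* Busy: drop the arbitration semaphore and back off. */
		put_hw_semaphore();
		delay_ms(5);
	}
	return -1;	/* timed out */
}

int main(void)
{
	return acquire_swfw_sync(0x1, 0x10000) ? 1 : 0;
}
```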
@@ -1065,8 +1055,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw) | |||
1065 | if (hw->phy.media_type != e1000_media_type_copper) { | 1055 | if (hw->phy.media_type != e1000_media_type_copper) { |
1066 | ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, | 1056 | ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, |
1067 | &duplex); | 1057 | &duplex); |
1068 | /* | 1058 | /* Use this flag to determine if link needs to be checked or |
1069 | * Use this flag to determine if link needs to be checked or | ||
1070 | * not. If we have link clear the flag so that we do not | 1059 | * not. If we have link clear the flag so that we do not |
1071 | * continue to check for link. | 1060 | * continue to check for link. |
1072 | */ | 1061 | */ |
@@ -1135,15 +1124,13 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, | |||
1135 | *speed = 0; | 1124 | *speed = 0; |
1136 | *duplex = 0; | 1125 | *duplex = 0; |
1137 | 1126 | ||
1138 | /* | 1127 | /* Read the PCS Status register for link state. For non-copper mode, |
1139 | * Read the PCS Status register for link state. For non-copper mode, | ||
1140 | * the status register is not accurate. The PCS status register is | 1128 | * the status register is not accurate. The PCS status register is |
1141 | * used instead. | 1129 | * used instead. |
1142 | */ | 1130 | */ |
1143 | pcs = rd32(E1000_PCS_LSTAT); | 1131 | pcs = rd32(E1000_PCS_LSTAT); |
1144 | 1132 | ||
1145 | /* | 1133 | /* The link up bit determines when link is up on autoneg. The sync ok |
1146 | * The link up bit determines when link is up on autoneg. The sync ok | ||
1147 | * gets set once both sides sync up and agree upon link. Stable link | 1134 | * gets set once both sides sync up and agree upon link. Stable link |
1148 | * can be determined by checking for both link up and link sync ok | 1135 | * can be determined by checking for both link up and link sync ok |
1149 | */ | 1136 | */ |
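The comment above defines "stable link" on a SerDes port as link up plus sync ok, since sync ok is set only after both sides agree on link. A tiny sketch of that check; the bit values are assumed for illustration (the real masks are the E1000_PCS_LSTS_* defines):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCS_LSTS_LINK_OK	0x00000001	/* assumed bit position */
#define PCS_LSTS_SYNK_OK	0x00000010	/* assumed bit position */

/* Autoneg link is stable only when both the link-up bit and the
 * sync-ok bit are set in the PCS status register. */
static bool pcs_link_stable(uint32_t pcs_lstat)
{
	return (pcs_lstat & PCS_LSTS_LINK_OK) &&
	       (pcs_lstat & PCS_LSTS_SYNK_OK);
}

int main(void)
{
	printf("%d\n", pcs_link_stable(PCS_LSTS_LINK_OK));	/* 0: not synced */
	printf("%d\n", pcs_link_stable(PCS_LSTS_LINK_OK |
				       PCS_LSTS_SYNK_OK));	/* 1: stable */
	return 0;
}
```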
@@ -1214,8 +1201,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw) | |||
1214 | u32 ctrl, icr; | 1201 | u32 ctrl, icr; |
1215 | s32 ret_val; | 1202 | s32 ret_val; |
1216 | 1203 | ||
1217 | /* | 1204 | /* Prevent the PCI-E bus from sticking if there is no TLP connection |
1218 | * Prevent the PCI-E bus from sticking if there is no TLP connection | ||
1219 | * on the last TLP read/write transaction when MAC is reset. | 1205 | * on the last TLP read/write transaction when MAC is reset. |
1220 | */ | 1206 | */ |
1221 | ret_val = igb_disable_pcie_master(hw); | 1207 | ret_val = igb_disable_pcie_master(hw); |
@@ -1244,8 +1230,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw) | |||
1244 | 1230 | ||
1245 | ret_val = igb_get_auto_rd_done(hw); | 1231 | ret_val = igb_get_auto_rd_done(hw); |
1246 | if (ret_val) { | 1232 | if (ret_val) { |
1247 | /* | 1233 | /* When auto config read does not complete, do not |
1248 | * When auto config read does not complete, do not | ||
1249 | * return with an error. This can happen in situations | 1234 | * return with an error. This can happen in situations |
1250 | * where there is no eeprom and prevents getting link. | 1235 | * where there is no eeprom and prevents getting link. |
1251 | */ | 1236 | */ |
@@ -1308,8 +1293,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) | |||
1308 | /* Setup link and flow control */ | 1293 | /* Setup link and flow control */ |
1309 | ret_val = igb_setup_link(hw); | 1294 | ret_val = igb_setup_link(hw); |
1310 | 1295 | ||
1311 | /* | 1296 | /* Clear all of the statistics registers (clear on read). It is |
1312 | * Clear all of the statistics registers (clear on read). It is | ||
1313 | * important that we do this after we have tried to establish link | 1297 | * important that we do this after we have tried to establish link |
1314 | * because the symbol error count will increment wildly if there | 1298 | * because the symbol error count will increment wildly if there |
1315 | * is no link. | 1299 | * is no link. |
@@ -1412,8 +1396,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) | |||
1412 | return ret_val; | 1396 | return ret_val; |
1413 | 1397 | ||
1414 | 1398 | ||
1415 | /* | 1399 | /* On the 82575, SerDes loopback mode persists until it is |
1416 | * On the 82575, SerDes loopback mode persists until it is | ||
1417 | * explicitly turned off or a power cycle is performed. A read to | 1400 | * explicitly turned off or a power cycle is performed. A read to |
1418 | * the register does not indicate its status. Therefore, we ensure | 1401 | * the register does not indicate its status. Therefore, we ensure |
1419 | * loopback mode is disabled during initialization. | 1402 | * loopback mode is disabled during initialization. |
@@ -1467,8 +1450,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) | |||
1467 | pcs_autoneg = false; | 1450 | pcs_autoneg = false; |
1468 | } | 1451 | } |
1469 | 1452 | ||
1470 | /* | 1453 | /* non-SGMII modes only supports a speed of 1000/Full for the |
1471 | * non-SGMII modes only supports a speed of 1000/Full for the | ||
1472 | * link so it is best to just force the MAC and let the pcs | 1454 | * link so it is best to just force the MAC and let the pcs |
1473 | * link either autoneg or be forced to 1000/Full | 1455 | * link either autoneg or be forced to 1000/Full |
1474 | */ | 1456 | */ |
@@ -1482,8 +1464,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) | |||
1482 | 1464 | ||
1483 | wr32(E1000_CTRL, ctrl_reg); | 1465 | wr32(E1000_CTRL, ctrl_reg); |
1484 | 1466 | ||
1485 | /* | 1467 | /* New SerDes mode allows for forcing speed or autonegotiating speed |
1486 | * New SerDes mode allows for forcing speed or autonegotiating speed | ||
1487 | * at 1gb. Autoneg should be default set by most drivers. This is the | 1468 | * at 1gb. Autoneg should be default set by most drivers. This is the |
1488 | * mode that will be compatible with older link partners and switches. | 1469 | * mode that will be compatible with older link partners and switches. |
1489 | * However, both are supported by the hardware and some drivers/tools. | 1470 | * However, both are supported by the hardware and some drivers/tools. |
@@ -1593,8 +1574,7 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) | |||
1593 | { | 1574 | { |
1594 | s32 ret_val = 0; | 1575 | s32 ret_val = 0; |
1595 | 1576 | ||
1596 | /* | 1577 | /* If there's an alternate MAC address place it in RAR0 |
1597 | * If there's an alternate MAC address place it in RAR0 | ||
1598 | * so that it will override the Si installed default perm | 1578 | * so that it will override the Si installed default perm |
1599 | * address. | 1579 | * address. |
1600 | */ | 1580 | */ |
@@ -1778,8 +1758,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) | |||
1778 | if (gcr & E1000_GCR_CMPL_TMOUT_MASK) | 1758 | if (gcr & E1000_GCR_CMPL_TMOUT_MASK) |
1779 | goto out; | 1759 | goto out; |
1780 | 1760 | ||
1781 | /* | 1761 | /* if capabilities version is type 1 we can write the |
1782 | * if capababilities version is type 1 we can write the | ||
1783 | * timeout of 10ms to 200ms through the GCR register | 1762 | * timeout of 10ms to 200ms through the GCR register |
1784 | */ | 1763 | */ |
1785 | if (!(gcr & E1000_GCR_CAP_VER2)) { | 1764 | if (!(gcr & E1000_GCR_CAP_VER2)) { |
@@ -1787,8 +1766,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) | |||
1787 | goto out; | 1766 | goto out; |
1788 | } | 1767 | } |
1789 | 1768 | ||
1790 | /* | 1769 | /* for version 2 capabilities we need to write the config space |
1791 | * for version 2 capabilities we need to write the config space | ||
1792 | * directly in order to set the completion timeout value for | 1770 | * directly in order to set the completion timeout value for |
1793 | * 16ms to 55ms | 1771 | * 16ms to 55ms |
1794 | */ | 1772 | */ |
@@ -1880,7 +1858,6 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) | |||
1880 | break; | 1858 | break; |
1881 | } | 1859 | } |
1882 | 1860 | ||
1883 | |||
1884 | } | 1861 | } |
1885 | 1862 | ||
1886 | /** | 1863 | /** |
@@ -1915,7 +1892,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) | |||
1915 | { | 1892 | { |
1916 | s32 ret_val; | 1893 | s32 ret_val; |
1917 | 1894 | ||
1918 | |||
1919 | ret_val = hw->phy.ops.acquire(hw); | 1895 | ret_val = hw->phy.ops.acquire(hw); |
1920 | if (ret_val) | 1896 | if (ret_val) |
1921 | goto out; | 1897 | goto out; |
@@ -2017,8 +1993,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) | |||
2017 | /* Get current control state. */ | 1993 | /* Get current control state. */ |
2018 | ctrl = rd32(E1000_CTRL); | 1994 | ctrl = rd32(E1000_CTRL); |
2019 | 1995 | ||
2020 | /* | 1996 | /* Prevent the PCI-E bus from sticking if there is no TLP connection |
2021 | * Prevent the PCI-E bus from sticking if there is no TLP connection | ||
2022 | * on the last TLP read/write transaction when MAC is reset. | 1997 | * on the last TLP read/write transaction when MAC is reset. |
2023 | */ | 1998 | */ |
2024 | ret_val = igb_disable_pcie_master(hw); | 1999 | ret_val = igb_disable_pcie_master(hw); |
@@ -2053,8 +2028,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw) | |||
2053 | 2028 | ||
2054 | ret_val = igb_get_auto_rd_done(hw); | 2029 | ret_val = igb_get_auto_rd_done(hw); |
2055 | if (ret_val) { | 2030 | if (ret_val) { |
2056 | /* | 2031 | /* When auto config read does not complete, do not |
2057 | * When auto config read does not complete, do not | ||
2058 | * return with an error. This can happen in situations | 2032 | * return with an error. This can happen in situations |
2059 | * where there is no eeprom and prevents getting link. | 2033 | * where there is no eeprom and prevents getting link. |
2060 | */ | 2034 | */ |
@@ -2198,7 +2172,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) | |||
2198 | 2172 | ||
2199 | if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { | 2173 | if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { |
2200 | /* if checksums compatibility bit is set validate checksums | 2174 | /* if checksums compatibility bit is set validate checksums |
2201 | * for all 4 ports. */ | 2175 | * for all 4 ports. |
2176 | */ | ||
2202 | eeprom_regions_count = 4; | 2177 | eeprom_regions_count = 4; |
2203 | } | 2178 | } |
2204 | 2179 | ||
@@ -2339,7 +2314,6 @@ s32 igb_set_eee_i350(struct e1000_hw *hw) | |||
2339 | if (eee_su & E1000_EEE_SU_LPI_CLK_STP) | 2314 | if (eee_su & E1000_EEE_SU_LPI_CLK_STP) |
2340 | hw_dbg("LPI Clock Stop Bit should not be set!\n"); | 2315 | hw_dbg("LPI Clock Stop Bit should not be set!\n"); |
2341 | 2316 | ||
2342 | |||
2343 | } else { | 2317 | } else { |
2344 | ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | | 2318 | ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | |
2345 | E1000_IPCNFG_EEE_100M_AN); | 2319 | E1000_IPCNFG_EEE_100M_AN); |
@@ -2369,11 +2343,12 @@ static const u8 e1000_emc_therm_limit[4] = { | |||
2369 | E1000_EMC_DIODE3_THERM_LIMIT | 2343 | E1000_EMC_DIODE3_THERM_LIMIT |
2370 | }; | 2344 | }; |
2371 | 2345 | ||
2372 | /* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data | 2346 | /** |
2347 | * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data | ||
2373 | * @hw: pointer to hardware structure | 2348 | * @hw: pointer to hardware structure |
2374 | * | 2349 | * |
2375 | * Updates the temperatures in mac.thermal_sensor_data | 2350 | * Updates the temperatures in mac.thermal_sensor_data |
2376 | */ | 2351 | **/ |
2377 | s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) | 2352 | s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) |
2378 | { | 2353 | { |
2379 | s32 status = E1000_SUCCESS; | 2354 | s32 status = E1000_SUCCESS; |
@@ -2421,12 +2396,13 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) | |||
2421 | return status; | 2396 | return status; |
2422 | } | 2397 | } |
2423 | 2398 | ||
2424 | /* igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds | 2399 | /** |
2400 | * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds | ||
2425 | * @hw: pointer to hardware structure | 2401 | * @hw: pointer to hardware structure |
2426 | * | 2402 | * |
2427 | * Sets the thermal sensor thresholds according to the NVM map | 2403 | * Sets the thermal sensor thresholds according to the NVM map |
2428 | * and save off the threshold and location values into mac.thermal_sensor_data | 2404 | * and save off the threshold and location values into mac.thermal_sensor_data |
2429 | */ | 2405 | **/ |
2430 | s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) | 2406 | s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) |
2431 | { | 2407 | { |
2432 | s32 status = E1000_SUCCESS; | 2408 | s32 status = E1000_SUCCESS; |
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 7e13337d3b9d..66a1df974284 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -138,8 +138,7 @@ | |||
138 | #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ | 138 | #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ |
139 | #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ | 139 | #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ |
140 | 140 | ||
141 | /* | 141 | /* Use byte values for the following shift parameters |
142 | * Use byte values for the following shift parameters | ||
143 | * Usage: | 142 | * Usage: |
144 | * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & | 143 | * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & |
145 | * E1000_PSRCTL_BSIZE0_MASK) | | 144 | * E1000_PSRCTL_BSIZE0_MASK) | |
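A worked example of the PSRCTL usage formula quoted in the comment above: the buffer size is rounded up to the register's granularity, then shifted into units. The shift and mask values here are assumed for illustration, on the premise that BSIZE0 is expressed in 128-byte units:

```c
#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

#define PSRCTL_BSIZE0_MASK	0x0000007F	/* assumed */
#define PSRCTL_BSIZE0_SHIFT	7		/* assumed: bytes -> 128B units */

int main(void)
{
	uint32_t psrctl = 0;
	uint32_t value0 = 1000;	/* requested buffer 0 size in bytes */

	/* Round up to 128-byte granularity, then convert to units. */
	psrctl |= (ROUNDUP(value0, 128) >> PSRCTL_BSIZE0_SHIFT) &
		  PSRCTL_BSIZE0_MASK;

	printf("PSRCTL = 0x%08X (BSIZE0 = %u x 128 bytes)\n",
	       psrctl, psrctl & PSRCTL_BSIZE0_MASK);	/* 8 x 128 = 1024 */
	return 0;
}
```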
@@ -382,8 +381,7 @@ | |||
382 | #define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ | 381 | #define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ |
383 | /* TCP Timer */ | 382 | /* TCP Timer */ |
384 | 383 | ||
385 | /* | 384 | /* This defines the bits that are set in the Interrupt Mask |
386 | * This defines the bits that are set in the Interrupt Mask | ||
387 | * Set/Read Register. Each bit is documented below: | 385 | * Set/Read Register. Each bit is documented below: |
388 | * o RXT0 = Receiver Timer Interrupt (ring 0) | 386 | * o RXT0 = Receiver Timer Interrupt (ring 0) |
389 | * o TXDW = Transmit Descriptor Written Back | 387 | * o TXDW = Transmit Descriptor Written Back |
@@ -440,8 +438,7 @@ | |||
440 | #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ | 438 | #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ |
441 | 439 | ||
442 | /* Receive Address */ | 440 | /* Receive Address */ |
443 | /* | 441 | /* Number of high/low register pairs in the RAR. The RAR (Receive Address |
444 | * Number of high/low register pairs in the RAR. The RAR (Receive Address | ||
445 | * Registers) holds the directed and multicast addresses that we monitor. | 442 | * Registers) holds the directed and multicast addresses that we monitor. |
446 | * Technically, we have 16 spots. However, we reserve one of these spots | 443 | * Technically, we have 16 spots. However, we reserve one of these spots |
447 | * (RAR[15]) for our directed address used by controllers with | 444 | * (RAR[15]) for our directed address used by controllers with |
@@ -760,8 +757,7 @@ | |||
760 | #define MAX_PHY_MULTI_PAGE_REG 0xF | 757 | #define MAX_PHY_MULTI_PAGE_REG 0xF |
761 | 758 | ||
762 | /* Bit definitions for valid PHY IDs. */ | 759 | /* Bit definitions for valid PHY IDs. */ |
763 | /* | 760 | /* I = Integrated |
764 | * I = Integrated | ||
765 | * E = External | 761 | * E = External |
766 | */ | 762 | */ |
767 | #define M88E1111_I_PHY_ID 0x01410CC0 | 763 | #define M88E1111_I_PHY_ID 0x01410CC0 |
@@ -791,8 +787,7 @@ | |||
791 | #define M88E1000_PSCR_AUTO_X_1000T 0x0040 | 787 | #define M88E1000_PSCR_AUTO_X_1000T 0x0040 |
792 | /* Auto crossover enabled all speeds */ | 788 | /* Auto crossover enabled all speeds */ |
793 | #define M88E1000_PSCR_AUTO_X_MODE 0x0060 | 789 | #define M88E1000_PSCR_AUTO_X_MODE 0x0060 |
794 | /* | 790 | /* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold |
795 | * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold | ||
796 | * 0=Normal 10BASE-T Rx Threshold | 791 | * 0=Normal 10BASE-T Rx Threshold |
797 | */ | 792 | */ |
798 | /* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ | 793 | /* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ |
@@ -802,8 +797,7 @@ | |||
802 | #define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ | 797 | #define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ |
803 | #define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ | 798 | #define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ |
804 | #define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ | 799 | #define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ |
805 | /* | 800 | /* 0 = <50M |
806 | * 0 = <50M | ||
807 | * 1 = 50-80M | 801 | * 1 = 50-80M |
808 | * 2 = 80-110M | 802 | * 2 = 80-110M |
809 | * 3 = 110-140M | 803 | * 3 = 110-140M |
@@ -816,20 +810,17 @@ | |||
816 | #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 | 810 | #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 |
817 | 811 | ||
818 | /* M88E1000 Extended PHY Specific Control Register */ | 812 | /* M88E1000 Extended PHY Specific Control Register */ |
819 | /* | 813 | /* 1 = Lost lock detect enabled. |
820 | * 1 = Lost lock detect enabled. | ||
821 | * Will assert lost lock and bring | 814 | * Will assert lost lock and bring |
822 | * link down if idle not seen | 815 | * link down if idle not seen |
823 | * within 1ms in 1000BASE-T | 816 | * within 1ms in 1000BASE-T |
824 | */ | 817 | */ |
825 | /* | 818 | /* Number of times we will attempt to autonegotiate before downshifting if we |
826 | * Number of times we will attempt to autonegotiate before downshifting if we | ||
827 | * are the master | 819 | * are the master |
828 | */ | 820 | */ |
829 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 | 821 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 |
830 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 | 822 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 |
831 | /* | 823 | /* Number of times we will attempt to autonegotiate before downshifting if we |
832 | * Number of times we will attempt to autonegotiate before downshifting if we | ||
833 | * are the slave | 824 | * are the slave |
834 | */ | 825 | */ |
835 | #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 | 826 | #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 |
@@ -844,8 +835,7 @@ | |||
844 | 835 | ||
845 | /* i347-AT4 Extended PHY Specific Control Register */ | 836 | /* i347-AT4 Extended PHY Specific Control Register */ |
846 | 837 | ||
847 | /* | 838 | /* Number of times we will attempt to autonegotiate before downshifting if we |
848 | * Number of times we will attempt to autonegotiate before downshifting if we | ||
849 | * are the master | 839 | * are the master |
850 | */ | 840 | */ |
851 | #define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 | 841 | #define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 |
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index f8cd124dcf1d..84df815ea0e8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -38,31 +38,31 @@ | |||
38 | 38 | ||
39 | struct e1000_hw; | 39 | struct e1000_hw; |
40 | 40 | ||
41 | #define E1000_DEV_ID_82576 0x10C9 | 41 | #define E1000_DEV_ID_82576 0x10C9 |
42 | #define E1000_DEV_ID_82576_FIBER 0x10E6 | 42 | #define E1000_DEV_ID_82576_FIBER 0x10E6 |
43 | #define E1000_DEV_ID_82576_SERDES 0x10E7 | 43 | #define E1000_DEV_ID_82576_SERDES 0x10E7 |
44 | #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 | 44 | #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 |
45 | #define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 | 45 | #define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 |
46 | #define E1000_DEV_ID_82576_NS 0x150A | 46 | #define E1000_DEV_ID_82576_NS 0x150A |
47 | #define E1000_DEV_ID_82576_NS_SERDES 0x1518 | 47 | #define E1000_DEV_ID_82576_NS_SERDES 0x1518 |
48 | #define E1000_DEV_ID_82576_SERDES_QUAD 0x150D | 48 | #define E1000_DEV_ID_82576_SERDES_QUAD 0x150D |
49 | #define E1000_DEV_ID_82575EB_COPPER 0x10A7 | 49 | #define E1000_DEV_ID_82575EB_COPPER 0x10A7 |
50 | #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 | 50 | #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 |
51 | #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 | 51 | #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 |
52 | #define E1000_DEV_ID_82580_COPPER 0x150E | 52 | #define E1000_DEV_ID_82580_COPPER 0x150E |
53 | #define E1000_DEV_ID_82580_FIBER 0x150F | 53 | #define E1000_DEV_ID_82580_FIBER 0x150F |
54 | #define E1000_DEV_ID_82580_SERDES 0x1510 | 54 | #define E1000_DEV_ID_82580_SERDES 0x1510 |
55 | #define E1000_DEV_ID_82580_SGMII 0x1511 | 55 | #define E1000_DEV_ID_82580_SGMII 0x1511 |
56 | #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 | 56 | #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 |
57 | #define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 | 57 | #define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 |
58 | #define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 | 58 | #define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 |
59 | #define E1000_DEV_ID_DH89XXCC_SERDES 0x043A | 59 | #define E1000_DEV_ID_DH89XXCC_SERDES 0x043A |
60 | #define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C | 60 | #define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C |
61 | #define E1000_DEV_ID_DH89XXCC_SFP 0x0440 | 61 | #define E1000_DEV_ID_DH89XXCC_SFP 0x0440 |
62 | #define E1000_DEV_ID_I350_COPPER 0x1521 | 62 | #define E1000_DEV_ID_I350_COPPER 0x1521 |
63 | #define E1000_DEV_ID_I350_FIBER 0x1522 | 63 | #define E1000_DEV_ID_I350_FIBER 0x1522 |
64 | #define E1000_DEV_ID_I350_SERDES 0x1523 | 64 | #define E1000_DEV_ID_I350_SERDES 0x1523 |
65 | #define E1000_DEV_ID_I350_SGMII 0x1524 | 65 | #define E1000_DEV_ID_I350_SGMII 0x1524 |
66 | #define E1000_DEV_ID_I210_COPPER 0x1533 | 66 | #define E1000_DEV_ID_I210_COPPER 0x1533 |
67 | #define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 | 67 | #define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 |
68 | #define E1000_DEV_ID_I210_COPPER_IT 0x1535 | 68 | #define E1000_DEV_ID_I210_COPPER_IT 0x1535 |
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 7caa62b7d45a..7df442a3cdfd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -103,7 +103,7 @@ void igb_release_nvm_i210(struct e1000_hw *hw) | |||
103 | * @hw: pointer to the HW structure | 103 | * @hw: pointer to the HW structure |
104 | * | 104 | * |
105 | * Release hardware semaphore used to access the PHY or NVM | 105 | * Release hardware semaphore used to access the PHY or NVM |
106 | */ | 106 | **/ |
107 | static void igb_put_hw_semaphore_i210(struct e1000_hw *hw) | 107 | static void igb_put_hw_semaphore_i210(struct e1000_hw *hw) |
108 | { | 108 | { |
109 | u32 swsm; | 109 | u32 swsm; |
@@ -141,9 +141,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) | |||
141 | if (!(swfw_sync & fwmask)) | 141 | if (!(swfw_sync & fwmask)) |
142 | break; | 142 | break; |
143 | 143 | ||
144 | /* | 144 | /* Firmware currently using resource (fwmask) */ |
145 | * Firmware currently using resource (fwmask) | ||
146 | */ | ||
147 | igb_put_hw_semaphore_i210(hw); | 145 | igb_put_hw_semaphore_i210(hw); |
148 | mdelay(5); | 146 | mdelay(5); |
149 | i++; | 147 | i++; |
@@ -203,7 +201,8 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, | |||
203 | 201 | ||
204 | /* We cannot hold synchronization semaphores for too long, | 202 | /* We cannot hold synchronization semaphores for too long, |
205 | * because of forceful takeover procedure. However it is more efficient | 203 | * because of forceful takeover procedure. However it is more efficient |
206 | * to read in bursts than synchronizing access for each word. */ | 204 | * to read in bursts than synchronizing access for each word. |
205 | */ | ||
207 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { | 206 | for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { |
208 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? | 207 | count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? |
209 | E1000_EERD_EEWR_MAX_COUNT : (words - i); | 208 | E1000_EERD_EEWR_MAX_COUNT : (words - i); |
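The hunk above explains why NVM reads are chunked: the synchronization semaphore cannot be held too long because of the forceful takeover procedure, yet bursts are more efficient than per-word locking. A sketch of that chunking loop; the 512-word burst limit is an assumption standing in for E1000_EERD_EEWR_MAX_COUNT:

```c
#include <stdint.h>
#include <stdio.h>

#define EERD_EEWR_MAX_COUNT 512	/* assumed per-burst word limit */

/* Split a 'words'-long read into bursts so the semaphore is never
 * held for the whole transfer, mirroring the driver's loop shape. */
static void read_in_bursts(uint16_t offset, uint16_t words)
{
	for (uint16_t i = 0; i < words; i += EERD_EEWR_MAX_COUNT) {
		uint16_t count = (words - i) > EERD_EEWR_MAX_COUNT ?
				 EERD_EEWR_MAX_COUNT : (words - i);

		/* acquire semaphore, read 'count' words at offset + i,
		 * release semaphore */
		printf("burst: offset %u, %u words\n", offset + i, count);
	}
}

int main(void)
{
	read_in_bursts(0, 1200);	/* -> bursts of 512, 512, 176 words */
	return 0;
}
```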
@@ -242,8 +241,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, | |||
242 | u32 attempts = 100000; | 241 | u32 attempts = 100000; |
243 | s32 ret_val = E1000_SUCCESS; | 242 | s32 ret_val = E1000_SUCCESS; |
244 | 243 | ||
245 | /* | 244 | /* A check for invalid values: offset too large, too many words, |
246 | * A check for invalid values: offset too large, too many words, | ||
247 | * too many words for the offset, and not enough words. | 245 | * too many words for the offset, and not enough words. |
248 | */ | 246 | */ |
249 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 247 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
@@ -294,7 +292,7 @@ out: | |||
294 | * | 292 | * |
295 | * If error code is returned, data and Shadow RAM may be inconsistent - buffer | 293 | * If error code is returned, data and Shadow RAM may be inconsistent - buffer |
296 | * partially written. | 294 | * partially written. |
297 | */ | 295 | **/ |
298 | s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, | 296 | s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, |
299 | u16 *data) | 297 | u16 *data) |
300 | { | 298 | { |
@@ -549,8 +547,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) | |||
549 | 547 | ||
550 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | 548 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { |
551 | 549 | ||
552 | /* | 550 | /* Replace the read function with semaphore grabbing with |
553 | * Replace the read function with semaphore grabbing with | ||
554 | * the one that skips this for a while. | 551 | * the one that skips this for a while. |
555 | * We have semaphore taken already here. | 552 | * We have semaphore taken already here. |
556 | */ | 553 | */ |
@@ -570,7 +567,6 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) | |||
570 | return status; | 567 | return status; |
571 | } | 568 | } |
572 | 569 | ||
573 | |||
574 | /** | 570 | /** |
575 | * igb_update_nvm_checksum_i210 - Update EEPROM checksum | 571 | * igb_update_nvm_checksum_i210 - Update EEPROM checksum |
576 | * @hw: pointer to the HW structure | 572 | * @hw: pointer to the HW structure |
@@ -585,8 +581,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) | |||
585 | u16 checksum = 0; | 581 | u16 checksum = 0; |
586 | u16 i, nvm_data; | 582 | u16 i, nvm_data; |
587 | 583 | ||
588 | /* | 584 | /* Read the first word from the EEPROM. If this times out or fails, do |
589 | * Read the first word from the EEPROM. If this times out or fails, do | ||
590 | * not continue or we could be in for a very long wait while every | 585 | * not continue or we could be in for a very long wait while every |
591 | * EEPROM read fails | 586 | * EEPROM read fails |
592 | */ | 587 | */ |
@@ -597,8 +592,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) | |||
597 | } | 592 | } |
598 | 593 | ||
599 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { | 594 | if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { |
600 | /* | 595 | /* Do not use hw->nvm.ops.write, hw->nvm.ops.read |
601 | * Do not use hw->nvm.ops.write, hw->nvm.ops.read | ||
602 | * because we do not want to take the synchronization | 596 | * because we do not want to take the synchronization |
603 | * semaphores twice here. | 597 | * semaphores twice here. |
604 | */ | 598 | */ |
@@ -635,7 +629,7 @@ out: | |||
635 | * igb_pool_flash_update_done_i210 - Pool FLUDONE status. | 629 | * igb_pool_flash_update_done_i210 - Pool FLUDONE status. |
636 | * @hw: pointer to the HW structure | 630 | * @hw: pointer to the HW structure |
637 | * | 631 | * |
638 | */ | 632 | **/ |
639 | static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) | 633 | static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) |
640 | { | 634 | { |
641 | s32 ret_val = -E1000_ERR_NVM; | 635 | s32 ret_val = -E1000_ERR_NVM; |
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 5d407f46a21b..afbab053269b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -230,8 +230,8 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) | |||
230 | * Checks the nvm for an alternate MAC address. An alternate MAC address | 230 | * Checks the nvm for an alternate MAC address. An alternate MAC address |
231 | * can be setup by pre-boot software and must be treated like a permanent | 231 | * can be setup by pre-boot software and must be treated like a permanent |
232 | * address and must override the actual permanent MAC address. If an | 232 | * address and must override the actual permanent MAC address. If an |
233 | * alternate MAC address is fopund it is saved in the hw struct and | 233 | * alternate MAC address is found it is saved in the hw struct and |
234 | * prgrammed into RAR0 and the cuntion returns success, otherwise the | 234 | * programmed into RAR0 and the function returns success, otherwise the |
235 | * function returns an error. | 235 | * function returns an error. |
236 | **/ | 236 | **/ |
237 | s32 igb_check_alt_mac_addr(struct e1000_hw *hw) | 237 | s32 igb_check_alt_mac_addr(struct e1000_hw *hw) |
@@ -241,8 +241,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) | |||
241 | u16 offset, nvm_alt_mac_addr_offset, nvm_data; | 241 | u16 offset, nvm_alt_mac_addr_offset, nvm_data; |
242 | u8 alt_mac_addr[ETH_ALEN]; | 242 | u8 alt_mac_addr[ETH_ALEN]; |
243 | 243 | ||
244 | /* | 244 | /* Alternate MAC address is handled by the option ROM for 82580 |
245 | * Alternate MAC address is handled by the option ROM for 82580 | ||
246 | * and newer. SW support not required. | 245 | * and newer. SW support not required. |
247 | */ | 246 | */ |
248 | if (hw->mac.type >= e1000_82580) | 247 | if (hw->mac.type >= e1000_82580) |
@@ -285,8 +284,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) | |||
285 | goto out; | 284 | goto out; |
286 | } | 285 | } |
287 | 286 | ||
288 | /* | 287 | /* We have a valid alternate MAC address, and we want to treat it the |
289 | * We have a valid alternate MAC address, and we want to treat it the | ||
290 | * same as the normal permanent MAC address stored by the HW into the | 288 | * same as the normal permanent MAC address stored by the HW into the |
291 | * RAR. Do this by mapping this address into RAR0. | 289 | * RAR. Do this by mapping this address into RAR0. |
292 | */ | 290 | */ |
@@ -309,8 +307,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) | |||
309 | { | 307 | { |
310 | u32 rar_low, rar_high; | 308 | u32 rar_low, rar_high; |
311 | 309 | ||
312 | /* | 310 | /* HW expects these in little endian so we reverse the byte order |
313 | * HW expects these in little endian so we reverse the byte order | ||
314 | * from network order (big endian) to little endian | 311 | * from network order (big endian) to little endian |
315 | */ | 312 | */ |
316 | rar_low = ((u32) addr[0] | | 313 | rar_low = ((u32) addr[0] | |
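The comment above notes that the hardware expects the MAC address in little endian, so the network-order (big endian) bytes are reversed when packed into the RAR register pair. A minimal sketch of that byte-order conversion:

```c
#include <stdint.h>
#include <stdio.h>

/* Pack a network-order MAC address into the two RAR halves the way
 * the comment describes: byte 0 lands in the least significant byte
 * of rar_low. */
static void pack_rar(const uint8_t addr[6],
		     uint32_t *rar_low, uint32_t *rar_high)
{
	*rar_low = (uint32_t)addr[0] |
		   ((uint32_t)addr[1] << 8) |
		   ((uint32_t)addr[2] << 16) |
		   ((uint32_t)addr[3] << 24);
	*rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1B, 0x21, 0xAA, 0xBB, 0xCC };
	uint32_t lo, hi;

	pack_rar(mac, &lo, &hi);
	printf("RAR low 0x%08X, high 0x%08X\n", lo, hi);
	/* prints: RAR low 0xAA211B00, high 0x0000CCBB */
	return 0;
}
```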
@@ -323,8 +320,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) | |||
323 | if (rar_low || rar_high) | 320 | if (rar_low || rar_high) |
324 | rar_high |= E1000_RAH_AV; | 321 | rar_high |= E1000_RAH_AV; |
325 | 322 | ||
326 | /* | 323 | /* Some bridges will combine consecutive 32-bit writes into |
327 | * Some bridges will combine consecutive 32-bit writes into | ||
328 | * a single burst write, which will malfunction on some parts. | 324 | * a single burst write, which will malfunction on some parts. |
329 | * The flushes avoid this. | 325 | * The flushes avoid this. |
330 | */ | 326 | */ |
@@ -348,8 +344,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value) | |||
348 | { | 344 | { |
349 | u32 hash_bit, hash_reg, mta; | 345 | u32 hash_bit, hash_reg, mta; |
350 | 346 | ||
351 | /* | 347 | /* The MTA is a register array of 32-bit registers. It is |
352 | * The MTA is a register array of 32-bit registers. It is | ||
353 | * treated like an array of (32*mta_reg_count) bits. We want to | 348 | * treated like an array of (32*mta_reg_count) bits. We want to |
354 | * set bit BitArray[hash_value]. So we figure out what register | 349 | * set bit BitArray[hash_value]. So we figure out what register |
355 | * the bit is in, read it, OR in the new bit, then write | 350 | * the bit is in, read it, OR in the new bit, then write |
@@ -386,15 +381,13 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) | |||
386 | /* Register count multiplied by bits per register */ | 381 | /* Register count multiplied by bits per register */ |
387 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; | 382 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; |
388 | 383 | ||
389 | /* | 384 | /* For a mc_filter_type of 0, bit_shift is the number of left-shifts |
390 | * For a mc_filter_type of 0, bit_shift is the number of left-shifts | ||
391 | * where 0xFF would still fall within the hash mask. | 385 | * where 0xFF would still fall within the hash mask. |
392 | */ | 386 | */ |
393 | while (hash_mask >> bit_shift != 0xFF) | 387 | while (hash_mask >> bit_shift != 0xFF) |
394 | bit_shift++; | 388 | bit_shift++; |
395 | 389 | ||
396 | /* | 390 | /* The portion of the address that is used for the hash table |
397 | * The portion of the address that is used for the hash table | ||
398 | * is determined by the mc_filter_type setting. | 391 | * is determined by the mc_filter_type setting. |
399 | * The algorithm is such that there is a total of 8 bits of shifting. | 392 | * The algorithm is such that there is a total of 8 bits of shifting. |
400 | * The bit_shift for a mc_filter_type of 0 represents the number of | 393 | * The bit_shift for a mc_filter_type of 0 represents the number of |
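The two hunks above describe the multicast hash: the MTA is a (32 * mta_reg_count)-bit array, bit_shift is however many left-shifts keep 0xFF inside the hash mask, and the filter type adds up to 8 total bits of shifting. A sketch of that computation, simplified to mc_filter_type 0 and assuming 128 MTA registers:

```c
#include <stdint.h>
#include <stdio.h>

/* Hash a multicast address into a bit index of the MTA bit array,
 * following the driver's formula for mc_filter_type 0. */
static uint32_t hash_mc_addr(const uint8_t *mc_addr, uint32_t mta_reg_count)
{
	uint32_t hash_mask = (mta_reg_count * 32) - 1;
	uint32_t bit_shift = 0;

	/* bit_shift is the number of left-shifts where 0xFF still
	 * falls within the hash mask. */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	return hash_mask & (((uint32_t)mc_addr[4] >> (8 - bit_shift)) |
			    ((uint32_t)mc_addr[5] << bit_shift));
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
	uint32_t hash = hash_mc_addr(mc, 128);	/* 4096-bit array */

	/* Register index and bit within it, as igb_mta_set() derives. */
	printf("hash %u -> MTA[%u] bit %u\n", hash, hash >> 5, hash & 31);
	return 0;
}
```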
@@ -536,8 +529,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw) | |||
536 | s32 ret_val; | 529 | s32 ret_val; |
537 | bool link; | 530 | bool link; |
538 | 531 | ||
539 | /* | 532 | /* We only want to go out to the PHY registers to see if Auto-Neg |
540 | * We only want to go out to the PHY registers to see if Auto-Neg | ||
541 | * has completed and/or if our link status has changed. The | 533 | * has completed and/or if our link status has changed. The |
542 | * get_link_status flag is set upon receiving a Link Status | 534 | * get_link_status flag is set upon receiving a Link Status |
543 | * Change or Rx Sequence Error interrupt. | 535 | * Change or Rx Sequence Error interrupt. |
@@ -547,8 +539,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw) | |||
547 | goto out; | 539 | goto out; |
548 | } | 540 | } |
549 | 541 | ||
550 | /* | 542 | /* First we want to see if the MII Status Register reports |
551 | * First we want to see if the MII Status Register reports | ||
552 | * link. If so, then we want to get the current speed/duplex | 543 | * link. If so, then we want to get the current speed/duplex |
553 | * of the PHY. | 544 | * of the PHY. |
554 | */ | 545 | */ |
@@ -561,14 +552,12 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw) | |||
561 | 552 | ||
562 | mac->get_link_status = false; | 553 | mac->get_link_status = false; |
563 | 554 | ||
564 | /* | 555 | /* Check if there was DownShift, must be checked |
565 | * Check if there was DownShift, must be checked | ||
566 | * immediately after link-up | 556 | * immediately after link-up |
567 | */ | 557 | */ |
568 | igb_check_downshift(hw); | 558 | igb_check_downshift(hw); |
569 | 559 | ||
570 | /* | 560 | /* If we are forcing speed/duplex, then we simply return since |
571 | * If we are forcing speed/duplex, then we simply return since | ||
572 | * we have already determined whether we have link or not. | 561 | * we have already determined whether we have link or not. |
573 | */ | 562 | */ |
574 | if (!mac->autoneg) { | 563 | if (!mac->autoneg) { |
@@ -576,15 +565,13 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw) | |||
576 | goto out; | 565 | goto out; |
577 | } | 566 | } |
578 | 567 | ||
579 | /* | 568 | /* Auto-Neg is enabled. Auto Speed Detection takes care |
580 | * Auto-Neg is enabled. Auto Speed Detection takes care | ||
581 | * of MAC speed/duplex configuration. So we only need to | 569 | * of MAC speed/duplex configuration. So we only need to |
582 | * configure Collision Distance in the MAC. | 570 | * configure Collision Distance in the MAC. |
583 | */ | 571 | */ |
584 | igb_config_collision_dist(hw); | 572 | igb_config_collision_dist(hw); |
585 | 573 | ||
586 | /* | 574 | /* Configure Flow Control now that Auto-Neg has completed. |
587 | * Configure Flow Control now that Auto-Neg has completed. | ||
588 | * First, we need to restore the desired flow control | 575 | * First, we need to restore the desired flow control |
589 | * settings because we may have had to re-autoneg with a | 576 | * settings because we may have had to re-autoneg with a |
590 | * different link partner. | 577 | * different link partner. |
@@ -611,15 +598,13 @@ s32 igb_setup_link(struct e1000_hw *hw) | |||
611 | { | 598 | { |
612 | s32 ret_val = 0; | 599 | s32 ret_val = 0; |
613 | 600 | ||
614 | /* | 601 | /* In the case of the phy reset being blocked, we already have a link. |
615 | * In the case of the phy reset being blocked, we already have a link. | ||
616 | * We do not need to set it up again. | 602 | * We do not need to set it up again. |
617 | */ | 603 | */ |
618 | if (igb_check_reset_block(hw)) | 604 | if (igb_check_reset_block(hw)) |
619 | goto out; | 605 | goto out; |
620 | 606 | ||
621 | /* | 607 | /* If requested flow control is set to default, set flow control |
622 | * If requested flow control is set to default, set flow control | ||
623 | * based on the EEPROM flow control settings. | 608 | * based on the EEPROM flow control settings. |
624 | */ | 609 | */ |
625 | if (hw->fc.requested_mode == e1000_fc_default) { | 610 | if (hw->fc.requested_mode == e1000_fc_default) { |
@@ -628,8 +613,7 @@ s32 igb_setup_link(struct e1000_hw *hw) | |||
628 | goto out; | 613 | goto out; |
629 | } | 614 | } |
630 | 615 | ||
631 | /* | 616 | /* We want to save off the original Flow Control configuration just |
632 | * We want to save off the original Flow Control configuration just | ||
633 | * in case we get disconnected and then reconnected into a different | 617 | * in case we get disconnected and then reconnected into a different |
634 | * hub or switch with different Flow Control capabilities. | 618 | * hub or switch with different Flow Control capabilities. |
635 | */ | 619 | */ |
@@ -642,8 +626,7 @@ s32 igb_setup_link(struct e1000_hw *hw) | |||
642 | if (ret_val) | 626 | if (ret_val) |
643 | goto out; | 627 | goto out; |
644 | 628 | ||
645 | /* | 629 | /* Initialize the flow control address, type, and PAUSE timer |
646 | * Initialize the flow control address, type, and PAUSE timer | ||
647 | * registers to their default values. This is done even if flow | 630 | * registers to their default values. This is done even if flow |
648 | * control is disabled, because it does not hurt anything to | 631 | * control is disabled, because it does not hurt anything to |
649 | * initialize these registers. | 632 | * initialize these registers. |
@@ -696,16 +679,14 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw) | |||
696 | s32 ret_val = 0; | 679 | s32 ret_val = 0; |
697 | u32 fcrtl = 0, fcrth = 0; | 680 | u32 fcrtl = 0, fcrth = 0; |
698 | 681 | ||
699 | /* | 682 | /* Set the flow control receive threshold registers. Normally, |
700 | * Set the flow control receive threshold registers. Normally, | ||
701 | * these registers will be set to a default threshold that may be | 683 | * these registers will be set to a default threshold that may be |
702 | * adjusted later by the driver's runtime code. However, if the | 684 | * adjusted later by the driver's runtime code. However, if the |
703 | * ability to transmit pause frames is not enabled, then these | 685 | * ability to transmit pause frames is not enabled, then these |
704 | * registers will be set to 0. | 686 | * registers will be set to 0. |
705 | */ | 687 | */ |
706 | if (hw->fc.current_mode & e1000_fc_tx_pause) { | 688 | if (hw->fc.current_mode & e1000_fc_tx_pause) { |
707 | /* | 689 | /* We need to set up the Receive Threshold high and low water |
708 | * We need to set up the Receive Threshold high and low water | ||
709 | * marks as well as (optionally) enabling the transmission of | 690 | * marks as well as (optionally) enabling the transmission of |
710 | * XON frames. | 691 | * XON frames. |
711 | */ | 692 | */ |
@@ -733,8 +714,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw) | |||
733 | s32 ret_val = 0; | 714 | s32 ret_val = 0; |
734 | u16 nvm_data; | 715 | u16 nvm_data; |
735 | 716 | ||
736 | /* | 717 | /* Read and store word 0x0F of the EEPROM. This word contains bits |
737 | * Read and store word 0x0F of the EEPROM. This word contains bits | ||
738 | * that determine the hardware's default PAUSE (flow control) mode, | 718 | * that determine the hardware's default PAUSE (flow control) mode, |
739 | * a bit that determines whether the HW defaults to enabling or | 719 | * a bit that determines whether the HW defaults to enabling or |
740 | * disabling auto-negotiation, and the direction of the | 720 | * disabling auto-negotiation, and the direction of the |
@@ -778,8 +758,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw) | |||
778 | 758 | ||
779 | ctrl = rd32(E1000_CTRL); | 759 | ctrl = rd32(E1000_CTRL); |
780 | 760 | ||
781 | /* | 761 | /* Because we didn't get link via the internal auto-negotiation |
782 | * Because we didn't get link via the internal auto-negotiation | ||
783 | * mechanism (we either forced link or we got link via PHY | 762 | * mechanism (we either forced link or we got link via PHY |
784 | * auto-neg), we have to manually enable/disable transmit an | 763 | * auto-neg), we have to manually enable/disable transmit an |
785 | * receive flow control. | 764 | * receive flow control. |
@@ -843,8 +822,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
843 | u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; | 822 | u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; |
844 | u16 speed, duplex; | 823 | u16 speed, duplex; |
845 | 824 | ||
846 | /* | 825 | /* Check for the case where we have fiber media and auto-neg failed |
847 | * Check for the case where we have fiber media and auto-neg failed | ||
848 | * so we had to force link. In this case, we need to force the | 826 | * so we had to force link. In this case, we need to force the |
849 | * configuration of the MAC to match the "fc" parameter. | 827 | * configuration of the MAC to match the "fc" parameter. |
850 | */ | 828 | */ |
@@ -861,15 +839,13 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
861 | goto out; | 839 | goto out; |
862 | } | 840 | } |
863 | 841 | ||
864 | /* | 842 | /* Check for the case where we have copper media and auto-neg is |
865 | * Check for the case where we have copper media and auto-neg is | ||
866 | * enabled. In this case, we need to check and see if Auto-Neg | 843 | * enabled. In this case, we need to check and see if Auto-Neg |
867 | * has completed, and if so, how the PHY and link partner has | 844 | * has completed, and if so, how the PHY and link partner has |
868 | * flow control configured. | 845 | * flow control configured. |
869 | */ | 846 | */ |
870 | if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { | 847 | if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { |
871 | /* | 848 | /* Read the MII Status Register and check to see if AutoNeg |
872 | * Read the MII Status Register and check to see if AutoNeg | ||
873 | * has completed. We read this twice because this reg has | 849 | * has completed. We read this twice because this reg has |
874 | * some "sticky" (latched) bits. | 850 | * some "sticky" (latched) bits. |
875 | */ | 851 | */ |
@@ -888,8 +864,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
888 | goto out; | 864 | goto out; |
889 | } | 865 | } |
890 | 866 | ||
891 | /* | 867 | /* The AutoNeg process has completed, so we now need to |
892 | * The AutoNeg process has completed, so we now need to | ||
893 | * read both the Auto Negotiation Advertisement | 868 | * read both the Auto Negotiation Advertisement |
894 | * Register (Address 4) and the Auto_Negotiation Base | 869 | * Register (Address 4) and the Auto_Negotiation Base |
895 | * Page Ability Register (Address 5) to determine how | 870 | * Page Ability Register (Address 5) to determine how |
@@ -904,8 +879,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
904 | if (ret_val) | 879 | if (ret_val) |
905 | goto out; | 880 | goto out; |
906 | 881 | ||
907 | /* | 882 | /* Two bits in the Auto Negotiation Advertisement Register |
908 | * Two bits in the Auto Negotiation Advertisement Register | ||
909 | * (Address 4) and two bits in the Auto Negotiation Base | 883 | * (Address 4) and two bits in the Auto Negotiation Base |
910 | * Page Ability Register (Address 5) determine flow control | 884 | * Page Ability Register (Address 5) determine flow control |
911 | * for both the PHY and the link partner. The following | 885 | * for both the PHY and the link partner. The following |
@@ -940,8 +914,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
940 | */ | 914 | */ |
941 | if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && | 915 | if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && |
942 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { | 916 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { |
943 | /* | 917 | /* Now we need to check if the user selected RX ONLY |
944 | * Now we need to check if the user selected RX ONLY | ||
945 | * of pause frames. In this case, we had to advertise | 918 | * of pause frames. In this case, we had to advertise |
946 | * FULL flow control because we could not advertise RX | 919 | * FULL flow control because we could not advertise RX |
947 | * ONLY. Hence, we must now check to see if we need to | 920 | * ONLY. Hence, we must now check to see if we need to |
@@ -956,8 +929,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
956 | "RX PAUSE frames only.\r\n"); | 929 | "RX PAUSE frames only.\r\n"); |
957 | } | 930 | } |
958 | } | 931 | } |
959 | /* | 932 | /* For receiving PAUSE frames ONLY. |
960 | * For receiving PAUSE frames ONLY. | ||
961 | * | 933 | * |
962 | * LOCAL DEVICE | LINK PARTNER | 934 | * LOCAL DEVICE | LINK PARTNER |
963 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 935 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
@@ -971,8 +943,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
971 | hw->fc.current_mode = e1000_fc_tx_pause; | 943 | hw->fc.current_mode = e1000_fc_tx_pause; |
972 | hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); | 944 | hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); |
973 | } | 945 | } |
974 | /* | 946 | /* For transmitting PAUSE frames ONLY. |
975 | * For transmitting PAUSE frames ONLY. | ||
976 | * | 947 | * |
977 | * LOCAL DEVICE | LINK PARTNER | 948 | * LOCAL DEVICE | LINK PARTNER |
978 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 949 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
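The PAUSE/ASM_DIR truth tables in the comments above resolve to a small decision function. A sketch of that resolution under the IEEE 802.3 rules the driver spells out; the advertisement bit values are assumed for illustration (the real ones are the NWAY_AR_* / NWAY_LPAR_* defines), and the full-duplex case may still be demoted to rx-only by the caller when the user requested it:

```c
#include <stdint.h>
#include <stdio.h>

#define AR_PAUSE	0x0400	/* assumed PAUSE bit */
#define AR_ASM_DIR	0x0800	/* assumed asymmetric-direction bit */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve flow control from our advertisement (adv) and the link
 * partner's base page (lp). */
static enum fc_mode resolve_fc(uint16_t adv, uint16_t lp)
{
	if ((adv & AR_PAUSE) && (lp & AR_PAUSE))
		return FC_FULL;		/* both advertise symmetric PAUSE */
	if (!(adv & AR_PAUSE) && (adv & AR_ASM_DIR) &&
	    (lp & AR_PAUSE) && (lp & AR_ASM_DIR))
		return FC_TX_PAUSE;	/* we transmit PAUSE frames only */
	if ((adv & AR_PAUSE) && (adv & AR_ASM_DIR) &&
	    !(lp & AR_PAUSE) && (lp & AR_ASM_DIR))
		return FC_RX_PAUSE;	/* we receive PAUSE frames only */
	return FC_NONE;
}

int main(void)
{
	printf("%d\n", resolve_fc(AR_PAUSE | AR_ASM_DIR, AR_PAUSE)); /* FC_FULL */
	return 0;
}
```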
@@ -986,8 +957,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
986 | hw->fc.current_mode = e1000_fc_rx_pause; | 957 | hw->fc.current_mode = e1000_fc_rx_pause; |
987 | hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); | 958 | hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); |
988 | } | 959 | } |
989 | /* | 960 | /* Per the IEEE spec, at this point flow control should be |
990 | * Per the IEEE spec, at this point flow control should be | ||
991 | * disabled. However, we want to consider that we could | 961 | * disabled. However, we want to consider that we could |
992 | * be connected to a legacy switch that doesn't advertise | 962 | * be connected to a legacy switch that doesn't advertise |
993 | * desired flow control, but can be forced on the link | 963 | * desired flow control, but can be forced on the link |
@@ -1017,8 +987,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
1017 | hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); | 987 | hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); |
1018 | } | 988 | } |
1019 | 989 | ||
1020 | /* | 990 | /* Now we need to do one last check... If we auto- |
1021 | * Now we need to do one last check... If we auto- | ||
1022 | * negotiated to HALF DUPLEX, flow control should not be | 991 | * negotiated to HALF DUPLEX, flow control should not be |
1023 | * enabled per IEEE 802.3 spec. | 992 | * enabled per IEEE 802.3 spec. |
1024 | */ | 993 | */ |
@@ -1031,8 +1000,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) | |||
1031 | if (duplex == HALF_DUPLEX) | 1000 | if (duplex == HALF_DUPLEX) |
1032 | hw->fc.current_mode = e1000_fc_none; | 1001 | hw->fc.current_mode = e1000_fc_none; |
1033 | 1002 | ||
1034 | /* | 1003 | /* Now we call a subroutine to actually force the MAC |
1035 | * Now we call a subroutine to actually force the MAC | ||
1036 | * controller to use the correct flow control settings. | 1004 | * controller to use the correct flow control settings. |
1037 | */ | 1005 | */ |
1038 | ret_val = igb_force_mac_fc(hw); | 1006 | ret_val = igb_force_mac_fc(hw); |
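The resolution traced through the comments above is the IEEE 802.3 Annex 28B pairing of the PAUSE and ASM_DIR bits from the local advertisement (MII register 4) and the partner's base page (register 5). A minimal standalone sketch of that decision; the helper name and numeric bit values are illustrative assumptions, not the driver's API:

#include <stdint.h>

#define NWAY_AR_PAUSE     0x0400  /* assumed: local PAUSE, MII reg 4 */
#define NWAY_AR_ASM_DIR   0x0800  /* assumed: local ASM_DIR, MII reg 4 */
#define NWAY_LPAR_PAUSE   0x0400  /* assumed: partner PAUSE, MII reg 5 */
#define NWAY_LPAR_ASM_DIR 0x0800  /* assumed: partner ASM_DIR, MII reg 5 */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Hypothetical helper: resolve the negotiated flow-control mode. */
static enum fc_mode resolve_fc(uint16_t adv, uint16_t lp, enum fc_mode req)
{
	/* Both sides advertise symmetric PAUSE: FULL flow control,
	 * unless the user asked for RX ONLY (which had to be
	 * advertised as FULL, per the comment above). */
	if ((adv & NWAY_AR_PAUSE) && (lp & NWAY_LPAR_PAUSE))
		return (req == FC_RX_PAUSE) ? FC_RX_PAUSE : FC_FULL;

	/* Local 0/1, partner 1/1: we may send PAUSE, not honor it. */
	if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
	    (lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
		return FC_TX_PAUSE;

	/* Local 1/1, partner 0/1: partner may send PAUSE we honor. */
	if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
	    !(lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
		return FC_RX_PAUSE;

	return FC_NONE;  /* per the IEEE spec, otherwise disabled */
}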
@@ -1427,8 +1395,7 @@ s32 igb_blink_led(struct e1000_hw *hw) | |||
1427 | u32 ledctl_blink = 0; | 1395 | u32 ledctl_blink = 0; |
1428 | u32 i; | 1396 | u32 i; |
1429 | 1397 | ||
1430 | /* | 1398 | /* set the blink bit for each LED that's "on" (0x0E) |
1431 | * set the blink bit for each LED that's "on" (0x0E) | ||
1432 | * in ledctl_mode2 | 1399 | * in ledctl_mode2 |
1433 | */ | 1400 | */ |
1434 | ledctl_blink = hw->mac.ledctl_mode2; | 1401 | ledctl_blink = hw->mac.ledctl_mode2; |
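The comment above describes a single pass over LEDCTL: each of the four LEDs occupies one byte, the mode value 0x0E means "on", and setting the per-byte blink bit makes that LED blink. A hedged sketch of the pass (mask and bit positions are assumptions made for a self-contained example):

#include <stdint.h>

#define LEDCTL_MODE_LED_ON    0x0E  /* "on" mode value noted above */
#define LEDCTL_LED0_MODE_MASK 0x0F  /* assumed: low nibble per LED byte */
#define LEDCTL_LED0_BLINK     0x80  /* assumed: blink enable per byte */

static uint32_t set_blink_bits(uint32_t ledctl_mode2)
{
	uint32_t ledctl_blink = ledctl_mode2;
	unsigned int i;

	for (i = 0; i < 4; i++) {  /* four LED control bytes */
		uint32_t mode = (ledctl_mode2 >> (i * 8)) &
				LEDCTL_LED0_MODE_MASK;

		if (mode == LEDCTL_MODE_LED_ON)
			ledctl_blink |= (uint32_t)LEDCTL_LED0_BLINK <<
					(i * 8);
	}
	return ledctl_blink;
}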
@@ -1467,7 +1434,7 @@ s32 igb_led_off(struct e1000_hw *hw) | |||
1467 | * @hw: pointer to the HW structure | 1434 | * @hw: pointer to the HW structure |
1468 | * | 1435 | * |
1469 | * Returns 0 (0) if successful, else returns -10 | 1436 | * Returns 0 (0) if successful, else returns -10 |
1470 | * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued | 1437 | * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused |
1471 | * the master requests to be disabled. | 1438 | * the master requests to be disabled. |
1472 | * | 1439 | * |
1473 | * Disables PCI-Express master access and verifies there are no pending | 1440 | * Disables PCI-Express master access and verifies there are no pending |
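The flow this comment documents is a request-then-verify pattern: set the GIO master disable bit, then poll status until the hardware reports no pending master requests, or give up after a timeout. A sketch under assumed register and bit names (the stubs stand in for the driver's rd32()/wr32() MMIO accessors):

#include <stdint.h>

#define GIO_MASTER_DISABLE     (1u << 2)   /* assumed CTRL bit */
#define GIO_MASTER_ENABLE      (1u << 19)  /* assumed STATUS bit */
#define MASTER_DISABLE_TIMEOUT 800

/* Hypothetical MMIO stubs for a runnable example. */
static uint32_t ctrl_reg, status_reg;
static void wr_ctrl(uint32_t v) { ctrl_reg = v; }
static uint32_t rd_ctrl(void)   { return ctrl_reg; }
static uint32_t rd_status(void) { return status_reg; }

static int disable_pcie_master(void)
{
	int timeout = MASTER_DISABLE_TIMEOUT;

	wr_ctrl(rd_ctrl() | GIO_MASTER_DISABLE);  /* request disable */

	while (timeout--) {
		if (!(rd_status() & GIO_MASTER_ENABLE))
			return 0;  /* no pending master requests */
		/* udelay(100) between polls in the real driver */
	}
	return -10;  /* -E1000_ERR_MASTER_REQUESTS_PENDING */
}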
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index e6d6ce433261..5e13e83cc608 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h | |||
@@ -35,8 +35,7 @@ | |||
35 | #include "e1000_defines.h" | 35 | #include "e1000_defines.h" |
36 | #include "e1000_i210.h" | 36 | #include "e1000_i210.h" |
37 | 37 | ||
38 | /* | 38 | /* Functions that should not be called directly from drivers but can be used |
39 | * Functions that should not be called directly from drivers but can be used | ||
40 | * by other files in this 'shared code' | 39 | * by other files in this 'shared code' |
41 | */ | 40 | */ |
42 | s32 igb_blink_led(struct e1000_hw *hw); | 41 | s32 igb_blink_led(struct e1000_hw *hw); |
@@ -49,15 +48,15 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw); | |||
49 | s32 igb_get_bus_info_pcie(struct e1000_hw *hw); | 48 | s32 igb_get_bus_info_pcie(struct e1000_hw *hw); |
50 | s32 igb_get_hw_semaphore(struct e1000_hw *hw); | 49 | s32 igb_get_hw_semaphore(struct e1000_hw *hw); |
51 | s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, | 50 | s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, |
52 | u16 *duplex); | 51 | u16 *duplex); |
53 | s32 igb_id_led_init(struct e1000_hw *hw); | 52 | s32 igb_id_led_init(struct e1000_hw *hw); |
54 | s32 igb_led_off(struct e1000_hw *hw); | 53 | s32 igb_led_off(struct e1000_hw *hw); |
55 | void igb_update_mc_addr_list(struct e1000_hw *hw, | 54 | void igb_update_mc_addr_list(struct e1000_hw *hw, |
56 | u8 *mc_addr_list, u32 mc_addr_count); | 55 | u8 *mc_addr_list, u32 mc_addr_count); |
57 | s32 igb_setup_link(struct e1000_hw *hw); | 56 | s32 igb_setup_link(struct e1000_hw *hw); |
58 | s32 igb_validate_mdi_setting(struct e1000_hw *hw); | 57 | s32 igb_validate_mdi_setting(struct e1000_hw *hw); |
59 | s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, | 58 | s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, |
60 | u32 offset, u8 data); | 59 | u32 offset, u8 data); |
61 | 60 | ||
62 | void igb_clear_hw_cntrs_base(struct e1000_hw *hw); | 61 | void igb_clear_hw_cntrs_base(struct e1000_hw *hw); |
63 | void igb_clear_vfta(struct e1000_hw *hw); | 62 | void igb_clear_vfta(struct e1000_hw *hw); |
@@ -80,12 +79,12 @@ enum e1000_mng_mode { | |||
80 | e1000_mng_mode_host_if_only | 79 | e1000_mng_mode_host_if_only |
81 | }; | 80 | }; |
82 | 81 | ||
83 | #define E1000_FACTPS_MNGCG 0x20000000 | 82 | #define E1000_FACTPS_MNGCG 0x20000000 |
84 | 83 | ||
85 | #define E1000_FWSM_MODE_MASK 0xE | 84 | #define E1000_FWSM_MODE_MASK 0xE |
86 | #define E1000_FWSM_MODE_SHIFT 1 | 85 | #define E1000_FWSM_MODE_SHIFT 1 |
87 | 86 | ||
88 | #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 | 87 | #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 |
89 | 88 | ||
90 | extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); | 89 | extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); |
91 | 90 | ||
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 38e0df350904..dac1447fabf7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c | |||
@@ -196,7 +196,8 @@ out: | |||
196 | * returns SUCCESS if it successfully received a message notification and | 196 | * returns SUCCESS if it successfully received a message notification and |
197 | * copied it into the receive buffer. | 197 | * copied it into the receive buffer. |
198 | **/ | 198 | **/ |
199 | static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) | 199 | static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, |
200 | u16 mbx_id) | ||
200 | { | 201 | { |
201 | struct e1000_mbx_info *mbx = &hw->mbx; | 202 | struct e1000_mbx_info *mbx = &hw->mbx; |
202 | s32 ret_val = -E1000_ERR_MBX; | 203 | s32 ret_val = -E1000_ERR_MBX; |
@@ -222,7 +223,8 @@ out: | |||
222 | * returns SUCCESS if it successfully copied message into the buffer and | 223 | * returns SUCCESS if it successfully copied message into the buffer and |
223 | * received an ack to that message within delay * timeout period | 224 | * received an ack to that message within delay * timeout period |
224 | **/ | 225 | **/ |
225 | static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) | 226 | static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, |
227 | u16 mbx_id) | ||
226 | { | 228 | { |
227 | struct e1000_mbx_info *mbx = &hw->mbx; | 229 | struct e1000_mbx_info *mbx = &hw->mbx; |
228 | s32 ret_val = -E1000_ERR_MBX; | 230 | s32 ret_val = -E1000_ERR_MBX; |
@@ -325,7 +327,6 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) | |||
325 | s32 ret_val = -E1000_ERR_MBX; | 327 | s32 ret_val = -E1000_ERR_MBX; |
326 | u32 p2v_mailbox; | 328 | u32 p2v_mailbox; |
327 | 329 | ||
328 | |||
329 | /* Take ownership of the buffer */ | 330 | /* Take ownership of the buffer */ |
330 | wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); | 331 | wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); |
331 | 332 | ||
@@ -347,7 +348,7 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) | |||
347 | * returns SUCCESS if it successfully copied message into the buffer | 348 | * returns SUCCESS if it successfully copied message into the buffer |
348 | **/ | 349 | **/ |
349 | static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, | 350 | static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, |
350 | u16 vf_number) | 351 | u16 vf_number) |
351 | { | 352 | { |
352 | s32 ret_val; | 353 | s32 ret_val; |
353 | u16 i; | 354 | u16 i; |
@@ -388,7 +389,7 @@ out_no_write: | |||
388 | * a message due to a VF request so no polling for message is needed. | 389 | * a message due to a VF request so no polling for message is needed. |
389 | **/ | 390 | **/ |
390 | static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, | 391 | static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, |
391 | u16 vf_number) | 392 | u16 vf_number) |
392 | { | 393 | { |
393 | s32 ret_val; | 394 | s32 ret_val; |
394 | u16 i; | 395 | u16 i; |
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index c13b56d9edb2..de9bba41acf3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h | |||
@@ -30,42 +30,42 @@ | |||
30 | 30 | ||
31 | #include "e1000_hw.h" | 31 | #include "e1000_hw.h" |
32 | 32 | ||
33 | #define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ | 33 | #define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ |
34 | #define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ | 34 | #define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ |
35 | #define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ | 35 | #define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ |
36 | #define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ | 36 | #define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ |
37 | #define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ | 37 | #define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ |
38 | 38 | ||
39 | #define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ | 39 | #define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ |
40 | #define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ | 40 | #define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ |
41 | #define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ | 41 | #define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ |
42 | #define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ | 42 | #define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ |
43 | 43 | ||
44 | #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ | 44 | #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ |
45 | 45 | ||
46 | /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the | 46 | /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the |
47 | * PF. The reverse is true if it is E1000_PF_*. | 47 | * PF. The reverse is true if it is E1000_PF_*. |
48 | * Message ACK's are the value or'd with 0xF0000000 | 48 | * Message ACK's are the value or'd with 0xF0000000 |
49 | */ | 49 | */ |
50 | #define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with | 50 | /* Messages below or'd with this are the ACK */ |
51 | * this are the ACK */ | 51 | #define E1000_VT_MSGTYPE_ACK 0x80000000 |
52 | #define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with | 52 | /* Messages below or'd with this are the NACK */ |
53 | * this are the NACK */ | 53 | #define E1000_VT_MSGTYPE_NACK 0x40000000 |
54 | #define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still | 54 | /* Indicates that VF is still clear to send requests */ |
55 | clear to send requests */ | 55 | #define E1000_VT_MSGTYPE_CTS 0x20000000 |
56 | #define E1000_VT_MSGINFO_SHIFT 16 | 56 | #define E1000_VT_MSGINFO_SHIFT 16 |
57 | /* bits 23:16 are used for extra info for certain messages */ | 57 | /* bits 23:16 are used for extra info for certain messages */ |
58 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) | 58 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) |
59 | 59 | ||
60 | #define E1000_VF_RESET 0x01 /* VF requests reset */ | 60 | #define E1000_VF_RESET 0x01 /* VF requests reset */ |
61 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ | 61 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ |
62 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ | 62 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ |
63 | #define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ | 63 | #define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ |
64 | #define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ | 64 | #define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ |
65 | #define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ | 65 | #define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ |
66 | #define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) | 66 | #define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) |
67 | 67 | ||
68 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ | 68 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ |
69 | 69 | ||
70 | s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); | 70 | s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); |
71 | s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); | 71 | s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); |
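Given the defines above, a PF reply simply echoes the VF's request value with the ACK (or NACK) flag or'd in, so the VF can match the answer to what it sent. A minimal illustration using the values shown (the helper is hypothetical):

#include <stdint.h>

#define E1000_VT_MSGTYPE_ACK  0x80000000u
#define E1000_VT_MSGTYPE_NACK 0x40000000u
#define E1000_VF_SET_MAC_ADDR 0x02u

/* Frame the first word of a PF->VF reply to a VF request. */
static uint32_t build_pf_reply(uint32_t vf_msg, int success)
{
	return vf_msg | (success ? E1000_VT_MSGTYPE_ACK
				 : E1000_VT_MSGTYPE_NACK);
}

/* e.g. build_pf_reply(E1000_VF_SET_MAC_ADDR, 1) yields 0x80000002 */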
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index 5b62adbe134d..5e0dd0a8d7a8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c | |||
@@ -289,15 +289,14 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) | |||
289 | udelay(1); | 289 | udelay(1); |
290 | timeout = NVM_MAX_RETRY_SPI; | 290 | timeout = NVM_MAX_RETRY_SPI; |
291 | 291 | ||
292 | /* | 292 | /* Read "Status Register" repeatedly until the LSB is cleared. |
293 | * Read "Status Register" repeatedly until the LSB is cleared. | ||
294 | * The EEPROM will signal that the command has been completed | 293 | * The EEPROM will signal that the command has been completed |
295 | * by clearing bit 0 of the internal status register. If it's | 294 | * by clearing bit 0 of the internal status register. If it's |
296 | * not cleared within 'timeout', then error out. | 295 | * not cleared within 'timeout', then error out. |
297 | */ | 296 | */ |
298 | while (timeout) { | 297 | while (timeout) { |
299 | igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, | 298 | igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, |
300 | hw->nvm.opcode_bits); | 299 | hw->nvm.opcode_bits); |
301 | spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); | 300 | spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); |
302 | if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) | 301 | if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) |
303 | break; | 302 | break; |
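The loop shown polls the SPI status register until the busy bit (the LSB) clears. Stripped of the driver's EEC bit-banging helpers, the pattern is as follows; the stub and retry handling are assumptions for a standalone sketch:

#include <stdint.h>

#define NVM_STATUS_RDY_SPI 0x01  /* LSB: write cycle in progress */

/* Stub for the shift-out/shift-in EEC helpers: a real port would
 * clock out NVM_RDSR_OPCODE_SPI and clock 8 status bits back in. */
static uint8_t spi_read_status(void)
{
	return 0;
}

static int wait_spi_ready(unsigned int retries)
{
	while (retries--) {
		if (!(spi_read_status() & NVM_STATUS_RDY_SPI))
			return 0;  /* EEPROM signalled completion */
		/* udelay(5) between polls in the real driver */
	}
	return -1;  /* timed out: error out, as described above */
}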
@@ -335,8 +334,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
335 | u16 word_in; | 334 | u16 word_in; |
336 | u8 read_opcode = NVM_READ_OPCODE_SPI; | 335 | u8 read_opcode = NVM_READ_OPCODE_SPI; |
337 | 336 | ||
338 | /* | 337 | /* A check for invalid values: offset too large, too many words, |
339 | * A check for invalid values: offset too large, too many words, | ||
340 | * and not enough words. | 338 | * and not enough words. |
341 | */ | 339 | */ |
342 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 340 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
@@ -363,8 +361,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
363 | igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); | 361 | igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); |
364 | igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); | 362 | igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); |
365 | 363 | ||
366 | /* | 364 | /* Read the data. SPI NVMs increment the address with each byte |
367 | * Read the data. SPI NVMs increment the address with each byte | ||
368 | * read and will roll over if reading beyond the end. This allows | 365 | * read and will roll over if reading beyond the end. This allows |
369 | * us to read the whole NVM from any offset | 366 | * us to read the whole NVM from any offset |
370 | */ | 367 | */ |
@@ -395,8 +392,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
395 | u32 i, eerd = 0; | 392 | u32 i, eerd = 0; |
396 | s32 ret_val = 0; | 393 | s32 ret_val = 0; |
397 | 394 | ||
398 | /* | 395 | /* A check for invalid values: offset too large, too many words, |
399 | * A check for invalid values: offset too large, too many words, | ||
400 | * and not enough words. | 396 | * and not enough words. |
401 | */ | 397 | */ |
402 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 398 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
@@ -408,7 +404,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
408 | 404 | ||
409 | for (i = 0; i < words; i++) { | 405 | for (i = 0; i < words; i++) { |
410 | eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + | 406 | eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + |
411 | E1000_NVM_RW_REG_START; | 407 | E1000_NVM_RW_REG_START; |
412 | 408 | ||
413 | wr32(E1000_EERD, eerd); | 409 | wr32(E1000_EERD, eerd); |
414 | ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); | 410 | ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); |
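Each iteration of the loop above is one EERD transaction: write the word address plus the START bit, wait for the DONE handshake, then take the data from the upper half of the register. A sketch with an assumed field layout (the stub completes immediately so the example terminates; the driver polls via igb_poll_eerd_eewr_done()):

#include <stdint.h>

#define NVM_RW_ADDR_SHIFT 2      /* assumed field positions */
#define NVM_RW_REG_START  0x01u
#define NVM_RW_REG_DONE   0x02u
#define NVM_RW_DATA_SHIFT 16

/* Hypothetical MMIO stub for the EERD register. */
static uint32_t eerd;
static void wr_eerd(uint32_t v) { eerd = v | NVM_RW_REG_DONE; }
static uint32_t rd_eerd(void)   { return eerd; }

static uint16_t read_nvm_word(uint16_t offset)
{
	wr_eerd(((uint32_t)offset << NVM_RW_ADDR_SHIFT) |
		NVM_RW_REG_START);

	while (!(rd_eerd() & NVM_RW_REG_DONE))
		;  /* poll for completion */

	return (uint16_t)(rd_eerd() >> NVM_RW_DATA_SHIFT);
}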
@@ -441,8 +437,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
441 | s32 ret_val = -E1000_ERR_NVM; | 437 | s32 ret_val = -E1000_ERR_NVM; |
442 | u16 widx = 0; | 438 | u16 widx = 0; |
443 | 439 | ||
444 | /* | 440 | /* A check for invalid values: offset too large, too many words, |
445 | * A check for invalid values: offset too large, too many words, | ||
446 | * and not enough words. | 441 | * and not enough words. |
447 | */ | 442 | */ |
448 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 443 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
@@ -472,8 +467,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
472 | 467 | ||
473 | igb_standby_nvm(hw); | 468 | igb_standby_nvm(hw); |
474 | 469 | ||
475 | /* | 470 | /* Some SPI eeproms use the 8th address bit embedded in the |
476 | * Some SPI eeproms use the 8th address bit embedded in the | ||
477 | * opcode | 471 | * opcode |
478 | */ | 472 | */ |
479 | if ((nvm->address_bits == 8) && (offset >= 128)) | 473 | if ((nvm->address_bits == 8) && (offset >= 128)) |
@@ -538,8 +532,7 @@ s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) | |||
538 | goto out; | 532 | goto out; |
539 | } | 533 | } |
540 | 534 | ||
541 | /* | 535 | /* if nvm_data is not ptr guard the PBA must be in legacy format which |
542 | * if nvm_data is not ptr guard the PBA must be in legacy format which | ||
543 | * means pointer is actually our second data word for the PBA number | 536 | * means pointer is actually our second data word for the PBA number |
544 | * and we can decode it into an ascii string | 537 | * and we can decode it into an ascii string |
545 | */ | 538 | */ |
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 2918c979b5bb..72a44095c506 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c | |||
@@ -33,29 +33,29 @@ | |||
33 | 33 | ||
34 | static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); | 34 | static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); |
35 | static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, | 35 | static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, |
36 | u16 *phy_ctrl); | 36 | u16 *phy_ctrl); |
37 | static s32 igb_wait_autoneg(struct e1000_hw *hw); | 37 | static s32 igb_wait_autoneg(struct e1000_hw *hw); |
38 | static s32 igb_set_master_slave_mode(struct e1000_hw *hw); | 38 | static s32 igb_set_master_slave_mode(struct e1000_hw *hw); |
39 | 39 | ||
40 | /* Cable length tables */ | 40 | /* Cable length tables */ |
41 | static const u16 e1000_m88_cable_length_table[] = | 41 | static const u16 e1000_m88_cable_length_table[] = { |
42 | { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; | 42 | 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; |
43 | #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ | 43 | #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ |
44 | (sizeof(e1000_m88_cable_length_table) / \ | 44 | (sizeof(e1000_m88_cable_length_table) / \ |
45 | sizeof(e1000_m88_cable_length_table[0])) | 45 | sizeof(e1000_m88_cable_length_table[0])) |
46 | 46 | ||
47 | static const u16 e1000_igp_2_cable_length_table[] = | 47 | static const u16 e1000_igp_2_cable_length_table[] = { |
48 | { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, | 48 | 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, |
49 | 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, | 49 | 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, |
50 | 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, | 50 | 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, |
51 | 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, | 51 | 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, |
52 | 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, | 52 | 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, |
53 | 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, | 53 | 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, |
54 | 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, | 54 | 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, |
55 | 104, 109, 114, 118, 121, 124}; | 55 | 104, 109, 114, 118, 121, 124}; |
56 | #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ | 56 | #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ |
57 | (sizeof(e1000_igp_2_cable_length_table) / \ | 57 | (sizeof(e1000_igp_2_cable_length_table) / \ |
58 | sizeof(e1000_igp_2_cable_length_table[0])) | 58 | sizeof(e1000_igp_2_cable_length_table[0])) |
59 | 59 | ||
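These tables turn a gain reading into an approximate cable length in meters: the reading is reduced to an index, bounds-checked, and used as a subscript. A standalone sketch over the M88 table shown above (the sentinel value is an assumption):

#include <stdint.h>
#include <stddef.h>

#define CABLE_LENGTH_UNDEFINED 0xFF  /* assumed sentinel value */

static const uint16_t m88_cable_length_table[] = {
	0, 50, 80, 110, 140, 140, CABLE_LENGTH_UNDEFINED };

static int cable_length_from_index(size_t index, uint16_t *meters)
{
	size_t n = sizeof(m88_cable_length_table) /
		   sizeof(m88_cable_length_table[0]);

	if (index >= n ||
	    m88_cable_length_table[index] == CABLE_LENGTH_UNDEFINED)
		return -1;  /* out of range / no valid estimate */

	*meters = m88_cable_length_table[index];
	return 0;
}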
60 | /** | 60 | /** |
61 | * igb_check_reset_block - Check if PHY reset is blocked | 61 | * igb_check_reset_block - Check if PHY reset is blocked |
@@ -71,8 +71,7 @@ s32 igb_check_reset_block(struct e1000_hw *hw) | |||
71 | 71 | ||
72 | manc = rd32(E1000_MANC); | 72 | manc = rd32(E1000_MANC); |
73 | 73 | ||
74 | return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? | 74 | return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; |
75 | E1000_BLK_PHY_RESET : 0; | ||
76 | } | 75 | } |
77 | 76 | ||
78 | /** | 77 | /** |
@@ -149,8 +148,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
149 | goto out; | 148 | goto out; |
150 | } | 149 | } |
151 | 150 | ||
152 | /* | 151 | /* Set up Op-code, Phy Address, and register offset in the MDI |
153 | * Set up Op-code, Phy Address, and register offset in the MDI | ||
154 | * Control register. The MAC will take care of interfacing with the | 152 | * Control register. The MAC will take care of interfacing with the |
155 | * PHY to retrieve the desired data. | 153 | * PHY to retrieve the desired data. |
156 | */ | 154 | */ |
@@ -160,8 +158,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
160 | 158 | ||
161 | wr32(E1000_MDIC, mdic); | 159 | wr32(E1000_MDIC, mdic); |
162 | 160 | ||
163 | /* | 161 | /* Poll the ready bit to see if the MDI read completed |
164 | * Poll the ready bit to see if the MDI read completed | ||
165 | * Increasing the time out as testing showed failures with | 162 | * Increasing the time out as testing showed failures with |
166 | * the lower time out | 163 | * the lower time out |
167 | */ | 164 | */ |
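The MDIC accesses above all follow one shape: pack the register offset, PHY address, and opcode into a single MDIC write, then poll the ready bit and check the error flag before taking the low 16 data bits. A sketch with assumed bit positions (the stub completes immediately so the example runs):

#include <stdint.h>

#define MDIC_REG_SHIFT 16        /* assumed MDIC field layout */
#define MDIC_PHY_SHIFT 21
#define MDIC_OP_READ   (2u << 26)
#define MDIC_READY     (1u << 28)
#define MDIC_ERROR     (1u << 30)

/* Hypothetical MMIO stub standing in for rd32()/wr32(). */
static uint32_t mdic;
static void wr_mdic(uint32_t v) { mdic = (v & 0xFFFF0000u) | MDIC_READY; }
static uint32_t rd_mdic(void)   { return mdic; }

static int mdic_read(uint8_t phy_addr, uint8_t offset, uint16_t *data)
{
	uint32_t v;

	wr_mdic(((uint32_t)offset << MDIC_REG_SHIFT) |
		((uint32_t)phy_addr << MDIC_PHY_SHIFT) | MDIC_OP_READ);

	do {  /* poll the ready bit, as described above */
		v = rd_mdic();
	} while (!(v & MDIC_READY));

	if (v & MDIC_ERROR)
		return -1;  /* MDI error reported by the MAC */

	*data = (uint16_t)v;  /* data lives in the low 16 bits */
	return 0;
}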
@@ -207,8 +204,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
207 | goto out; | 204 | goto out; |
208 | } | 205 | } |
209 | 206 | ||
210 | /* | 207 | /* Set up Op-code, Phy Address, and register offset in the MDI |
211 | * Set up Op-code, Phy Address, and register offset in the MDI | ||
212 | * Control register. The MAC will take care of interfacing with the | 208 | * Control register. The MAC will take care of interfacing with the |
213 | * PHY to retrieve the desired data. | 209 | * PHY to retrieve the desired data. |
214 | */ | 210 | */ |
@@ -219,8 +215,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
219 | 215 | ||
220 | wr32(E1000_MDIC, mdic); | 216 | wr32(E1000_MDIC, mdic); |
221 | 217 | ||
222 | /* | 218 | /* Poll the ready bit to see if the MDI read completed |
223 | * Poll the ready bit to see if the MDI read completed | ||
224 | * Increasing the time out as testing showed failures with | 219 | * Increasing the time out as testing showed failures with |
225 | * the lower time out | 220 | * the lower time out |
226 | */ | 221 | */ |
@@ -259,15 +254,13 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) | |||
259 | struct e1000_phy_info *phy = &hw->phy; | 254 | struct e1000_phy_info *phy = &hw->phy; |
260 | u32 i, i2ccmd = 0; | 255 | u32 i, i2ccmd = 0; |
261 | 256 | ||
262 | | 257 | /* Set up Op-code, Phy Address, and register address in the I2CCMD |
263 | /* | ||
264 | * Set up Op-code, Phy Address, and register address in the I2CCMD | ||
265 | * register. The MAC will take care of interfacing with the | 258 | * register. The MAC will take care of interfacing with the |
266 | * PHY to retrieve the desired data. | 259 | * PHY to retrieve the desired data. |
267 | */ | 260 | */ |
268 | i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | | 261 | i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | |
269 | (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | | 262 | (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | |
270 | (E1000_I2CCMD_OPCODE_READ)); | 263 | (E1000_I2CCMD_OPCODE_READ)); |
271 | 264 | ||
272 | wr32(E1000_I2CCMD, i2ccmd); | 265 | wr32(E1000_I2CCMD, i2ccmd); |
273 | 266 | ||
@@ -317,15 +310,14 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) | |||
317 | /* Swap the data bytes for the I2C interface */ | 310 | /* Swap the data bytes for the I2C interface */ |
318 | phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); | 311 | phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); |
319 | 312 | ||
320 | /* | 313 | /* Set up Op-code, Phy Address, and register address in the I2CCMD |
321 | * Set up Op-code, Phy Address, and register address in the I2CCMD | ||
322 | * register. The MAC will take care of interfacing with the | 314 | * register. The MAC will take care of interfacing with the |
323 | * PHY to retrieve the desired data. | 315 | * PHY to retrieve the desired data. |
324 | */ | 316 | */ |
325 | i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | | 317 | i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | |
326 | (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | | 318 | (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | |
327 | E1000_I2CCMD_OPCODE_WRITE | | 319 | E1000_I2CCMD_OPCODE_WRITE | |
328 | phy_data_swapped); | 320 | phy_data_swapped); |
329 | 321 | ||
330 | wr32(E1000_I2CCMD, i2ccmd); | 322 | wr32(E1000_I2CCMD, i2ccmd); |
331 | 323 | ||
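One detail worth isolating from the I2CCMD path: the 16-bit PHY word crosses the I2C interface byte-swapped, hence the swap above before the write (and the same swap after a read). The swap expression itself, lifted into a small helper:

#include <stdint.h>

/* Swap the two bytes of a PHY register word for the I2C interface,
 * exactly as computed in igb_write_phy_reg_i2c() above. */
static inline uint16_t i2c_swap16(uint16_t v)
{
	return (uint16_t)(((v >> 8) & 0x00FF) | ((v << 8) & 0xFF00));
}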
@@ -371,8 +363,8 @@ s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) | |||
371 | 363 | ||
372 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | 364 | if (offset > MAX_PHY_MULTI_PAGE_REG) { |
373 | ret_val = igb_write_phy_reg_mdic(hw, | 365 | ret_val = igb_write_phy_reg_mdic(hw, |
374 | IGP01E1000_PHY_PAGE_SELECT, | 366 | IGP01E1000_PHY_PAGE_SELECT, |
375 | (u16)offset); | 367 | (u16)offset); |
376 | if (ret_val) { | 368 | if (ret_val) { |
377 | hw->phy.ops.release(hw); | 369 | hw->phy.ops.release(hw); |
378 | goto out; | 370 | goto out; |
@@ -410,8 +402,8 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) | |||
410 | 402 | ||
411 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | 403 | if (offset > MAX_PHY_MULTI_PAGE_REG) { |
412 | ret_val = igb_write_phy_reg_mdic(hw, | 404 | ret_val = igb_write_phy_reg_mdic(hw, |
413 | IGP01E1000_PHY_PAGE_SELECT, | 405 | IGP01E1000_PHY_PAGE_SELECT, |
414 | (u16)offset); | 406 | (u16)offset); |
415 | if (ret_val) { | 407 | if (ret_val) { |
416 | hw->phy.ops.release(hw); | 408 | hw->phy.ops.release(hw); |
417 | goto out; | 409 | goto out; |
@@ -419,7 +411,7 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) | |||
419 | } | 411 | } |
420 | 412 | ||
421 | ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 413 | ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
422 | data); | 414 | data); |
423 | 415 | ||
424 | hw->phy.ops.release(hw); | 416 | hw->phy.ops.release(hw); |
425 | 417 | ||
@@ -439,7 +431,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw) | |||
439 | s32 ret_val; | 431 | s32 ret_val; |
440 | u16 phy_data; | 432 | u16 phy_data; |
441 | 433 | ||
442 | |||
443 | if (phy->reset_disable) { | 434 | if (phy->reset_disable) { |
444 | ret_val = 0; | 435 | ret_val = 0; |
445 | goto out; | 436 | goto out; |
@@ -472,8 +463,7 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw) | |||
472 | if (ret_val) | 463 | if (ret_val) |
473 | goto out; | 464 | goto out; |
474 | phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; | 465 | phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; |
475 | /* | 466 | /* Options: |
476 | * Options: | ||
477 | * 0 - Auto (default) | 467 | * 0 - Auto (default) |
478 | * 1 - MDI mode | 468 | * 1 - MDI mode |
479 | * 2 - MDI-X mode | 469 | * 2 - MDI-X mode |
@@ -520,8 +510,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) | |||
520 | 510 | ||
521 | phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; | 511 | phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; |
522 | 512 | ||
523 | /* | 513 | /* Options: |
524 | * Options: | ||
525 | * MDI/MDI-X = 0 (default) | 514 | * MDI/MDI-X = 0 (default) |
526 | * 0 - Auto for all speeds | 515 | * 0 - Auto for all speeds |
527 | * 1 - MDI mode | 516 | * 1 - MDI mode |
@@ -546,8 +535,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) | |||
546 | break; | 535 | break; |
547 | } | 536 | } |
548 | 537 | ||
549 | /* | 538 | /* Options: |
550 | * Options: | ||
551 | * disable_polarity_correction = 0 (default) | 539 | * disable_polarity_correction = 0 (default) |
552 | * Automatic Correction for Reversed Cable Polarity | 540 | * Automatic Correction for Reversed Cable Polarity |
553 | * 0 - Disabled | 541 | * 0 - Disabled |
@@ -562,12 +550,11 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) | |||
562 | goto out; | 550 | goto out; |
563 | 551 | ||
564 | if (phy->revision < E1000_REVISION_4) { | 552 | if (phy->revision < E1000_REVISION_4) { |
565 | /* | 553 | /* Force TX_CLK in the Extended PHY Specific Control Register |
566 | * Force TX_CLK in the Extended PHY Specific Control Register | ||
567 | * to 25MHz clock. | 554 | * to 25MHz clock. |
568 | */ | 555 | */ |
569 | ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 556 | ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, |
570 | &phy_data); | 557 | &phy_data); |
571 | if (ret_val) | 558 | if (ret_val) |
572 | goto out; | 559 | goto out; |
573 | 560 | ||
@@ -630,8 +617,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) | |||
630 | if (ret_val) | 617 | if (ret_val) |
631 | goto out; | 618 | goto out; |
632 | 619 | ||
633 | /* | 620 | /* Options: |
634 | * Options: | ||
635 | * MDI/MDI-X = 0 (default) | 621 | * MDI/MDI-X = 0 (default) |
636 | * 0 - Auto for all speeds | 622 | * 0 - Auto for all speeds |
637 | * 1 - MDI mode | 623 | * 1 - MDI mode |
@@ -659,8 +645,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) | |||
659 | break; | 645 | break; |
660 | } | 646 | } |
661 | 647 | ||
662 | /* | 648 | /* Options: |
663 | * Options: | ||
664 | * disable_polarity_correction = 0 (default) | 649 | * disable_polarity_correction = 0 (default) |
665 | * Automatic Correction for Reversed Cable Polarity | 650 | * Automatic Correction for Reversed Cable Polarity |
666 | * 0 - Disabled | 651 | * 0 - Disabled |
@@ -714,14 +699,12 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) | |||
714 | goto out; | 699 | goto out; |
715 | } | 700 | } |
716 | 701 | ||
717 | /* | 702 | /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid |
718 | * Wait 100ms for MAC to configure PHY from NVM settings, to avoid | ||
719 | * timeout issues when LFS is enabled. | 703 | * timeout issues when LFS is enabled. |
720 | */ | 704 | */ |
721 | msleep(100); | 705 | msleep(100); |
722 | 706 | ||
723 | /* | 707 | /* The NVM settings will configure LPLU in D3 for |
724 | * The NVM settings will configure LPLU in D3 for | ||
725 | * non-IGP1 PHYs. | 708 | * non-IGP1 PHYs. |
726 | */ | 709 | */ |
727 | if (phy->type == e1000_phy_igp) { | 710 | if (phy->type == e1000_phy_igp) { |
@@ -765,8 +748,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) | |||
765 | 748 | ||
766 | /* set auto-master slave resolution settings */ | 749 | /* set auto-master slave resolution settings */ |
767 | if (hw->mac.autoneg) { | 750 | if (hw->mac.autoneg) { |
768 | /* | 751 | /* when autonegotiation advertisement is only 1000Mbps then we |
769 | * when autonegotiation advertisement is only 1000Mbps then we | ||
770 | * should disable SmartSpeed and enable Auto MasterSlave | 752 | * should disable SmartSpeed and enable Auto MasterSlave |
771 | * resolution as hardware default. | 753 | * resolution as hardware default. |
772 | */ | 754 | */ |
@@ -844,14 +826,12 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw) | |||
844 | s32 ret_val; | 826 | s32 ret_val; |
845 | u16 phy_ctrl; | 827 | u16 phy_ctrl; |
846 | 828 | ||
847 | /* | 829 | /* Perform some bounds checking on the autoneg advertisement |
848 | * Perform some bounds checking on the autoneg advertisement | ||
849 | * parameter. | 830 | * parameter. |
850 | */ | 831 | */ |
851 | phy->autoneg_advertised &= phy->autoneg_mask; | 832 | phy->autoneg_advertised &= phy->autoneg_mask; |
852 | 833 | ||
853 | /* | 834 | /* If autoneg_advertised is zero, we assume it was not defaulted |
854 | * If autoneg_advertised is zero, we assume it was not defaulted | ||
855 | * by the calling code so we set to advertise full capability. | 835 | * by the calling code so we set to advertise full capability. |
856 | */ | 836 | */ |
857 | if (phy->autoneg_advertised == 0) | 837 | if (phy->autoneg_advertised == 0) |
@@ -865,8 +845,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw) | |||
865 | } | 845 | } |
866 | hw_dbg("Restarting Auto-Neg\n"); | 846 | hw_dbg("Restarting Auto-Neg\n"); |
867 | 847 | ||
868 | /* | 848 | /* Restart auto-negotiation by setting the Auto Neg Enable bit and |
869 | * Restart auto-negotiation by setting the Auto Neg Enable bit and | ||
870 | * the Auto Neg Restart bit in the PHY control register. | 849 | * the Auto Neg Restart bit in the PHY control register. |
871 | */ | 850 | */ |
872 | ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); | 851 | ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); |
@@ -878,8 +857,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw) | |||
878 | if (ret_val) | 857 | if (ret_val) |
879 | goto out; | 858 | goto out; |
880 | 859 | ||
881 | /* | 860 | /* Does the user want to wait for Auto-Neg to complete here, or |
882 | * Does the user want to wait for Auto-Neg to complete here, or | ||
883 | * check at a later time (for example, callback routine). | 861 | * check at a later time (for example, callback routine). |
884 | */ | 862 | */ |
885 | if (phy->autoneg_wait_to_complete) { | 863 | if (phy->autoneg_wait_to_complete) { |
@@ -928,16 +906,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) | |||
928 | goto out; | 906 | goto out; |
929 | } | 907 | } |
930 | 908 | ||
931 | /* | 909 | /* Need to parse both autoneg_advertised and fc and set up |
932 | * Need to parse both autoneg_advertised and fc and set up | ||
933 | * the appropriate PHY registers. First we will parse for | 910 | * the appropriate PHY registers. First we will parse for |
934 | * autoneg_advertised software override. Since we can advertise | 911 | * autoneg_advertised software override. Since we can advertise |
935 | * a plethora of combinations, we need to check each bit | 912 | * a plethora of combinations, we need to check each bit |
936 | * individually. | 913 | * individually. |
937 | */ | 914 | */ |
938 | 915 | ||
939 | /* | 916 | /* First we clear all the 10/100 mb speed bits in the Auto-Neg |
940 | * First we clear all the 10/100 mb speed bits in the Auto-Neg | ||
941 | * Advertisement Register (Address 4) and the 1000 mb speed bits in | 917 | * Advertisement Register (Address 4) and the 1000 mb speed bits in |
942 | * the 1000Base-T Control Register (Address 9). | 918 | * the 1000Base-T Control Register (Address 9). |
943 | */ | 919 | */ |
@@ -983,8 +959,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) | |||
983 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | 959 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; |
984 | } | 960 | } |
985 | 961 | ||
986 | /* | 962 | /* Check for a software override of the flow control settings, and |
987 | * Check for a software override of the flow control settings, and | ||
988 | * setup the PHY advertisement registers accordingly. If | 963 | * setup the PHY advertisement registers accordingly. If |
989 | * auto-negotiation is enabled, then software will have to set the | 964 | * auto-negotiation is enabled, then software will have to set the |
990 | * "PAUSE" bits to the correct value in the Auto-Negotiation | 965 | * "PAUSE" bits to the correct value in the Auto-Negotiation |
@@ -1003,15 +978,13 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) | |||
1003 | */ | 978 | */ |
1004 | switch (hw->fc.current_mode) { | 979 | switch (hw->fc.current_mode) { |
1005 | case e1000_fc_none: | 980 | case e1000_fc_none: |
1006 | /* | 981 | /* Flow control (RX & TX) is completely disabled by a |
1007 | * Flow control (RX & TX) is completely disabled by a | ||
1008 | * software over-ride. | 982 | * software over-ride. |
1009 | */ | 983 | */ |
1010 | mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 984 | mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
1011 | break; | 985 | break; |
1012 | case e1000_fc_rx_pause: | 986 | case e1000_fc_rx_pause: |
1013 | /* | 987 | /* RX Flow control is enabled, and TX Flow control is |
1014 | * RX Flow control is enabled, and TX Flow control is | ||
1015 | * disabled, by a software over-ride. | 988 | * disabled, by a software over-ride. |
1016 | * | 989 | * |
1017 | * Since there really isn't a way to advertise that we are | 990 | * Since there really isn't a way to advertise that we are |
@@ -1023,16 +996,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) | |||
1023 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 996 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
1024 | break; | 997 | break; |
1025 | case e1000_fc_tx_pause: | 998 | case e1000_fc_tx_pause: |
1026 | /* | 999 | /* TX Flow control is enabled, and RX Flow control is |
1027 | * TX Flow control is enabled, and RX Flow control is | ||
1028 | * disabled, by a software over-ride. | 1000 | * disabled, by a software over-ride. |
1029 | */ | 1001 | */ |
1030 | mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; | 1002 | mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; |
1031 | mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; | 1003 | mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; |
1032 | break; | 1004 | break; |
1033 | case e1000_fc_full: | 1005 | case e1000_fc_full: |
1034 | /* | 1006 | /* Flow control (both RX and TX) is enabled by a software |
1035 | * Flow control (both RX and TX) is enabled by a software | ||
1036 | * over-ride. | 1007 | * over-ride. |
1037 | */ | 1008 | */ |
1038 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 1009 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
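The switch above is the advertising side of the Annex 28B scheme: each requested mode maps onto the PAUSE/ASM_DIR pair in MII register 4, and since rx-only cannot be advertised directly, it advertises the same bits as full. A compact sketch with assumed bit values:

#include <stdint.h>

#define NWAY_AR_PAUSE   0x0400  /* assumed bit values, as earlier */
#define NWAY_AR_ASM_DIR 0x0800

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static uint16_t fc_to_adv_bits(enum fc_mode mode, uint16_t adv)
{
	adv &= (uint16_t)~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);

	switch (mode) {
	case FC_NONE:           /* advertise neither bit */
		break;
	case FC_RX_PAUSE:       /* cannot advertise rx-only... */
	case FC_FULL:           /* ...so both use PAUSE|ASM_DIR */
		adv |= NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
		break;
	case FC_TX_PAUSE:       /* asymmetric toward the partner */
		adv |= NWAY_AR_ASM_DIR;
		break;
	}
	return adv;
}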
@@ -1075,18 +1046,15 @@ s32 igb_setup_copper_link(struct e1000_hw *hw) | |||
1075 | s32 ret_val; | 1046 | s32 ret_val; |
1076 | bool link; | 1047 | bool link; |
1077 | 1048 | ||
1078 | |||
1079 | if (hw->mac.autoneg) { | 1049 | if (hw->mac.autoneg) { |
1080 | /* | 1050 | /* Setup autoneg and flow control advertisement and perform |
1081 | * Setup autoneg and flow control advertisement and perform | ||
1082 | * autonegotiation. | 1051 | * autonegotiation. |
1083 | */ | 1052 | */ |
1084 | ret_val = igb_copper_link_autoneg(hw); | 1053 | ret_val = igb_copper_link_autoneg(hw); |
1085 | if (ret_val) | 1054 | if (ret_val) |
1086 | goto out; | 1055 | goto out; |
1087 | } else { | 1056 | } else { |
1088 | /* | 1057 | /* PHY will be set to 10H, 10F, 100H or 100F |
1089 | * PHY will be set to 10H, 10F, 100H or 100F | ||
1090 | * depending on user settings. | 1058 | * depending on user settings. |
1091 | */ | 1059 | */ |
1092 | hw_dbg("Forcing Speed and Duplex\n"); | 1060 | hw_dbg("Forcing Speed and Duplex\n"); |
@@ -1097,14 +1065,10 @@ s32 igb_setup_copper_link(struct e1000_hw *hw) | |||
1097 | } | 1065 | } |
1098 | } | 1066 | } |
1099 | 1067 | ||
1100 | /* | 1068 | /* Check link status. Wait up to 100 microseconds for link to become |
1101 | * Check link status. Wait up to 100 microseconds for link to become | ||
1102 | * valid. | 1069 | * valid. |
1103 | */ | 1070 | */ |
1104 | ret_val = igb_phy_has_link(hw, | 1071 | ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); |
1105 | COPPER_LINK_UP_LIMIT, | ||
1106 | 10, | ||
1107 | &link); | ||
1108 | if (ret_val) | 1072 | if (ret_val) |
1109 | goto out; | 1073 | goto out; |
1110 | 1074 | ||
@@ -1145,8 +1109,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1145 | if (ret_val) | 1109 | if (ret_val) |
1146 | goto out; | 1110 | goto out; |
1147 | 1111 | ||
1148 | /* | 1112 | /* Clear Auto-Crossover to force MDI manually. IGP requires MDI |
1149 | * Clear Auto-Crossover to force MDI manually. IGP requires MDI | ||
1150 | * forced whenever speed and duplex are forced. | 1113 | * forced whenever speed and duplex are forced. |
1151 | */ | 1114 | */ |
1152 | ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); | 1115 | ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); |
@@ -1167,10 +1130,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1167 | if (phy->autoneg_wait_to_complete) { | 1130 | if (phy->autoneg_wait_to_complete) { |
1168 | hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); | 1131 | hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); |
1169 | 1132 | ||
1170 | ret_val = igb_phy_has_link(hw, | 1133 | ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); |
1171 | PHY_FORCE_LIMIT, | ||
1172 | 100000, | ||
1173 | &link); | ||
1174 | if (ret_val) | 1134 | if (ret_val) |
1175 | goto out; | 1135 | goto out; |
1176 | 1136 | ||
@@ -1178,10 +1138,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
1178 | hw_dbg("Link taking longer than expected.\n"); | 1138 | hw_dbg("Link taking longer than expected.\n"); |
1179 | 1139 | ||
1180 | /* Try once more */ | 1140 | /* Try once more */ |
1181 | ret_val = igb_phy_has_link(hw, | 1141 | ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); |
1182 | PHY_FORCE_LIMIT, | ||
1183 | 100000, | ||
1184 | &link); | ||
1185 | if (ret_val) | 1142 | if (ret_val) |
1186 | goto out; | 1143 | goto out; |
1187 | } | 1144 | } |
@@ -1209,8 +1166,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1209 | 1166 | ||
1210 | /* I210 and I211 devices support Auto-Crossover in forced operation. */ | 1167 | /* I210 and I211 devices support Auto-Crossover in forced operation. */ |
1211 | if (phy->type != e1000_phy_i210) { | 1168 | if (phy->type != e1000_phy_i210) { |
1212 | /* | 1169 | /* Clear Auto-Crossover to force MDI manually. M88E1000 |
1213 | * Clear Auto-Crossover to force MDI manually. M88E1000 | ||
1214 | * requires MDI forced whenever speed and duplex are forced. | 1170 | * requires MDI forced whenever speed and duplex are forced. |
1215 | */ | 1171 | */ |
1216 | ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, | 1172 | ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, |
@@ -1266,13 +1222,12 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1266 | if (!reset_dsp) | 1222 | if (!reset_dsp) |
1267 | hw_dbg("Link taking longer than expected.\n"); | 1223 | hw_dbg("Link taking longer than expected.\n"); |
1268 | else { | 1224 | else { |
1269 | /* | 1225 | /* We didn't get link. |
1270 | * We didn't get link. | ||
1271 | * Reset the DSP and cross our fingers. | 1226 | * Reset the DSP and cross our fingers. |
1272 | */ | 1227 | */ |
1273 | ret_val = phy->ops.write_reg(hw, | 1228 | ret_val = phy->ops.write_reg(hw, |
1274 | M88E1000_PHY_PAGE_SELECT, | 1229 | M88E1000_PHY_PAGE_SELECT, |
1275 | 0x001d); | 1230 | 0x001d); |
1276 | if (ret_val) | 1231 | if (ret_val) |
1277 | goto out; | 1232 | goto out; |
1278 | ret_val = igb_phy_reset_dsp(hw); | 1233 | ret_val = igb_phy_reset_dsp(hw); |
@@ -1298,8 +1253,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1298 | if (ret_val) | 1253 | if (ret_val) |
1299 | goto out; | 1254 | goto out; |
1300 | 1255 | ||
1301 | /* | 1256 | /* Resetting the phy means we need to re-force TX_CLK in the |
1302 | * Resetting the phy means we need to re-force TX_CLK in the | ||
1303 | * Extended PHY Specific Control Register to 25MHz clock from | 1257 | * Extended PHY Specific Control Register to 25MHz clock from |
1304 | * the reset value of 2.5MHz. | 1258 | * the reset value of 2.5MHz. |
1305 | */ | 1259 | */ |
@@ -1308,8 +1262,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1308 | if (ret_val) | 1262 | if (ret_val) |
1309 | goto out; | 1263 | goto out; |
1310 | 1264 | ||
1311 | /* | 1265 | /* In addition, we must re-enable CRS on Tx for both half and full |
1312 | * In addition, we must re-enable CRS on Tx for both half and full | ||
1313 | * duplex. | 1266 | * duplex. |
1314 | */ | 1267 | */ |
1315 | ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | 1268 | ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); |
@@ -1336,7 +1289,7 @@ out: | |||
1336 | * take effect. | 1289 | * take effect. |
1337 | **/ | 1290 | **/ |
1338 | static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, | 1291 | static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, |
1339 | u16 *phy_ctrl) | 1292 | u16 *phy_ctrl) |
1340 | { | 1293 | { |
1341 | struct e1000_mac_info *mac = &hw->mac; | 1294 | struct e1000_mac_info *mac = &hw->mac; |
1342 | u32 ctrl; | 1295 | u32 ctrl; |
@@ -1417,8 +1370,7 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) | |||
1417 | data); | 1370 | data); |
1418 | if (ret_val) | 1371 | if (ret_val) |
1419 | goto out; | 1372 | goto out; |
1420 | /* | 1373 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used |
1421 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
1422 | * during Dx states where the power conservation is most | 1374 | * during Dx states where the power conservation is most |
1423 | * important. During driver activity we should enable | 1375 | * important. During driver activity we should enable |
1424 | * SmartSpeed, so performance is maintained. | 1376 | * SmartSpeed, so performance is maintained. |
@@ -1461,13 +1413,13 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) | |||
1461 | 1413 | ||
1462 | /* When LPLU is enabled, we should disable SmartSpeed */ | 1414 | /* When LPLU is enabled, we should disable SmartSpeed */ |
1463 | ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, | 1415 | ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, |
1464 | &data); | 1416 | &data); |
1465 | if (ret_val) | 1417 | if (ret_val) |
1466 | goto out; | 1418 | goto out; |
1467 | 1419 | ||
1468 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | 1420 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; |
1469 | ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, | 1421 | ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, |
1470 | data); | 1422 | data); |
1471 | } | 1423 | } |
1472 | 1424 | ||
1473 | out: | 1425 | out: |
@@ -1556,8 +1508,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw) | |||
1556 | s32 ret_val; | 1508 | s32 ret_val; |
1557 | u16 data, offset, mask; | 1509 | u16 data, offset, mask; |
1558 | 1510 | ||
1559 | /* | 1511 | /* Polarity is determined based on the speed of |
1560 | * Polarity is determined based on the speed of | ||
1561 | * our connection. | 1512 | * our connection. |
1562 | */ | 1513 | */ |
1563 | ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); | 1514 | ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); |
@@ -1569,8 +1520,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw) | |||
1569 | offset = IGP01E1000_PHY_PCS_INIT_REG; | 1520 | offset = IGP01E1000_PHY_PCS_INIT_REG; |
1570 | mask = IGP01E1000_PHY_POLARITY_MASK; | 1521 | mask = IGP01E1000_PHY_POLARITY_MASK; |
1571 | } else { | 1522 | } else { |
1572 | /* | 1523 | /* This really only applies to 10Mbps since |
1573 | * This really only applies to 10Mbps since | ||
1574 | * there is no polarity for 100Mbps (always 0). | 1524 | * there is no polarity for 100Mbps (always 0). |
1575 | */ | 1525 | */ |
1576 | offset = IGP01E1000_PHY_PORT_STATUS; | 1526 | offset = IGP01E1000_PHY_PORT_STATUS; |
@@ -1589,7 +1539,7 @@ out: | |||
1589 | } | 1539 | } |
1590 | 1540 | ||
1591 | /** | 1541 | /** |
1592 | * igb_wait_autoneg - Wait for auto-neg compeletion | 1542 | * igb_wait_autoneg - Wait for auto-neg completion |
1593 | * @hw: pointer to the HW structure | 1543 | * @hw: pointer to the HW structure |
1594 | * | 1544 | * |
1595 | * Waits for auto-negotiation to complete or for the auto-negotiation time | 1545 | * Waits for auto-negotiation to complete or for the auto-negotiation time |
@@ -1613,8 +1563,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw) | |||
1613 | msleep(100); | 1563 | msleep(100); |
1614 | } | 1564 | } |
1615 | 1565 | ||
1616 | /* | 1566 | /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation |
1617 | * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation | ||
1618 | * has completed. | 1567 | * has completed. |
1619 | */ | 1568 | */ |
1620 | return ret_val; | 1569 | return ret_val; |
@@ -1630,21 +1579,19 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw) | |||
1630 | * Polls the PHY status register for link, 'iterations' number of times. | 1579 | * Polls the PHY status register for link, 'iterations' number of times. |
1631 | **/ | 1580 | **/ |
1632 | s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, | 1581 | s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, |
1633 | u32 usec_interval, bool *success) | 1582 | u32 usec_interval, bool *success) |
1634 | { | 1583 | { |
1635 | s32 ret_val = 0; | 1584 | s32 ret_val = 0; |
1636 | u16 i, phy_status; | 1585 | u16 i, phy_status; |
1637 | 1586 | ||
1638 | for (i = 0; i < iterations; i++) { | 1587 | for (i = 0; i < iterations; i++) { |
1639 | /* | 1588 | /* Some PHYs require the PHY_STATUS register to be read |
1640 | * Some PHYs require the PHY_STATUS register to be read | ||
1641 | * twice due to the link bit being sticky. No harm doing | 1589 | * twice due to the link bit being sticky. No harm doing |
1642 | * it across the board. | 1590 | * it across the board. |
1643 | */ | 1591 | */ |
1644 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); | 1592 | ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); |
1645 | if (ret_val) { | 1593 | if (ret_val) { |
1646 | /* | 1594 | /* If the first read fails, another entity may have |
1647 | * If the first read fails, another entity may have | ||
1648 | * ownership of the resources, wait and try again to | 1595 | * ownership of the resources, wait and try again to |
1649 | * see if they have relinquished the resources yet. | 1596 | * see if they have relinquished the resources yet. |
1650 | */ | 1597 | */ |
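The double read above works around the latching behavior of the MII status register: the Link Status bit is sticky-low, so only the second read reflects the current link state. A sketch of the pattern (the bit value is an assumption; the stub replaces phy.ops.read_reg and always reports link so the example runs):

#include <stdint.h>

#define MII_SR_LINK_STATUS 0x0004  /* assumed PHY_STATUS link bit */

static int phy_read_status(uint16_t *status)
{
	*status = MII_SR_LINK_STATUS;
	return 0;
}

static int phy_link_up(int *link)
{
	uint16_t status;
	int ret;

	ret = phy_read_status(&status);  /* first read clears the latch */
	if (!ret)
		ret = phy_read_status(&status);  /* second read is live */
	if (!ret)
		*link = !!(status & MII_SR_LINK_STATUS);
	return ret;
}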
@@ -1834,10 +1781,10 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) | |||
1834 | u16 cur_agc_index, max_agc_index = 0; | 1781 | u16 cur_agc_index, max_agc_index = 0; |
1835 | u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; | 1782 | u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; |
1836 | static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { | 1783 | static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { |
1837 | IGP02E1000_PHY_AGC_A, | 1784 | IGP02E1000_PHY_AGC_A, |
1838 | IGP02E1000_PHY_AGC_B, | 1785 | IGP02E1000_PHY_AGC_B, |
1839 | IGP02E1000_PHY_AGC_C, | 1786 | IGP02E1000_PHY_AGC_C, |
1840 | IGP02E1000_PHY_AGC_D | 1787 | IGP02E1000_PHY_AGC_D |
1841 | }; | 1788 | }; |
1842 | 1789 | ||
1843 | /* Read the AGC registers for all channels */ | 1790 | /* Read the AGC registers for all channels */ |
@@ -1846,8 +1793,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) | |||
1846 | if (ret_val) | 1793 | if (ret_val) |
1847 | goto out; | 1794 | goto out; |
1848 | 1795 | ||
1849 | /* | 1796 | /* Getting bits 15:9, which represent the combination of |
1850 | * Getting bits 15:9, which represent the combination of | ||
1851 | * coarse and fine gain values. The result is a number | 1797 | * coarse and fine gain values. The result is a number |
1852 | * that can be put into the lookup table to obtain the | 1798 | * that can be put into the lookup table to obtain the |
1853 | * approximate cable length. | 1799 | * approximate cable length. |
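Extracting "bits 15:9" as described reduces the raw AGC reading to an index into the IGP cable length table above. With assumed shift/mask values:

#include <stdint.h>

#define AGC_LENGTH_SHIFT 9     /* assumed: index lives in bits 15:9 */
#define AGC_LENGTH_MASK  0x7F

static uint16_t agc_index(uint16_t agc_reg)
{
	/* coarse + fine gain combination -> cable length table index */
	return (uint16_t)((agc_reg >> AGC_LENGTH_SHIFT) & AGC_LENGTH_MASK);
}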
@@ -2167,15 +2113,13 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw) | |||
2167 | hw->phy.ops.write_reg(hw, 0x1796, 0x0008); | 2113 | hw->phy.ops.write_reg(hw, 0x1796, 0x0008); |
2168 | /* Change cg_icount + enable integbp for channels BCD */ | 2114 | /* Change cg_icount + enable integbp for channels BCD */ |
2169 | hw->phy.ops.write_reg(hw, 0x1798, 0xD008); | 2115 | hw->phy.ops.write_reg(hw, 0x1798, 0xD008); |
2170 | /* | 2116 | /* Change cg_icount + enable integbp + change prop_factor_master |
2171 | * Change cg_icount + enable integbp + change prop_factor_master | ||
2172 | * to 8 for channel A | 2117 | * to 8 for channel A |
2173 | */ | 2118 | */ |
2174 | hw->phy.ops.write_reg(hw, 0x1898, 0xD918); | 2119 | hw->phy.ops.write_reg(hw, 0x1898, 0xD918); |
2175 | /* Disable AHT in Slave mode on channel A */ | 2120 | /* Disable AHT in Slave mode on channel A */ |
2176 | hw->phy.ops.write_reg(hw, 0x187A, 0x0800); | 2121 | hw->phy.ops.write_reg(hw, 0x187A, 0x0800); |
2177 | /* | 2122 | /* Enable LPLU and disable AN to 1000 in non-D0a states, |
2178 | * Enable LPLU and disable AN to 1000 in non-D0a states, | ||
2179 | * Enable SPD+B2B | 2123 | * Enable SPD+B2B |
2180 | */ | 2124 | */ |
2181 | hw->phy.ops.write_reg(hw, 0x0019, 0x008D); | 2125 | hw->phy.ops.write_reg(hw, 0x0019, 0x008D); |
@@ -2257,8 +2201,8 @@ static s32 igb_check_polarity_82580(struct e1000_hw *hw) | |||
2257 | 2201 | ||
2258 | if (!ret_val) | 2202 | if (!ret_val) |
2259 | phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) | 2203 | phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) |
2260 | ? e1000_rev_polarity_reversed | 2204 | ? e1000_rev_polarity_reversed |
2261 | : e1000_rev_polarity_normal; | 2205 | : e1000_rev_polarity_normal; |
2262 | 2206 | ||
2263 | return ret_val; | 2207 | return ret_val; |
2264 | } | 2208 | } |
@@ -2278,7 +2222,6 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) | |||
2278 | u16 phy_data; | 2222 | u16 phy_data; |
2279 | bool link; | 2223 | bool link; |
2280 | 2224 | ||
2281 | |||
2282 | ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); | 2225 | ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); |
2283 | if (ret_val) | 2226 | if (ret_val) |
2284 | goto out; | 2227 | goto out; |
@@ -2289,8 +2232,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) | |||
2289 | if (ret_val) | 2232 | if (ret_val) |
2290 | goto out; | 2233 | goto out; |
2291 | 2234 | ||
2292 | /* | 2235 | /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI |
2293 | * Clear Auto-Crossover to force MDI manually. 82580 requires MDI | ||
2294 | * forced whenever speed and duplex are forced. | 2236 | * forced whenever speed and duplex are forced. |
2295 | */ | 2237 | */ |
2296 | ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); | 2238 | ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); |
@@ -2310,10 +2252,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) | |||
2310 | if (phy->autoneg_wait_to_complete) { | 2252 | if (phy->autoneg_wait_to_complete) { |
2311 | hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); | 2253 | hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); |
2312 | 2254 | ||
2313 | ret_val = igb_phy_has_link(hw, | 2255 | ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); |
2314 | PHY_FORCE_LIMIT, | ||
2315 | 100000, | ||
2316 | &link); | ||
2317 | if (ret_val) | 2256 | if (ret_val) |
2318 | goto out; | 2257 | goto out; |
2319 | 2258 | ||
@@ -2321,10 +2260,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) | |||
2321 | hw_dbg("Link taking longer than expected.\n"); | 2260 | hw_dbg("Link taking longer than expected.\n"); |
2322 | 2261 | ||
2323 | /* Try once more */ | 2262 | /* Try once more */ |
2324 | ret_val = igb_phy_has_link(hw, | 2263 | ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); |
2325 | PHY_FORCE_LIMIT, | ||
2326 | 100000, | ||
2327 | &link); | ||
2328 | if (ret_val) | 2264 | if (ret_val) |
2329 | goto out; | 2265 | goto out; |
2330 | } | 2266 | } |
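The two hunks above collapse multi-line igb_phy_has_link() calls onto single lines; the call itself polls the PHY up to PHY_FORCE_LIMIT times with 100000 microseconds between reads, and the driver retries the whole wait once if the first pass times out. A user-space sketch of that poll-then-retry shape (PHY_FORCE_LIMIT's value is an assumption here, and the link readback is a stub):

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define PHY_FORCE_LIMIT 20  /* assumed poll count */

    static bool phy_link_up(void) { return false; }  /* stand-in readback */

    static bool wait_for_link(unsigned int iterations, unsigned int usec)
    {
        while (iterations--) {
            if (phy_link_up())
                return true;
            usleep(usec);
        }
        return false;
    }

    int main(void)
    {
        /* first wait, then exactly one more try, mirroring the driver */
        bool link = wait_for_link(PHY_FORCE_LIMIT, 100000);

        if (!link)
            link = wait_for_link(PHY_FORCE_LIMIT, 100000);
        printf("link %s\n", link ? "up" : "down");
        return 0;
    }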
@@ -2349,7 +2285,6 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw) | |||
2349 | u16 data; | 2285 | u16 data; |
2350 | bool link; | 2286 | bool link; |
2351 | 2287 | ||
2352 | |||
2353 | ret_val = igb_phy_has_link(hw, 1, 0, &link); | 2288 | ret_val = igb_phy_has_link(hw, 1, 0, &link); |
2354 | if (ret_val) | 2289 | if (ret_val) |
2355 | goto out; | 2290 | goto out; |
@@ -2383,12 +2318,12 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw) | |||
2383 | goto out; | 2318 | goto out; |
2384 | 2319 | ||
2385 | phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) | 2320 | phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) |
2386 | ? e1000_1000t_rx_status_ok | 2321 | ? e1000_1000t_rx_status_ok |
2387 | : e1000_1000t_rx_status_not_ok; | 2322 | : e1000_1000t_rx_status_not_ok; |
2388 | 2323 | ||
2389 | phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) | 2324 | phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) |
2390 | ? e1000_1000t_rx_status_ok | 2325 | ? e1000_1000t_rx_status_ok |
2391 | : e1000_1000t_rx_status_not_ok; | 2326 | : e1000_1000t_rx_status_not_ok; |
2392 | } else { | 2327 | } else { |
2393 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; | 2328 | phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; |
2394 | phy->local_rx = e1000_1000t_rx_status_undefined; | 2329 | phy->local_rx = e1000_1000t_rx_status_undefined; |
@@ -2412,13 +2347,12 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw) | |||
2412 | s32 ret_val; | 2347 | s32 ret_val; |
2413 | u16 phy_data, length; | 2348 | u16 phy_data, length; |
2414 | 2349 | ||
2415 | |||
2416 | ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); | 2350 | ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); |
2417 | if (ret_val) | 2351 | if (ret_val) |
2418 | goto out; | 2352 | goto out; |
2419 | 2353 | ||
2420 | length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> | 2354 | length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> |
2421 | I82580_DSTATUS_CABLE_LENGTH_SHIFT; | 2355 | I82580_DSTATUS_CABLE_LENGTH_SHIFT; |
2422 | 2356 | ||
2423 | if (length == E1000_CABLE_LENGTH_UNDEFINED) | 2357 | if (length == E1000_CABLE_LENGTH_UNDEFINED) |
2424 | ret_val = -E1000_ERR_PHY; | 2358 | ret_val = -E1000_ERR_PHY; |
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 15343286082e..971b6389358f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h | |||
@@ -117,21 +117,21 @@ | |||
117 | #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) | 117 | #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) |
118 | 118 | ||
119 | /* DMA Coalescing registers */ | 119 | /* DMA Coalescing registers */ |
120 | #define E1000_DMACR 0x02508 /* Control Register */ | 120 | #define E1000_DMACR 0x02508 /* Control Register */ |
121 | #define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ | 121 | #define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ |
122 | #define E1000_DMCTLX 0x02514 /* Time to Lx Request */ | 122 | #define E1000_DMCTLX 0x02514 /* Time to Lx Request */ |
123 | #define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ | 123 | #define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ |
124 | #define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ | 124 | #define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ |
125 | #define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ | 125 | #define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ |
126 | #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ | 126 | #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ |
127 | 127 | ||
128 | /* TX Rate Limit Registers */ | 128 | /* TX Rate Limit Registers */ |
129 | #define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ | 129 | #define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ |
130 | #define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ | 130 | #define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ |
131 | #define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ | 131 | #define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ |
132 | 132 | ||
133 | /* Split and Replication RX Control - RW */ | 133 | /* Split and Replication RX Control - RW */ |
134 | #define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ | 134 | #define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ |
135 | 135 | ||
136 | /* Thermal sensor configuration and status registers */ | 136 | /* Thermal sensor configuration and status registers */ |
137 | #define E1000_THMJT 0x08100 /* Junction Temperature */ | 137 | #define E1000_THMJT 0x08100 /* Junction Temperature */ |
@@ -140,8 +140,7 @@ | |||
140 | #define E1000_THHIGHTC 0x0810C /* High Threshold Control */ | 140 | #define E1000_THHIGHTC 0x0810C /* High Threshold Control */ |
141 | #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ | 141 | #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ |
142 | 142 | ||
143 | /* | 143 | /* Convenience macros |
144 | * Convenience macros | ||
145 | * | 144 | * |
146 | * Note: "_n" is the queue number of the register to be written to. | 145 | * Note: "_n" is the queue number of the register to be written to. |
147 | * | 146 | * |
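The convenience-macro comment above explains that "_n" selects a queue: each per-queue register sits at a fixed stride past a base address. A quick standalone check using the E1000_RQDPC macro exactly as it appears earlier in this header:

    #include <stdio.h>

    /* per-queue register macro from e1000_regs.h: base + n * stride */
    #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))

    int main(void)
    {
        int q;

        for (q = 0; q < 4; q++)  /* 0x0C030, 0x0C070, 0x0C0B0, 0x0C0F0 */
            printf("RQDPC[%d] at 0x%05X\n", q, E1000_RQDPC(q));
        return 0;
    }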
@@ -287,7 +286,7 @@ | |||
287 | #define E1000_RFCTL 0x05008 /* Receive Filter Control*/ | 286 | #define E1000_RFCTL 0x05008 /* Receive Filter Control*/ |
288 | #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ | 287 | #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ |
289 | #define E1000_RA 0x05400 /* Receive Address - RW Array */ | 288 | #define E1000_RA 0x05400 /* Receive Address - RW Array */ |
290 | #define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ | 289 | #define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ |
291 | #define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) | 290 | #define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) |
292 | #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ | 291 | #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ |
293 | (0x054E0 + ((_i - 16) * 8))) | 292 | (0x054E0 + ((_i - 16) * 8))) |
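The E1000_RAL macro above encodes the split receive-address array that the E1000_RA2 comment refers to: entries 0-15 live at the original base, while entry 16 onward jumps to the second half at 0x054E0. A standalone check of the boundary:

    #include <stdio.h>

    /* receive-address macro from the hunk above (extra parens added) */
    #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
                           (0x054E0 + (((_i) - 16) * 8)))

    int main(void)
    {
        printf("RAL(15) = 0x%05X\n", E1000_RAL(15));  /* 0x05478 */
        printf("RAL(16) = 0x%05X\n", E1000_RAL(16));  /* 0x054E0 */
        return 0;
    }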
@@ -360,21 +359,21 @@ | |||
360 | (readl(hw->hw_addr + reg + ((offset) << 2))) | 359 | (readl(hw->hw_addr + reg + ((offset) << 2))) |
361 | 360 | ||
362 | /* DMA Coalescing registers */ | 361 | /* DMA Coalescing registers */ |
363 | #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ | 362 | #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ |
364 | 363 | ||
365 | /* Energy Efficient Ethernet "EEE" register */ | 364 | /* Energy Efficient Ethernet "EEE" register */ |
366 | #define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ | 365 | #define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ |
367 | #define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ | 366 | #define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ |
368 | #define E1000_EEE_SU 0X0E34 /* EEE Setup */ | 367 | #define E1000_EEE_SU 0X0E34 /* EEE Setup */ |
369 | 368 | ||
370 | /* Thermal Sensor Register */ | 369 | /* Thermal Sensor Register */ |
371 | #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ | 370 | #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ |
372 | 371 | ||
373 | /* OS2BMC Registers */ | 372 | /* OS2BMC Registers */ |
374 | #define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ | 373 | #define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ |
375 | #define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ | 374 | #define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ |
376 | #define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ | 375 | #define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ |
377 | #define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ | 376 | #define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ |
378 | 377 | ||
379 | #define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ | 378 | #define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ |
380 | #define E1000_I210_FLMNGCTL 0x12038 | 379 | #define E1000_I210_FLMNGCTL 0x12038 |
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 7cb039827a0c..cef8ec114bd8 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h | |||
@@ -44,54 +44,54 @@ | |||
44 | 44 | ||
45 | struct igb_adapter; | 45 | struct igb_adapter; |
46 | 46 | ||
47 | #define E1000_PCS_CFG_IGN_SD 1 | 47 | #define E1000_PCS_CFG_IGN_SD 1 |
48 | 48 | ||
49 | /* Interrupt defines */ | 49 | /* Interrupt defines */ |
50 | #define IGB_START_ITR 648 /* ~6000 ints/sec */ | 50 | #define IGB_START_ITR 648 /* ~6000 ints/sec */ |
51 | #define IGB_4K_ITR 980 | 51 | #define IGB_4K_ITR 980 |
52 | #define IGB_20K_ITR 196 | 52 | #define IGB_20K_ITR 196 |
53 | #define IGB_70K_ITR 56 | 53 | #define IGB_70K_ITR 56 |
54 | 54 | ||
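The ITR defines above pair register values with approximate interrupt rates ("648 /* ~6000 ints/sec */"). The comments work out if the interval register ticks in 0.25-microsecond units, i.e. rate ≈ 4,000,000 / itr; that tick size is an inference from the numbers, not stated in this hunk:

    #include <stdio.h>

    /* ITR values from igb.h; rate = 4e6 / itr assuming a 0.25us tick */
    static const struct { const char *name; unsigned int itr; } itrs[] = {
        { "IGB_START_ITR", 648 },
        { "IGB_4K_ITR",    980 },
        { "IGB_20K_ITR",   196 },
        { "IGB_70K_ITR",    56 },
    };

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < sizeof(itrs) / sizeof(itrs[0]); i++)
            printf("%-14s %4u -> ~%u ints/sec\n",
                   itrs[i].name, itrs[i].itr, 4000000 / itrs[i].itr);
        return 0;
    }

This prints ~6172, ~4081, ~20408, and ~71428 ints/sec, matching the ~6000/4K/20K/70K labels in the defines.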
55 | /* TX/RX descriptor defines */ | 55 | /* TX/RX descriptor defines */ |
56 | #define IGB_DEFAULT_TXD 256 | 56 | #define IGB_DEFAULT_TXD 256 |
57 | #define IGB_DEFAULT_TX_WORK 128 | 57 | #define IGB_DEFAULT_TX_WORK 128 |
58 | #define IGB_MIN_TXD 80 | 58 | #define IGB_MIN_TXD 80 |
59 | #define IGB_MAX_TXD 4096 | 59 | #define IGB_MAX_TXD 4096 |
60 | 60 | ||
61 | #define IGB_DEFAULT_RXD 256 | 61 | #define IGB_DEFAULT_RXD 256 |
62 | #define IGB_MIN_RXD 80 | 62 | #define IGB_MIN_RXD 80 |
63 | #define IGB_MAX_RXD 4096 | 63 | #define IGB_MAX_RXD 4096 |
64 | 64 | ||
65 | #define IGB_DEFAULT_ITR 3 /* dynamic */ | 65 | #define IGB_DEFAULT_ITR 3 /* dynamic */ |
66 | #define IGB_MAX_ITR_USECS 10000 | 66 | #define IGB_MAX_ITR_USECS 10000 |
67 | #define IGB_MIN_ITR_USECS 10 | 67 | #define IGB_MIN_ITR_USECS 10 |
68 | #define NON_Q_VECTORS 1 | 68 | #define NON_Q_VECTORS 1 |
69 | #define MAX_Q_VECTORS 8 | 69 | #define MAX_Q_VECTORS 8 |
70 | 70 | ||
71 | /* Transmit and receive queues */ | 71 | /* Transmit and receive queues */ |
72 | #define IGB_MAX_RX_QUEUES 8 | 72 | #define IGB_MAX_RX_QUEUES 8 |
73 | #define IGB_MAX_RX_QUEUES_82575 4 | 73 | #define IGB_MAX_RX_QUEUES_82575 4 |
74 | #define IGB_MAX_RX_QUEUES_I211 2 | 74 | #define IGB_MAX_RX_QUEUES_I211 2 |
75 | #define IGB_MAX_TX_QUEUES 8 | 75 | #define IGB_MAX_TX_QUEUES 8 |
76 | #define IGB_MAX_VF_MC_ENTRIES 30 | 76 | #define IGB_MAX_VF_MC_ENTRIES 30 |
77 | #define IGB_MAX_VF_FUNCTIONS 8 | 77 | #define IGB_MAX_VF_FUNCTIONS 8 |
78 | #define IGB_MAX_VFTA_ENTRIES 128 | 78 | #define IGB_MAX_VFTA_ENTRIES 128 |
79 | #define IGB_82576_VF_DEV_ID 0x10CA | 79 | #define IGB_82576_VF_DEV_ID 0x10CA |
80 | #define IGB_I350_VF_DEV_ID 0x1520 | 80 | #define IGB_I350_VF_DEV_ID 0x1520 |
81 | 81 | ||
82 | /* NVM version defines */ | 82 | /* NVM version defines */ |
83 | #define IGB_MAJOR_MASK 0xF000 | 83 | #define IGB_MAJOR_MASK 0xF000 |
84 | #define IGB_MINOR_MASK 0x0FF0 | 84 | #define IGB_MINOR_MASK 0x0FF0 |
85 | #define IGB_BUILD_MASK 0x000F | 85 | #define IGB_BUILD_MASK 0x000F |
86 | #define IGB_COMB_VER_MASK 0x00FF | 86 | #define IGB_COMB_VER_MASK 0x00FF |
87 | #define IGB_MAJOR_SHIFT 12 | 87 | #define IGB_MAJOR_SHIFT 12 |
88 | #define IGB_MINOR_SHIFT 4 | 88 | #define IGB_MINOR_SHIFT 4 |
89 | #define IGB_COMB_VER_SHFT 8 | 89 | #define IGB_COMB_VER_SHFT 8 |
90 | #define IGB_NVM_VER_INVALID 0xFFFF | 90 | #define IGB_NVM_VER_INVALID 0xFFFF |
91 | #define IGB_ETRACK_SHIFT 16 | 91 | #define IGB_ETRACK_SHIFT 16 |
92 | #define NVM_ETRACK_WORD 0x0042 | 92 | #define NVM_ETRACK_WORD 0x0042 |
93 | #define NVM_COMB_VER_OFF 0x0083 | 93 | #define NVM_COMB_VER_OFF 0x0083 |
94 | #define NVM_COMB_VER_PTR 0x003d | 94 | #define NVM_COMB_VER_PTR 0x003d |
95 | 95 | ||
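The NVM version masks and shifts above carve one 16-bit word into major/minor/build fields. A worked example using the defines exactly as listed (the sample word is hypothetical):

    #include <stdio.h>

    /* decode an NVM version word with the masks/shifts from igb.h */
    #define IGB_MAJOR_MASK  0xF000
    #define IGB_MINOR_MASK  0x0FF0
    #define IGB_BUILD_MASK  0x000F
    #define IGB_MAJOR_SHIFT 12
    #define IGB_MINOR_SHIFT 4

    int main(void)
    {
        unsigned int fw = 0x5234;  /* hypothetical NVM word */

        printf("version %u.%02x.%x\n",
               (fw & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
               (fw & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
               fw & IGB_BUILD_MASK);  /* prints "version 5.23.4" */
        return 0;
    }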
96 | struct vf_data_storage { | 96 | struct vf_data_storage { |
97 | unsigned char vf_mac_addresses[ETH_ALEN]; | 97 | unsigned char vf_mac_addresses[ETH_ALEN]; |
@@ -121,14 +121,14 @@ struct vf_data_storage { | |||
121 | * descriptors until either it has this many to write back, or the | 121 | * descriptors until either it has this many to write back, or the |
122 | * ITR timer expires. | 122 | * ITR timer expires. |
123 | */ | 123 | */ |
124 | #define IGB_RX_PTHRESH 8 | 124 | #define IGB_RX_PTHRESH 8 |
125 | #define IGB_RX_HTHRESH 8 | 125 | #define IGB_RX_HTHRESH 8 |
126 | #define IGB_TX_PTHRESH 8 | 126 | #define IGB_TX_PTHRESH 8 |
127 | #define IGB_TX_HTHRESH 1 | 127 | #define IGB_TX_HTHRESH 1 |
128 | #define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ | 128 | #define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ |
129 | adapter->msix_entries) ? 1 : 4) | 129 | adapter->msix_entries) ? 1 : 4) |
130 | #define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ | 130 | #define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ |
131 | adapter->msix_entries) ? 1 : 16) | 131 | adapter->msix_entries) ? 1 : 16) |
132 | 132 | ||
133 | /* this is the size past which hardware will drop packets when setting LPE=0 */ | 133 | /* this is the size past which hardware will drop packets when setting LPE=0 */ |
134 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | 134 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 |
@@ -140,17 +140,17 @@ struct vf_data_storage { | |||
140 | #define IGB_RX_BUFSZ IGB_RXBUFFER_2048 | 140 | #define IGB_RX_BUFSZ IGB_RXBUFFER_2048 |
141 | 141 | ||
142 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ | 142 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ |
143 | #define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ | 143 | #define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ |
144 | 144 | ||
145 | #define AUTO_ALL_MODES 0 | 145 | #define AUTO_ALL_MODES 0 |
146 | #define IGB_EEPROM_APME 0x0400 | 146 | #define IGB_EEPROM_APME 0x0400 |
147 | 147 | ||
148 | #ifndef IGB_MASTER_SLAVE | 148 | #ifndef IGB_MASTER_SLAVE |
149 | /* Switch to override PHY master/slave setting */ | 149 | /* Switch to override PHY master/slave setting */ |
150 | #define IGB_MASTER_SLAVE e1000_ms_hw_default | 150 | #define IGB_MASTER_SLAVE e1000_ms_hw_default |
151 | #endif | 151 | #endif |
152 | 152 | ||
153 | #define IGB_MNG_VLAN_NONE -1 | 153 | #define IGB_MNG_VLAN_NONE -1 |
154 | 154 | ||
155 | enum igb_tx_flags { | 155 | enum igb_tx_flags { |
156 | /* cmd_type flags */ | 156 | /* cmd_type flags */ |
@@ -164,11 +164,10 @@ enum igb_tx_flags { | |||
164 | }; | 164 | }; |
165 | 165 | ||
166 | /* VLAN info */ | 166 | /* VLAN info */ |
167 | #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 | 167 | #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 |
168 | #define IGB_TX_FLAGS_VLAN_SHIFT 16 | 168 | #define IGB_TX_FLAGS_VLAN_SHIFT 16 |
169 | 169 | ||
170 | /* | 170 | /* The largest size we can write to the descriptor is 65535. In order to |
171 | * The largest size we can write to the descriptor is 65535. In order to | ||
172 | * maintain a power of two alignment we have to limit ourselves to 32K. | 171 | * maintain a power of two alignment we have to limit ourselves to 32K. |
173 | */ | 172 | */ |
174 | #define IGB_MAX_TXD_PWR 15 | 173 | #define IGB_MAX_TXD_PWR 15 |
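The rewrapped comment above gives the arithmetic behind IGB_MAX_TXD_PWR: the descriptor length field maxes out at 65535, and 32768 (1 << 15) is the largest power of two that still fits, which keeps buffer chunking power-of-two aligned. The driver pairs this define with a 1 << IGB_MAX_TXD_PWR byte limit per descriptor:

    #include <stdio.h>

    #define IGB_MAX_TXD_PWR      15
    #define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)

    int main(void)
    {
        /* 65535 is not a power of two; 32768 is the largest one
         * that still fits in the 16-bit descriptor length field
         */
        printf("max per descriptor: %d bytes\n", IGB_MAX_DATA_PER_TXD);
        return 0;
    }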
@@ -187,7 +186,8 @@ enum igb_tx_flags { | |||
187 | #define IGB_SFF_8472_UNSUP 0x00 | 186 | #define IGB_SFF_8472_UNSUP 0x00 |
188 | 187 | ||
189 | /* wrapper around a pointer to a socket buffer, | 188 | /* wrapper around a pointer to a socket buffer, |
190 | * so a DMA handle can be stored along with the buffer */ | 189 | * so a DMA handle can be stored along with the buffer |
190 | */ | ||
191 | struct igb_tx_buffer { | 191 | struct igb_tx_buffer { |
192 | union e1000_adv_tx_desc *next_to_watch; | 192 | union e1000_adv_tx_desc *next_to_watch; |
193 | unsigned long time_stamp; | 193 | unsigned long time_stamp; |
@@ -306,11 +306,11 @@ enum e1000_ring_flags_t { | |||
306 | 306 | ||
307 | #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) | 307 | #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) |
308 | 308 | ||
309 | #define IGB_RX_DESC(R, i) \ | 309 | #define IGB_RX_DESC(R, i) \ |
310 | (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) | 310 | (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) |
311 | #define IGB_TX_DESC(R, i) \ | 311 | #define IGB_TX_DESC(R, i) \ |
312 | (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) | 312 | (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) |
313 | #define IGB_TX_CTXTDESC(R, i) \ | 313 | #define IGB_TX_CTXTDESC(R, i) \ |
314 | (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) | 314 | (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) |
315 | 315 | ||
316 | /* igb_test_staterr - tests bits within Rx descriptor status and error fields */ | 316 | /* igb_test_staterr - tests bits within Rx descriptor status and error fields */ |
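The IGB_RX_DESC/IGB_TX_DESC macros above all share one shape: reinterpret the ring's raw descriptor block as an array of fixed-size entries and return the address of entry i. A standalone sketch with stand-in types (the real unions live in e1000_82575.h):

    #include <stdio.h>

    union adv_rx_desc { unsigned long long q[2]; };  /* 16-byte stand-in */
    struct ring { void *desc; };

    /* same shape as IGB_RX_DESC() above */
    #define RX_DESC(R, i) (&(((union adv_rx_desc *)((R)->desc))[i]))

    int main(void)
    {
        union adv_rx_desc descs[4];
        struct ring r = { descs };

        printf("entry 2 sits %ld bytes in\n",
               (long)((char *)RX_DESC(&r, 2) - (char *)r.desc));  /* 32 */
        return 0;
    }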
@@ -469,12 +469,12 @@ struct igb_adapter { | |||
469 | #define IGB_FLAG_WOL_SUPPORTED (1 << 8) | 469 | #define IGB_FLAG_WOL_SUPPORTED (1 << 8) |
470 | 470 | ||
471 | /* DMA Coalescing defines */ | 471 | /* DMA Coalescing defines */ |
472 | #define IGB_MIN_TXPBSIZE 20408 | 472 | #define IGB_MIN_TXPBSIZE 20408 |
473 | #define IGB_TX_BUF_4096 4096 | 473 | #define IGB_TX_BUF_4096 4096 |
474 | #define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ | 474 | #define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ |
475 | 475 | ||
476 | #define IGB_82576_TSYNC_SHIFT 19 | 476 | #define IGB_82576_TSYNC_SHIFT 19 |
477 | #define IGB_TS_HDR_LEN 16 | 477 | #define IGB_TS_HDR_LEN 16 |
478 | enum e1000_state_t { | 478 | enum e1000_state_t { |
479 | __IGB_TESTING, | 479 | __IGB_TESTING, |
480 | __IGB_RESETTING, | 480 | __IGB_RESETTING, |
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 6afd7278ad67..08195bd0a23a 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
@@ -248,15 +248,15 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
248 | struct e1000_hw *hw = &adapter->hw; | 248 | struct e1000_hw *hw = &adapter->hw; |
249 | 249 | ||
250 | /* When SoL/IDER sessions are active, autoneg/speed/duplex | 250 | /* When SoL/IDER sessions are active, autoneg/speed/duplex |
251 | * cannot be changed */ | 251 | * cannot be changed |
252 | */ | ||
252 | if (igb_check_reset_block(hw)) { | 253 | if (igb_check_reset_block(hw)) { |
253 | dev_err(&adapter->pdev->dev, | 254 | dev_err(&adapter->pdev->dev, |
254 | "Cannot change link characteristics when SoL/IDER is active.\n"); | 255 | "Cannot change link characteristics when SoL/IDER is active.\n"); |
255 | return -EINVAL; | 256 | return -EINVAL; |
256 | } | 257 | } |
257 | 258 | ||
258 | /* | 259 | /* MDI setting is only allowed when autoneg enabled because |
259 | * MDI setting is only allowed when autoneg enabled because | ||
260 | * some hardware doesn't allow MDI setting when speed or | 260 | * some hardware doesn't allow MDI setting when speed or |
261 | * duplex is forced. | 261 | * duplex is forced. |
262 | */ | 262 | */ |
@@ -305,8 +305,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
305 | 305 | ||
306 | /* MDI-X => 2; MDI => 1; Auto => 3 */ | 306 | /* MDI-X => 2; MDI => 1; Auto => 3 */ |
307 | if (ecmd->eth_tp_mdix_ctrl) { | 307 | if (ecmd->eth_tp_mdix_ctrl) { |
308 | /* | 308 | /* fix up the value for auto (3 => 0) as zero is mapped |
309 | * fix up the value for auto (3 => 0) as zero is mapped | ||
310 | * internally to auto | 309 | * internally to auto |
311 | */ | 310 | */ |
312 | if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) | 311 | if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) |
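The "MDI-X => 2; MDI => 1; Auto => 3" comment and the fix-up rewrapped above capture a small impedance mismatch: ethtool encodes auto as 3, while the driver's internal state uses 0 for auto. A standalone illustration of the remap:

    #include <stdio.h>

    #define ETH_TP_MDI_AUTO 3  /* ethtool's encoding for auto */

    int main(void)
    {
        unsigned int ctrl = ETH_TP_MDI_AUTO;  /* user asked for auto */

        if (ctrl == ETH_TP_MDI_AUTO)
            ctrl = 0;  /* zero maps to auto internally */
        printf("mdix value stored by the driver: %u\n", ctrl);
        return 0;
    }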
@@ -331,8 +330,7 @@ static u32 igb_get_link(struct net_device *netdev) | |||
331 | struct igb_adapter *adapter = netdev_priv(netdev); | 330 | struct igb_adapter *adapter = netdev_priv(netdev); |
332 | struct e1000_mac_info *mac = &adapter->hw.mac; | 331 | struct e1000_mac_info *mac = &adapter->hw.mac; |
333 | 332 | ||
334 | /* | 333 | /* If the link is not reported up to netdev, interrupts are disabled, |
335 | * If the link is not reported up to netdev, interrupts are disabled, | ||
336 | * and so the physical link state may have changed since we last | 334 | * and so the physical link state may have changed since we last |
337 | * looked. Set get_link_status to make sure that the true link | 335 | * looked. Set get_link_status to make sure that the true link |
338 | * state is interrogated, rather than pulling a cached and possibly | 336 | * state is interrogated, rather than pulling a cached and possibly |
@@ -452,7 +450,8 @@ static void igb_get_regs(struct net_device *netdev, | |||
452 | 450 | ||
453 | /* Interrupt */ | 451 | /* Interrupt */ |
454 | /* Reading EICS for EICR because they read the | 452 | /* Reading EICS for EICR because they read the |
455 | * same but EICS does not clear on read */ | 453 | * same but EICS does not clear on read |
454 | */ | ||
456 | regs_buff[13] = rd32(E1000_EICS); | 455 | regs_buff[13] = rd32(E1000_EICS); |
457 | regs_buff[14] = rd32(E1000_EICS); | 456 | regs_buff[14] = rd32(E1000_EICS); |
458 | regs_buff[15] = rd32(E1000_EIMS); | 457 | regs_buff[15] = rd32(E1000_EIMS); |
@@ -460,7 +459,8 @@ static void igb_get_regs(struct net_device *netdev, | |||
460 | regs_buff[17] = rd32(E1000_EIAC); | 459 | regs_buff[17] = rd32(E1000_EIAC); |
461 | regs_buff[18] = rd32(E1000_EIAM); | 460 | regs_buff[18] = rd32(E1000_EIAM); |
462 | /* Reading ICS for ICR because they read the | 461 | /* Reading ICS for ICR because they read the |
463 | * same but ICS does not clear on read */ | 462 | * same but ICS does not clear on read |
463 | */ | ||
464 | regs_buff[19] = rd32(E1000_ICS); | 464 | regs_buff[19] = rd32(E1000_ICS); |
465 | regs_buff[20] = rd32(E1000_ICS); | 465 | regs_buff[20] = rd32(E1000_ICS); |
466 | regs_buff[21] = rd32(E1000_IMS); | 466 | regs_buff[21] = rd32(E1000_IMS); |
@@ -710,12 +710,12 @@ static int igb_get_eeprom(struct net_device *netdev, | |||
710 | 710 | ||
711 | if (hw->nvm.type == e1000_nvm_eeprom_spi) | 711 | if (hw->nvm.type == e1000_nvm_eeprom_spi) |
712 | ret_val = hw->nvm.ops.read(hw, first_word, | 712 | ret_val = hw->nvm.ops.read(hw, first_word, |
713 | last_word - first_word + 1, | 713 | last_word - first_word + 1, |
714 | eeprom_buff); | 714 | eeprom_buff); |
715 | else { | 715 | else { |
716 | for (i = 0; i < last_word - first_word + 1; i++) { | 716 | for (i = 0; i < last_word - first_word + 1; i++) { |
717 | ret_val = hw->nvm.ops.read(hw, first_word + i, 1, | 717 | ret_val = hw->nvm.ops.read(hw, first_word + i, 1, |
718 | &eeprom_buff[i]); | 718 | &eeprom_buff[i]); |
719 | if (ret_val) | 719 | if (ret_val) |
720 | break; | 720 | break; |
721 | } | 721 | } |
@@ -762,15 +762,17 @@ static int igb_set_eeprom(struct net_device *netdev, | |||
762 | ptr = (void *)eeprom_buff; | 762 | ptr = (void *)eeprom_buff; |
763 | 763 | ||
764 | if (eeprom->offset & 1) { | 764 | if (eeprom->offset & 1) { |
765 | /* need read/modify/write of first changed EEPROM word */ | 765 | /* need read/modify/write of first changed EEPROM word |
766 | /* only the second byte of the word is being modified */ | 766 | * only the second byte of the word is being modified |
767 | */ | ||
767 | ret_val = hw->nvm.ops.read(hw, first_word, 1, | 768 | ret_val = hw->nvm.ops.read(hw, first_word, 1, |
768 | &eeprom_buff[0]); | 769 | &eeprom_buff[0]); |
769 | ptr++; | 770 | ptr++; |
770 | } | 771 | } |
771 | if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { | 772 | if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { |
772 | /* need read/modify/write of last changed EEPROM word */ | 773 | /* need read/modify/write of last changed EEPROM word |
773 | /* only the first byte of the word is being modified */ | 774 | * only the first byte of the word is being modified |
775 | */ | ||
774 | ret_val = hw->nvm.ops.read(hw, last_word, 1, | 776 | ret_val = hw->nvm.ops.read(hw, last_word, 1, |
775 | &eeprom_buff[last_word - first_word]); | 777 | &eeprom_buff[last_word - first_word]); |
776 | } | 778 | } |
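The two merged comments above exist because ethtool EEPROM offsets are byte-based while NVM access is word-based: an odd start or end means the neighboring byte of the boundary word must be read back and preserved. A standalone sketch of the byte-to-word bookkeeping (the shift math mirrors how I read igb_set_eeprom, so treat it as inferred):

    #include <stdio.h>

    int main(void)
    {
        unsigned int offset = 3, len = 6;  /* hypothetical byte range */
        unsigned int first_word = offset >> 1;
        unsigned int last_word = (offset + len - 1) >> 1;

        printf("words %u..%u\n", first_word, last_word);  /* 1..4 */
        if (offset & 1)           /* odd start: keep the word's first byte */
            printf("read/modify/write word %u\n", first_word);
        if ((offset + len) & 1)   /* odd end: keep the word's second byte */
            printf("read/modify/write word %u\n", last_word);
        return 0;
    }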
@@ -785,10 +787,11 @@ static int igb_set_eeprom(struct net_device *netdev, | |||
785 | eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); | 787 | eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); |
786 | 788 | ||
787 | ret_val = hw->nvm.ops.write(hw, first_word, | 789 | ret_val = hw->nvm.ops.write(hw, first_word, |
788 | last_word - first_word + 1, eeprom_buff); | 790 | last_word - first_word + 1, eeprom_buff); |
789 | 791 | ||
790 | /* Update the checksum over the first part of the EEPROM if needed | 792 | /* Update the checksum over the first part of the EEPROM if needed |
791 | * and flush shadow RAM for 82573 controllers */ | 793 | * and flush shadow RAM for 82573 controllers |
794 | */ | ||
792 | if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) | 795 | if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) |
793 | hw->nvm.ops.update(hw); | 796 | hw->nvm.ops.update(hw); |
794 | 797 | ||
@@ -805,8 +808,7 @@ static void igb_get_drvinfo(struct net_device *netdev, | |||
805 | strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); | 808 | strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); |
806 | strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); | 809 | strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); |
807 | 810 | ||
808 | /* | 811 | /* EEPROM image version # is reported as firmware version # for |
809 | * EEPROM image version # is reported as firmware version # for | ||
810 | * 82575 controllers | 812 | * 82575 controllers |
811 | */ | 813 | */ |
812 | strlcpy(drvinfo->fw_version, adapter->fw_version, | 814 | strlcpy(drvinfo->fw_version, adapter->fw_version, |
@@ -869,9 +871,11 @@ static int igb_set_ringparam(struct net_device *netdev, | |||
869 | } | 871 | } |
870 | 872 | ||
871 | if (adapter->num_tx_queues > adapter->num_rx_queues) | 873 | if (adapter->num_tx_queues > adapter->num_rx_queues) |
872 | temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); | 874 | temp_ring = vmalloc(adapter->num_tx_queues * |
875 | sizeof(struct igb_ring)); | ||
873 | else | 876 | else |
874 | temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); | 877 | temp_ring = vmalloc(adapter->num_rx_queues * |
878 | sizeof(struct igb_ring)); | ||
875 | 879 | ||
876 | if (!temp_ring) { | 880 | if (!temp_ring) { |
877 | err = -ENOMEM; | 881 | err = -ENOMEM; |
@@ -880,10 +884,9 @@ static int igb_set_ringparam(struct net_device *netdev, | |||
880 | 884 | ||
881 | igb_down(adapter); | 885 | igb_down(adapter); |
882 | 886 | ||
883 | /* | 887 | /* We can't just free everything and then setup again, |
884 | * We can't just free everything and then setup again, | ||
885 | * because the ISRs in MSI-X mode get passed pointers | 888 | * because the ISRs in MSI-X mode get passed pointers |
886 | * to the tx and rx ring structs. | 889 | * to the Tx and Rx ring structs. |
887 | */ | 890 | */ |
888 | if (new_tx_count != adapter->tx_ring_count) { | 891 | if (new_tx_count != adapter->tx_ring_count) { |
889 | for (i = 0; i < adapter->num_tx_queues; i++) { | 892 | for (i = 0; i < adapter->num_tx_queues; i++) { |
@@ -1745,8 +1748,8 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, | |||
1745 | } | 1748 | } |
1746 | 1749 | ||
1747 | static int igb_clean_test_rings(struct igb_ring *rx_ring, | 1750 | static int igb_clean_test_rings(struct igb_ring *rx_ring, |
1748 | struct igb_ring *tx_ring, | 1751 | struct igb_ring *tx_ring, |
1749 | unsigned int size) | 1752 | unsigned int size) |
1750 | { | 1753 | { |
1751 | union e1000_adv_rx_desc *rx_desc; | 1754 | union e1000_adv_rx_desc *rx_desc; |
1752 | struct igb_rx_buffer *rx_buffer_info; | 1755 | struct igb_rx_buffer *rx_buffer_info; |
@@ -1759,7 +1762,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, | |||
1759 | rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); | 1762 | rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); |
1760 | 1763 | ||
1761 | while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { | 1764 | while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { |
1762 | /* check rx buffer */ | 1765 | /* check Rx buffer */ |
1763 | rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; | 1766 | rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; |
1764 | 1767 | ||
1765 | /* sync Rx buffer for CPU read */ | 1768 | /* sync Rx buffer for CPU read */ |
@@ -1778,11 +1781,11 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, | |||
1778 | IGB_RX_BUFSZ, | 1781 | IGB_RX_BUFSZ, |
1779 | DMA_FROM_DEVICE); | 1782 | DMA_FROM_DEVICE); |
1780 | 1783 | ||
1781 | /* unmap buffer on tx side */ | 1784 | /* unmap buffer on Tx side */ |
1782 | tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; | 1785 | tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; |
1783 | igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); | 1786 | igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); |
1784 | 1787 | ||
1785 | /* increment rx/tx next to clean counters */ | 1788 | /* increment Rx/Tx next to clean counters */ |
1786 | rx_ntc++; | 1789 | rx_ntc++; |
1787 | if (rx_ntc == rx_ring->count) | 1790 | if (rx_ntc == rx_ring->count) |
1788 | rx_ntc = 0; | 1791 | rx_ntc = 0; |
@@ -1823,8 +1826,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter) | |||
1823 | igb_create_lbtest_frame(skb, size); | 1826 | igb_create_lbtest_frame(skb, size); |
1824 | skb_put(skb, size); | 1827 | skb_put(skb, size); |
1825 | 1828 | ||
1826 | /* | 1829 | /* Calculate the loop count based on the largest descriptor ring |
1827 | * Calculate the loop count based on the largest descriptor ring | ||
1828 | * The idea is to wrap the largest ring a number of times using 64 | 1830 | * The idea is to wrap the largest ring a number of times using 64 |
1829 | * send/receive pairs during each loop | 1831 | * send/receive pairs during each loop |
1830 | */ | 1832 | */ |
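The rewrapped comment describes sizing the loopback loop so the largest descriptor ring wraps a few times in bursts of 64 frames. The exact formula is outside this hunk; the sketch below shows one plausible reading of it (divide the largest ring by the burst size, wrap twice, plus one), which is an assumption:

    #include <stdio.h>

    int main(void)
    {
        unsigned int tx_count = 256, rx_count = 256;  /* default ring sizes */
        unsigned int largest = tx_count > rx_count ? tx_count : rx_count;
        unsigned int lc = (largest / 64) * 2 + 1;

        printf("loop count: %u bursts of 64 frames\n", lc);  /* 9 */
        return 0;
    }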
@@ -1851,7 +1853,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter) | |||
1851 | break; | 1853 | break; |
1852 | } | 1854 | } |
1853 | 1855 | ||
1854 | /* allow 200 milliseconds for packets to go from tx to rx */ | 1856 | /* allow 200 milliseconds for packets to go from Tx to Rx */ |
1855 | msleep(200); | 1857 | msleep(200); |
1856 | 1858 | ||
1857 | good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); | 1859 | good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); |
@@ -1870,7 +1872,8 @@ static int igb_run_loopback_test(struct igb_adapter *adapter) | |||
1870 | static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) | 1872 | static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) |
1871 | { | 1873 | { |
1872 | /* PHY loopback cannot be performed if SoL/IDER | 1874 | /* PHY loopback cannot be performed if SoL/IDER |
1873 | * sessions are active */ | 1875 | * sessions are active |
1876 | */ | ||
1874 | if (igb_check_reset_block(&adapter->hw)) { | 1877 | if (igb_check_reset_block(&adapter->hw)) { |
1875 | dev_err(&adapter->pdev->dev, | 1878 | dev_err(&adapter->pdev->dev, |
1876 | "Cannot do PHY loopback test when SoL/IDER is active.\n"); | 1879 | "Cannot do PHY loopback test when SoL/IDER is active.\n"); |
@@ -1901,7 +1904,8 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data) | |||
1901 | hw->mac.serdes_has_link = false; | 1904 | hw->mac.serdes_has_link = false; |
1902 | 1905 | ||
1903 | /* On some blade server designs, link establishment | 1906 | /* On some blade server designs, link establishment |
1904 | * could take as long as 2-3 minutes */ | 1907 | * could take as long as 2-3 minutes |
1908 | */ | ||
1905 | do { | 1909 | do { |
1906 | hw->mac.ops.check_for_link(&adapter->hw); | 1910 | hw->mac.ops.check_for_link(&adapter->hw); |
1907 | if (hw->mac.serdes_has_link) | 1911 | if (hw->mac.serdes_has_link) |
@@ -1944,7 +1948,8 @@ static void igb_diag_test(struct net_device *netdev, | |||
1944 | igb_power_up_link(adapter); | 1948 | igb_power_up_link(adapter); |
1945 | 1949 | ||
1946 | /* Link test performed before hardware reset so autoneg doesn't | 1950 | /* Link test performed before hardware reset so autoneg doesn't |
1947 | * interfere with test result */ | 1951 | * interfere with test result |
1952 | */ | ||
1948 | if (igb_link_test(adapter, &data[4])) | 1953 | if (igb_link_test(adapter, &data[4])) |
1949 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1954 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1950 | 1955 | ||
@@ -2009,8 +2014,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
2009 | struct igb_adapter *adapter = netdev_priv(netdev); | 2014 | struct igb_adapter *adapter = netdev_priv(netdev); |
2010 | 2015 | ||
2011 | wol->supported = WAKE_UCAST | WAKE_MCAST | | 2016 | wol->supported = WAKE_UCAST | WAKE_MCAST | |
2012 | WAKE_BCAST | WAKE_MAGIC | | 2017 | WAKE_BCAST | WAKE_MAGIC | |
2013 | WAKE_PHY; | 2018 | WAKE_PHY; |
2014 | wol->wolopts = 0; | 2019 | wol->wolopts = 0; |
2015 | 2020 | ||
2016 | if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) | 2021 | if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) |
@@ -2285,7 +2290,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
2285 | sprintf(p, "rx_queue_%u_alloc_failed", i); | 2290 | sprintf(p, "rx_queue_%u_alloc_failed", i); |
2286 | p += ETH_GSTRING_LEN; | 2291 | p += ETH_GSTRING_LEN; |
2287 | } | 2292 | } |
2288 | /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ | 2293 | /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ |
2289 | break; | 2294 | break; |
2290 | } | 2295 | } |
2291 | } | 2296 | } |
@@ -2384,7 +2389,7 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter, | |||
2384 | } | 2389 | } |
2385 | 2390 | ||
2386 | static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, | 2391 | static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, |
2387 | u32 *rule_locs) | 2392 | u32 *rule_locs) |
2388 | { | 2393 | { |
2389 | struct igb_adapter *adapter = netdev_priv(dev); | 2394 | struct igb_adapter *adapter = netdev_priv(dev); |
2390 | int ret = -EOPNOTSUPP; | 2395 | int ret = -EOPNOTSUPP; |
@@ -2715,32 +2720,32 @@ static void igb_ethtool_complete(struct net_device *netdev) | |||
2715 | } | 2720 | } |
2716 | 2721 | ||
2717 | static const struct ethtool_ops igb_ethtool_ops = { | 2722 | static const struct ethtool_ops igb_ethtool_ops = { |
2718 | .get_settings = igb_get_settings, | 2723 | .get_settings = igb_get_settings, |
2719 | .set_settings = igb_set_settings, | 2724 | .set_settings = igb_set_settings, |
2720 | .get_drvinfo = igb_get_drvinfo, | 2725 | .get_drvinfo = igb_get_drvinfo, |
2721 | .get_regs_len = igb_get_regs_len, | 2726 | .get_regs_len = igb_get_regs_len, |
2722 | .get_regs = igb_get_regs, | 2727 | .get_regs = igb_get_regs, |
2723 | .get_wol = igb_get_wol, | 2728 | .get_wol = igb_get_wol, |
2724 | .set_wol = igb_set_wol, | 2729 | .set_wol = igb_set_wol, |
2725 | .get_msglevel = igb_get_msglevel, | 2730 | .get_msglevel = igb_get_msglevel, |
2726 | .set_msglevel = igb_set_msglevel, | 2731 | .set_msglevel = igb_set_msglevel, |
2727 | .nway_reset = igb_nway_reset, | 2732 | .nway_reset = igb_nway_reset, |
2728 | .get_link = igb_get_link, | 2733 | .get_link = igb_get_link, |
2729 | .get_eeprom_len = igb_get_eeprom_len, | 2734 | .get_eeprom_len = igb_get_eeprom_len, |
2730 | .get_eeprom = igb_get_eeprom, | 2735 | .get_eeprom = igb_get_eeprom, |
2731 | .set_eeprom = igb_set_eeprom, | 2736 | .set_eeprom = igb_set_eeprom, |
2732 | .get_ringparam = igb_get_ringparam, | 2737 | .get_ringparam = igb_get_ringparam, |
2733 | .set_ringparam = igb_set_ringparam, | 2738 | .set_ringparam = igb_set_ringparam, |
2734 | .get_pauseparam = igb_get_pauseparam, | 2739 | .get_pauseparam = igb_get_pauseparam, |
2735 | .set_pauseparam = igb_set_pauseparam, | 2740 | .set_pauseparam = igb_set_pauseparam, |
2736 | .self_test = igb_diag_test, | 2741 | .self_test = igb_diag_test, |
2737 | .get_strings = igb_get_strings, | 2742 | .get_strings = igb_get_strings, |
2738 | .set_phys_id = igb_set_phys_id, | 2743 | .set_phys_id = igb_set_phys_id, |
2739 | .get_sset_count = igb_get_sset_count, | 2744 | .get_sset_count = igb_get_sset_count, |
2740 | .get_ethtool_stats = igb_get_ethtool_stats, | 2745 | .get_ethtool_stats = igb_get_ethtool_stats, |
2741 | .get_coalesce = igb_get_coalesce, | 2746 | .get_coalesce = igb_get_coalesce, |
2742 | .set_coalesce = igb_set_coalesce, | 2747 | .set_coalesce = igb_set_coalesce, |
2743 | .get_ts_info = igb_get_ts_info, | 2748 | .get_ts_info = igb_get_ts_info, |
2744 | .get_rxnfc = igb_get_rxnfc, | 2749 | .get_rxnfc = igb_get_rxnfc, |
2745 | .set_rxnfc = igb_set_rxnfc, | 2750 | .set_rxnfc = igb_set_rxnfc, |
2746 | .get_eee = igb_get_eee, | 2751 | .get_eee = igb_get_eee, |
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index 0478a1abe541..58f1ce967aeb 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c | |||
@@ -45,21 +45,21 @@ static struct i2c_board_info i350_sensor_info = { | |||
45 | 45 | ||
46 | /* hwmon callback functions */ | 46 | /* hwmon callback functions */ |
47 | static ssize_t igb_hwmon_show_location(struct device *dev, | 47 | static ssize_t igb_hwmon_show_location(struct device *dev, |
48 | struct device_attribute *attr, | 48 | struct device_attribute *attr, |
49 | char *buf) | 49 | char *buf) |
50 | { | 50 | { |
51 | struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, | 51 | struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, |
52 | dev_attr); | 52 | dev_attr); |
53 | return sprintf(buf, "loc%u\n", | 53 | return sprintf(buf, "loc%u\n", |
54 | igb_attr->sensor->location); | 54 | igb_attr->sensor->location); |
55 | } | 55 | } |
56 | 56 | ||
57 | static ssize_t igb_hwmon_show_temp(struct device *dev, | 57 | static ssize_t igb_hwmon_show_temp(struct device *dev, |
58 | struct device_attribute *attr, | 58 | struct device_attribute *attr, |
59 | char *buf) | 59 | char *buf) |
60 | { | 60 | { |
61 | struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, | 61 | struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, |
62 | dev_attr); | 62 | dev_attr); |
63 | unsigned int value; | 63 | unsigned int value; |
64 | 64 | ||
65 | /* reset the temp field */ | 65 | /* reset the temp field */ |
@@ -74,11 +74,11 @@ static ssize_t igb_hwmon_show_temp(struct device *dev, | |||
74 | } | 74 | } |
75 | 75 | ||
76 | static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, | 76 | static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, |
77 | struct device_attribute *attr, | 77 | struct device_attribute *attr, |
78 | char *buf) | 78 | char *buf) |
79 | { | 79 | { |
80 | struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, | 80 | struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, |
81 | dev_attr); | 81 | dev_attr); |
82 | unsigned int value = igb_attr->sensor->caution_thresh; | 82 | unsigned int value = igb_attr->sensor->caution_thresh; |
83 | 83 | ||
84 | /* display millidegree */ | 84 | /* display millidegree */ |
@@ -88,11 +88,11 @@ static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, | |||
88 | } | 88 | } |
89 | 89 | ||
90 | static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, | 90 | static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, |
91 | struct device_attribute *attr, | 91 | struct device_attribute *attr, |
92 | char *buf) | 92 | char *buf) |
93 | { | 93 | { |
94 | struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, | 94 | struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, |
95 | dev_attr); | 95 | dev_attr); |
96 | unsigned int value = igb_attr->sensor->max_op_thresh; | 96 | unsigned int value = igb_attr->sensor->max_op_thresh; |
97 | 97 | ||
98 | /* display millidegree */ | 98 | /* display millidegree */ |
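The "display millidegree" comments in these hwmon callbacks reflect the sysfs hwmon convention: temperatures are reported in millidegrees Celsius, so a threshold held in whole degrees is scaled by 1000 before printing. Trivially:

    #include <stdio.h>

    int main(void)
    {
        unsigned int caution_thresh = 90;  /* hypothetical, degrees C */

        /* hwmon sysfs expects millidegrees Celsius */
        printf("caution threshold: %u\n", caution_thresh * 1000);  /* 90000 */
        return 0;
    }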
@@ -111,7 +111,8 @@ static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, | |||
111 | * the data structures we need to get the data to display. | 111 | * the data structures we need to get the data to display. |
112 | */ | 112 | */ |
113 | static int igb_add_hwmon_attr(struct igb_adapter *adapter, | 113 | static int igb_add_hwmon_attr(struct igb_adapter *adapter, |
114 | unsigned int offset, int type) { | 114 | unsigned int offset, int type) |
115 | { | ||
115 | int rc; | 116 | int rc; |
116 | unsigned int n_attr; | 117 | unsigned int n_attr; |
117 | struct hwmon_attr *igb_attr; | 118 | struct hwmon_attr *igb_attr; |
@@ -217,7 +218,7 @@ int igb_sysfs_init(struct igb_adapter *adapter) | |||
217 | */ | 218 | */ |
218 | n_attrs = E1000_MAX_SENSORS * 4; | 219 | n_attrs = E1000_MAX_SENSORS * 4; |
219 | igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), | 220 | igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), |
220 | GFP_KERNEL); | 221 | GFP_KERNEL); |
221 | if (!igb_hwmon->hwmon_list) { | 222 | if (!igb_hwmon->hwmon_list) { |
222 | rc = -ENOMEM; | 223 | rc = -ENOMEM; |
223 | goto err; | 224 | goto err; |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index d838ab1ea96f..c54ba4224ac6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -292,9 +292,7 @@ static const struct igb_reg_info igb_reg_info_tbl[] = { | |||
292 | {} | 292 | {} |
293 | }; | 293 | }; |
294 | 294 | ||
295 | /* | 295 | /* igb_regdump - register printout routine */ |
296 | * igb_regdump - register printout routine | ||
297 | */ | ||
298 | static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) | 296 | static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) |
299 | { | 297 | { |
300 | int n = 0; | 298 | int n = 0; |
@@ -360,9 +358,7 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) | |||
360 | regs[2], regs[3]); | 358 | regs[2], regs[3]); |
361 | } | 359 | } |
362 | 360 | ||
363 | /* | 361 | /* igb_dump - Print registers, Tx-rings and Rx-rings */ |
364 | * igb_dump - Print registers, tx-rings and rx-rings | ||
365 | */ | ||
366 | static void igb_dump(struct igb_adapter *adapter) | 362 | static void igb_dump(struct igb_adapter *adapter) |
367 | { | 363 | { |
368 | struct net_device *netdev = adapter->netdev; | 364 | struct net_device *netdev = adapter->netdev; |
@@ -569,12 +565,13 @@ exit: | |||
569 | return; | 565 | return; |
570 | } | 566 | } |
571 | 567 | ||
572 | /* igb_get_i2c_data - Reads the I2C SDA data bit | 568 | /** |
569 | * igb_get_i2c_data - Reads the I2C SDA data bit | ||
573 | * @hw: pointer to hardware structure | 570 | * @hw: pointer to hardware structure |
574 | * @i2cctl: Current value of I2CCTL register | 571 | * @i2cctl: Current value of I2CCTL register |
575 | * | 572 | * |
576 | * Returns the I2C data bit value | 573 | * Returns the I2C data bit value |
577 | */ | 574 | **/ |
578 | static int igb_get_i2c_data(void *data) | 575 | static int igb_get_i2c_data(void *data) |
579 | { | 576 | { |
580 | struct igb_adapter *adapter = (struct igb_adapter *)data; | 577 | struct igb_adapter *adapter = (struct igb_adapter *)data; |
@@ -584,12 +581,13 @@ static int igb_get_i2c_data(void *data) | |||
584 | return ((i2cctl & E1000_I2C_DATA_IN) != 0); | 581 | return ((i2cctl & E1000_I2C_DATA_IN) != 0); |
585 | } | 582 | } |
586 | 583 | ||
587 | /* igb_set_i2c_data - Sets the I2C data bit | 584 | /** |
585 | * igb_set_i2c_data - Sets the I2C data bit | ||
588 | * @data: pointer to hardware structure | 586 | * @data: pointer to hardware structure |
589 | * @state: I2C data value (0 or 1) to set | 587 | * @state: I2C data value (0 or 1) to set |
590 | * | 588 | * |
591 | * Sets the I2C data bit | 589 | * Sets the I2C data bit |
592 | */ | 590 | **/ |
593 | static void igb_set_i2c_data(void *data, int state) | 591 | static void igb_set_i2c_data(void *data, int state) |
594 | { | 592 | { |
595 | struct igb_adapter *adapter = (struct igb_adapter *)data; | 593 | struct igb_adapter *adapter = (struct igb_adapter *)data; |
@@ -608,12 +606,13 @@ static void igb_set_i2c_data(void *data, int state) | |||
608 | 606 | ||
609 | } | 607 | } |
610 | 608 | ||
611 | /* igb_set_i2c_clk - Sets the I2C SCL clock | 609 | /** |
610 | * igb_set_i2c_clk - Sets the I2C SCL clock | ||
612 | * @data: pointer to hardware structure | 611 | * @data: pointer to hardware structure |
613 | * @state: state to set clock | 612 | * @state: state to set clock |
614 | * | 613 | * |
615 | * Sets the I2C clock line to state | 614 | * Sets the I2C clock line to state |
616 | */ | 615 | **/ |
617 | static void igb_set_i2c_clk(void *data, int state) | 616 | static void igb_set_i2c_clk(void *data, int state) |
618 | { | 617 | { |
619 | struct igb_adapter *adapter = (struct igb_adapter *)data; | 618 | struct igb_adapter *adapter = (struct igb_adapter *)data; |
@@ -631,11 +630,12 @@ static void igb_set_i2c_clk(void *data, int state) | |||
631 | wrfl(); | 630 | wrfl(); |
632 | } | 631 | } |
633 | 632 | ||
634 | /* igb_get_i2c_clk - Gets the I2C SCL clock state | 633 | /** |
634 | * igb_get_i2c_clk - Gets the I2C SCL clock state | ||
635 | * @data: pointer to hardware structure | 635 | * @data: pointer to hardware structure |
636 | * | 636 | * |
637 | * Gets the I2C clock state | 637 | * Gets the I2C clock state |
638 | */ | 638 | **/ |
639 | static int igb_get_i2c_clk(void *data) | 639 | static int igb_get_i2c_clk(void *data) |
640 | { | 640 | { |
641 | struct igb_adapter *adapter = (struct igb_adapter *)data; | 641 | struct igb_adapter *adapter = (struct igb_adapter *)data; |
@@ -655,8 +655,10 @@ static const struct i2c_algo_bit_data igb_i2c_algo = { | |||
655 | }; | 655 | }; |
656 | 656 | ||
657 | /** | 657 | /** |
658 | * igb_get_hw_dev - return device | 658 | * igb_get_hw_dev - return device |
659 | * used by hardware layer to print debugging information | 659 | * @hw: pointer to hardware structure |
660 | * | ||
661 | * used by hardware layer to print debugging information | ||
660 | **/ | 662 | **/ |
661 | struct net_device *igb_get_hw_dev(struct e1000_hw *hw) | 663 | struct net_device *igb_get_hw_dev(struct e1000_hw *hw) |
662 | { | 664 | { |
@@ -665,10 +667,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw) | |||
665 | } | 667 | } |
666 | 668 | ||
667 | /** | 669 | /** |
668 | * igb_init_module - Driver Registration Routine | 670 | * igb_init_module - Driver Registration Routine |
669 | * | 671 | * |
670 | * igb_init_module is the first routine called when the driver is | 672 | * igb_init_module is the first routine called when the driver is |
671 | * loaded. All it does is register with the PCI subsystem. | 673 | * loaded. All it does is register with the PCI subsystem. |
672 | **/ | 674 | **/ |
673 | static int __init igb_init_module(void) | 675 | static int __init igb_init_module(void) |
674 | { | 676 | { |
@@ -688,10 +690,10 @@ static int __init igb_init_module(void) | |||
688 | module_init(igb_init_module); | 690 | module_init(igb_init_module); |
689 | 691 | ||
690 | /** | 692 | /** |
691 | * igb_exit_module - Driver Exit Cleanup Routine | 693 | * igb_exit_module - Driver Exit Cleanup Routine |
692 | * | 694 | * |
693 | * igb_exit_module is called just before the driver is removed | 695 | * igb_exit_module is called just before the driver is removed |
694 | * from memory. | 696 | * from memory. |
695 | **/ | 697 | **/ |
696 | static void __exit igb_exit_module(void) | 698 | static void __exit igb_exit_module(void) |
697 | { | 699 | { |
@@ -705,11 +707,11 @@ module_exit(igb_exit_module); | |||
705 | 707 | ||
706 | #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) | 708 | #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) |
707 | /** | 709 | /** |
708 | * igb_cache_ring_register - Descriptor ring to register mapping | 710 | * igb_cache_ring_register - Descriptor ring to register mapping |
709 | * @adapter: board private structure to initialize | 711 | * @adapter: board private structure to initialize |
710 | * | 712 | * |
711 | * Once we know the feature-set enabled for the device, we'll cache | 713 | * Once we know the feature-set enabled for the device, we'll cache |
712 | * the register offset the descriptor ring is assigned to. | 714 | * the register offset the descriptor ring is assigned to. |
713 | **/ | 715 | **/ |
714 | static void igb_cache_ring_register(struct igb_adapter *adapter) | 716 | static void igb_cache_ring_register(struct igb_adapter *adapter) |
715 | { | 717 | { |
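The Q_IDX_82576 macro above implements the ring-to-register mapping that igb_cache_ring_register caches: the low bit of the queue index selects one of two columns (8 registers apart) and the remaining bits select the row, interleaving consecutive queues. A standalone table of the mapping, using the macro verbatim:

    #include <stdio.h>

    #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))

    int main(void)
    {
        int i;

        /* 0,1,2,3,... -> 0,8,1,9,... : odd/even queues split columns */
        for (i = 0; i < 8; i++)
            printf("queue %d -> reg index %d\n", i, Q_IDX_82576(i));
        return 0;
    }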
@@ -726,7 +728,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) | |||
726 | if (adapter->vfs_allocated_count) { | 728 | if (adapter->vfs_allocated_count) { |
727 | for (; i < adapter->rss_queues; i++) | 729 | for (; i < adapter->rss_queues; i++) |
728 | adapter->rx_ring[i]->reg_idx = rbase_offset + | 730 | adapter->rx_ring[i]->reg_idx = rbase_offset + |
729 | Q_IDX_82576(i); | 731 | Q_IDX_82576(i); |
730 | } | 732 | } |
731 | case e1000_82575: | 733 | case e1000_82575: |
732 | case e1000_82580: | 734 | case e1000_82580: |
@@ -785,9 +787,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) | |||
785 | switch (hw->mac.type) { | 787 | switch (hw->mac.type) { |
786 | case e1000_82575: | 788 | case e1000_82575: |
787 | /* The 82575 assigns vectors using a bitmask, which matches the | 789 | /* The 82575 assigns vectors using a bitmask, which matches the |
788 | bitmask for the EICR/EIMS/EIMC registers. To assign one | 790 | * bitmask for the EICR/EIMS/EIMC registers. To assign one |
789 | or more queues to a vector, we write the appropriate bits | 791 | * or more queues to a vector, we write the appropriate bits |
790 | into the MSIXBM register for that vector. */ | 792 | * into the MSIXBM register for that vector. |
793 | */ | ||
791 | if (rx_queue > IGB_N0_QUEUE) | 794 | if (rx_queue > IGB_N0_QUEUE) |
792 | msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; | 795 | msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; |
793 | if (tx_queue > IGB_N0_QUEUE) | 796 | if (tx_queue > IGB_N0_QUEUE) |
@@ -798,8 +801,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) | |||
798 | q_vector->eims_value = msixbm; | 801 | q_vector->eims_value = msixbm; |
799 | break; | 802 | break; |
800 | case e1000_82576: | 803 | case e1000_82576: |
801 | /* | 804 | /* 82576 uses a table that essentially consists of 2 columns |
802 | * 82576 uses a table that essentially consists of 2 columns | ||
803 | * with 8 rows. The ordering is column-major so we use the | 805 | * with 8 rows. The ordering is column-major so we use the |
804 | * lower 3 bits as the row index, and the 4th bit as the | 806 | * lower 3 bits as the row index, and the 4th bit as the |
805 | * column offset. | 807 | * column offset. |
@@ -818,8 +820,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) | |||
818 | case e1000_i350: | 820 | case e1000_i350: |
819 | case e1000_i210: | 821 | case e1000_i210: |
820 | case e1000_i211: | 822 | case e1000_i211: |
821 | /* | 823 | /* On 82580 and newer adapters the scheme is similar to 82576 |
822 | * On 82580 and newer adapters the scheme is similar to 82576 | ||
823 | * however instead of ordering column-major we have things | 824 | * however instead of ordering column-major we have things |
824 | * ordered row-major. So we traverse the table by using | 825 | * ordered row-major. So we traverse the table by using |
825 | * bit 0 as the column offset, and the remaining bits as the | 826 | * bit 0 as the column offset, and the remaining bits as the |
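The comments rewrapped in this hunk describe three vector-assignment schemes: 82575 uses a plain per-queue bitmask, 82576 orders its IVAR table column-major (low 3 bits = row, bit 3 = column), and 82580+ orders it row-major (bit 0 = column, remaining bits = row). A standalone sketch of the index math for the two table-based schemes, inferred from those comments:

    #include <stdio.h>

    int main(void)
    {
        int q = 5;  /* hypothetical queue number */

        /* 82576: column-major, low 3 bits = row, bit 3 = column */
        printf("82576:  ivar index %d, column %d\n", q & 0x7, (q & 0x8) >> 3);

        /* 82580 and newer: row-major, bit 0 = column, upper bits = row */
        printf("82580+: ivar index %d, column %d\n", q >> 1, q & 0x1);
        return 0;
    }

For q = 5 this yields row 5, column 0 on 82576 versus row 2, column 1 on 82580+, showing how the same queue lands in different IVAR slots across generations.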
@@ -848,10 +849,11 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) | |||
848 | } | 849 | } |
849 | 850 | ||
850 | /** | 851 | /** |
851 | * igb_configure_msix - Configure MSI-X hardware | 852 | * igb_configure_msix - Configure MSI-X hardware |
853 | * @adapter: board private structure to initialize | ||
852 | * | 854 | * |
853 | * igb_configure_msix sets up the hardware to properly | 855 | * igb_configure_msix sets up the hardware to properly |
854 | * generate MSI-X interrupts. | 856 | * generate MSI-X interrupts. |
855 | **/ | 857 | **/ |
856 | static void igb_configure_msix(struct igb_adapter *adapter) | 858 | static void igb_configure_msix(struct igb_adapter *adapter) |
857 | { | 859 | { |
@@ -875,8 +877,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) | |||
875 | wr32(E1000_CTRL_EXT, tmp); | 877 | wr32(E1000_CTRL_EXT, tmp); |
876 | 878 | ||
877 | /* enable msix_other interrupt */ | 879 | /* enable msix_other interrupt */ |
878 | array_wr32(E1000_MSIXBM(0), vector++, | 880 | array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); |
879 | E1000_EIMS_OTHER); | ||
880 | adapter->eims_other = E1000_EIMS_OTHER; | 881 | adapter->eims_other = E1000_EIMS_OTHER; |
881 | 882 | ||
882 | break; | 883 | break; |
@@ -887,10 +888,11 @@ static void igb_configure_msix(struct igb_adapter *adapter) | |||
887 | case e1000_i210: | 888 | case e1000_i210: |
888 | case e1000_i211: | 889 | case e1000_i211: |
889 | /* Turn on MSI-X capability first, or our settings | 890 | /* Turn on MSI-X capability first, or our settings |
890 | * won't stick. And it will take days to debug. */ | 891 | * won't stick. And it will take days to debug. |
892 | */ | ||
891 | wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | | 893 | wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | |
892 | E1000_GPIE_PBA | E1000_GPIE_EIAME | | 894 | E1000_GPIE_PBA | E1000_GPIE_EIAME | |
893 | E1000_GPIE_NSICR); | 895 | E1000_GPIE_NSICR); |
894 | 896 | ||
895 | /* enable msix_other interrupt */ | 897 | /* enable msix_other interrupt */ |
896 | adapter->eims_other = 1 << vector; | 898 | adapter->eims_other = 1 << vector; |
@@ -912,10 +914,11 @@ static void igb_configure_msix(struct igb_adapter *adapter) | |||
912 | } | 914 | } |
913 | 915 | ||
914 | /** | 916 | /** |
915 | * igb_request_msix - Initialize MSI-X interrupts | 917 | * igb_request_msix - Initialize MSI-X interrupts |
918 | * @adapter: board private structure to initialize | ||
916 | * | 919 | * |
917 | * igb_request_msix allocates MSI-X vectors and requests interrupts from the | 920 | * igb_request_msix allocates MSI-X vectors and requests interrupts from the |
918 | * kernel. | 921 | * kernel. |
919 | **/ | 922 | **/ |
920 | static int igb_request_msix(struct igb_adapter *adapter) | 923 | static int igb_request_msix(struct igb_adapter *adapter) |
921 | { | 924 | { |
@@ -924,7 +927,7 @@ static int igb_request_msix(struct igb_adapter *adapter) | |||
924 | int i, err = 0, vector = 0, free_vector = 0; | 927 | int i, err = 0, vector = 0, free_vector = 0; |
925 | 928 | ||
926 | err = request_irq(adapter->msix_entries[vector].vector, | 929 | err = request_irq(adapter->msix_entries[vector].vector, |
927 | igb_msix_other, 0, netdev->name, adapter); | 930 | igb_msix_other, 0, netdev->name, adapter); |
928 | if (err) | 931 | if (err) |
929 | goto err_out; | 932 | goto err_out; |
930 | 933 | ||
@@ -948,8 +951,8 @@ static int igb_request_msix(struct igb_adapter *adapter) | |||
948 | sprintf(q_vector->name, "%s-unused", netdev->name); | 951 | sprintf(q_vector->name, "%s-unused", netdev->name); |
949 | 952 | ||
950 | err = request_irq(adapter->msix_entries[vector].vector, | 953 | err = request_irq(adapter->msix_entries[vector].vector, |
951 | igb_msix_ring, 0, q_vector->name, | 954 | igb_msix_ring, 0, q_vector->name, |
952 | q_vector); | 955 | q_vector); |
953 | if (err) | 956 | if (err) |
954 | goto err_free; | 957 | goto err_free; |
955 | } | 958 | } |
@@ -982,13 +985,13 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter) | |||
982 | } | 985 | } |
983 | 986 | ||
984 | /** | 987 | /** |
985 | * igb_free_q_vector - Free memory allocated for specific interrupt vector | 988 | * igb_free_q_vector - Free memory allocated for specific interrupt vector |
986 | * @adapter: board private structure to initialize | 989 | * @adapter: board private structure to initialize |
987 | * @v_idx: Index of vector to be freed | 990 | * @v_idx: Index of vector to be freed |
988 | * | 991 | * |
989 | * This function frees the memory allocated to the q_vector. In addition if | 992 | * This function frees the memory allocated to the q_vector. In addition if |
990 | * NAPI is enabled it will delete any references to the NAPI struct prior | 993 | * NAPI is enabled it will delete any references to the NAPI struct prior |
991 | * to freeing the q_vector. | 994 | * to freeing the q_vector. |
992 | **/ | 995 | **/ |
993 | static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) | 996 | static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) |
994 | { | 997 | { |
@@ -1003,20 +1006,19 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) | |||
1003 | adapter->q_vector[v_idx] = NULL; | 1006 | adapter->q_vector[v_idx] = NULL; |
1004 | netif_napi_del(&q_vector->napi); | 1007 | netif_napi_del(&q_vector->napi); |
1005 | 1008 | ||
1006 | /* | 1009 | /* ixgbe_get_stats64() might access the rings on this vector, |
1007 | * ixgbe_get_stats64() might access the rings on this vector, | ||
1008 | * we must wait a grace period before freeing it. | 1010 | * we must wait a grace period before freeing it. |
1009 | */ | 1011 | */ |
1010 | kfree_rcu(q_vector, rcu); | 1012 | kfree_rcu(q_vector, rcu); |
1011 | } | 1013 | } |
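
kfree_rcu() here defers the actual kfree() until an RCU grace period has elapsed, so a stats reader that raced with teardown and still holds a pointer to the q_vector can never touch freed memory. A minimal sketch of the pattern, assuming (as in igb) a struct with an embedded struct rcu_head named rcu:

    struct igb_q_vector {
            struct rcu_head rcu;    /* reclaimed via RCU, see below */
            /* ... NAPI context, ring pointers, etc. ... */
    };

    /* writer: unpublish the pointer first, then free after a grace period */
    adapter->q_vector[v_idx] = NULL;
    kfree_rcu(q_vector, rcu);       /* second argument names the rcu_head field */

The stats path is expected to bracket its ring accesses with rcu_read_lock()/rcu_read_unlock(), which is what makes waiting out one grace period sufficient.
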
1012 | 1014 | ||
1013 | /** | 1015 | /** |
1014 | * igb_free_q_vectors - Free memory allocated for interrupt vectors | 1016 | * igb_free_q_vectors - Free memory allocated for interrupt vectors |
1015 | * @adapter: board private structure to initialize | 1017 | * @adapter: board private structure to initialize |
1016 | * | 1018 | * |
1017 | * This function frees the memory allocated to the q_vectors. In addition if | 1019 | * This function frees the memory allocated to the q_vectors. In addition if |
1018 | * NAPI is enabled it will delete any references to the NAPI struct prior | 1020 | * NAPI is enabled it will delete any references to the NAPI struct prior |
1019 | * to freeing the q_vector. | 1021 | * to freeing the q_vector. |
1020 | **/ | 1022 | **/ |
1021 | static void igb_free_q_vectors(struct igb_adapter *adapter) | 1023 | static void igb_free_q_vectors(struct igb_adapter *adapter) |
1022 | { | 1024 | { |
@@ -1031,10 +1033,11 @@ static void igb_free_q_vectors(struct igb_adapter *adapter) | |||
1031 | } | 1033 | } |
1032 | 1034 | ||
1033 | /** | 1035 | /** |
1034 | * igb_clear_interrupt_scheme - reset the device to a state of no interrupts | 1036 | * igb_clear_interrupt_scheme - reset the device to a state of no interrupts |
1037 | * @adapter: board private structure to initialize | ||
1035 | * | 1038 | * |
1036 | * This function resets the device so that it has 0 rx queues, tx queues, and | 1039 | * This function resets the device so that it has 0 Rx queues, Tx queues, and |
1037 | * MSI-X interrupts allocated. | 1040 | * MSI-X interrupts allocated. |
1038 | */ | 1041 | */ |
1039 | static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) | 1042 | static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) |
1040 | { | 1043 | { |
@@ -1043,10 +1046,12 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) | |||
1043 | } | 1046 | } |
1044 | 1047 | ||
1045 | /** | 1048 | /** |
1046 | * igb_set_interrupt_capability - set MSI or MSI-X if supported | 1049 | * igb_set_interrupt_capability - set MSI or MSI-X if supported |
1050 | * @adapter: board private structure to initialize | ||
1051 | * @msix: boolean value of MSIX capability | ||
1047 | * | 1052 | * |
1048 | * Attempt to configure interrupts using the best available | 1053 | * Attempt to configure interrupts using the best available |
1049 | * capabilities of the hardware and kernel. | 1054 | * capabilities of the hardware and kernel. |
1050 | **/ | 1055 | **/ |
1051 | static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) | 1056 | static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) |
1052 | { | 1057 | { |
@@ -1063,10 +1068,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) | |||
1063 | else | 1068 | else |
1064 | adapter->num_tx_queues = adapter->rss_queues; | 1069 | adapter->num_tx_queues = adapter->rss_queues; |
1065 | 1070 | ||
1066 | /* start with one vector for every rx queue */ | 1071 | /* start with one vector for every Rx queue */ |
1067 | numvecs = adapter->num_rx_queues; | 1072 | numvecs = adapter->num_rx_queues; |
1068 | 1073 | ||
1069 | /* if tx handler is separate add 1 for every tx queue */ | 1074 | /* if Tx handler is separate add 1 for every Tx queue */ |
1070 | if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) | 1075 | if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) |
1071 | numvecs += adapter->num_tx_queues; | 1076 | numvecs += adapter->num_tx_queues; |
1072 | 1077 | ||
@@ -1128,16 +1133,16 @@ static void igb_add_ring(struct igb_ring *ring, | |||
1128 | } | 1133 | } |
1129 | 1134 | ||
1130 | /** | 1135 | /** |
1131 | * igb_alloc_q_vector - Allocate memory for a single interrupt vector | 1136 | * igb_alloc_q_vector - Allocate memory for a single interrupt vector |
1132 | * @adapter: board private structure to initialize | 1137 | * @adapter: board private structure to initialize |
1133 | * @v_count: q_vectors allocated on adapter, used for ring interleaving | 1138 | * @v_count: q_vectors allocated on adapter, used for ring interleaving |
1134 | * @v_idx: index of vector in adapter struct | 1139 | * @v_idx: index of vector in adapter struct |
1135 | * @txr_count: total number of Tx rings to allocate | 1140 | * @txr_count: total number of Tx rings to allocate |
1136 | * @txr_idx: index of first Tx ring to allocate | 1141 | * @txr_idx: index of first Tx ring to allocate |
1137 | * @rxr_count: total number of Rx rings to allocate | 1142 | * @rxr_count: total number of Rx rings to allocate |
1138 | * @rxr_idx: index of first Rx ring to allocate | 1143 | * @rxr_idx: index of first Rx ring to allocate |
1139 | * | 1144 | * |
1140 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | 1145 | * We allocate one q_vector. If allocation fails we return -ENOMEM. |
1141 | **/ | 1146 | **/ |
1142 | static int igb_alloc_q_vector(struct igb_adapter *adapter, | 1147 | static int igb_alloc_q_vector(struct igb_adapter *adapter, |
1143 | int v_count, int v_idx, | 1148 | int v_count, int v_idx, |
@@ -1231,10 +1236,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, | |||
1231 | if (adapter->hw.mac.type >= e1000_82576) | 1236 | if (adapter->hw.mac.type >= e1000_82576) |
1232 | set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); | 1237 | set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); |
1233 | 1238 | ||
1234 | /* | 1239 | /* On i350, i210, and i211, loopback VLAN packets |
1235 | * On i350, i210, and i211, loopback VLAN packets | ||
1236 | * have the tag byte-swapped. | 1240 | * have the tag byte-swapped. |
1237 | * */ | 1241 | */ |
1238 | if (adapter->hw.mac.type >= e1000_i350) | 1242 | if (adapter->hw.mac.type >= e1000_i350) |
1239 | set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); | 1243 | set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); |
1240 | 1244 | ||
@@ -1251,11 +1255,11 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, | |||
1251 | 1255 | ||
1252 | 1256 | ||
1253 | /** | 1257 | /** |
1254 | * igb_alloc_q_vectors - Allocate memory for interrupt vectors | 1258 | * igb_alloc_q_vectors - Allocate memory for interrupt vectors |
1255 | * @adapter: board private structure to initialize | 1259 | * @adapter: board private structure to initialize |
1256 | * | 1260 | * |
1257 | * We allocate one q_vector per queue interrupt. If allocation fails we | 1261 | * We allocate one q_vector per queue interrupt. If allocation fails we |
1258 | * return -ENOMEM. | 1262 | * return -ENOMEM. |
1259 | **/ | 1263 | **/ |
1260 | static int igb_alloc_q_vectors(struct igb_adapter *adapter) | 1264 | static int igb_alloc_q_vectors(struct igb_adapter *adapter) |
1261 | { | 1265 | { |
@@ -1309,9 +1313,11 @@ err_out: | |||
1309 | } | 1313 | } |
1310 | 1314 | ||
1311 | /** | 1315 | /** |
1312 | * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors | 1316 | * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors |
1317 | * @adapter: board private structure to initialize | ||
1318 | * @msix: boolean value of MSIX capability | ||
1313 | * | 1319 | * |
1314 | * This function initializes the interrupts and allocates all of the queues. | 1320 | * This function initializes the interrupts and allocates all of the queues. |
1315 | **/ | 1321 | **/ |
1316 | static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) | 1322 | static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) |
1317 | { | 1323 | { |
@@ -1336,10 +1342,11 @@ err_alloc_q_vectors: | |||
1336 | } | 1342 | } |
1337 | 1343 | ||
1338 | /** | 1344 | /** |
1339 | * igb_request_irq - initialize interrupts | 1345 | * igb_request_irq - initialize interrupts |
1346 | * @adapter: board private structure to initialize | ||
1340 | * | 1347 | * |
1341 | * Attempts to configure interrupts using the best available | 1348 | * Attempts to configure interrupts using the best available |
1342 | * capabilities of the hardware and kernel. | 1349 | * capabilities of the hardware and kernel. |
1343 | **/ | 1350 | **/ |
1344 | static int igb_request_irq(struct igb_adapter *adapter) | 1351 | static int igb_request_irq(struct igb_adapter *adapter) |
1345 | { | 1352 | { |
@@ -1405,15 +1412,14 @@ static void igb_free_irq(struct igb_adapter *adapter) | |||
1405 | } | 1412 | } |
1406 | 1413 | ||
1407 | /** | 1414 | /** |
1408 | * igb_irq_disable - Mask off interrupt generation on the NIC | 1415 | * igb_irq_disable - Mask off interrupt generation on the NIC |
1409 | * @adapter: board private structure | 1416 | * @adapter: board private structure |
1410 | **/ | 1417 | **/ |
1411 | static void igb_irq_disable(struct igb_adapter *adapter) | 1418 | static void igb_irq_disable(struct igb_adapter *adapter) |
1412 | { | 1419 | { |
1413 | struct e1000_hw *hw = &adapter->hw; | 1420 | struct e1000_hw *hw = &adapter->hw; |
1414 | 1421 | ||
1415 | /* | 1422 | /* we need to be careful when disabling interrupts. The VFs are also |
1416 | * we need to be careful when disabling interrupts. The VFs are also | ||
1417 | * mapped into these registers and so clearing the bits can cause | 1423 | * mapped into these registers and so clearing the bits can cause |
1418 | * issues on the VF drivers so we only need to clear what we set | 1424 | * issues on the VF drivers so we only need to clear what we set |
1419 | */ | 1425 | */ |
@@ -1438,8 +1444,8 @@ static void igb_irq_disable(struct igb_adapter *adapter) | |||
1438 | } | 1444 | } |
1439 | 1445 | ||
1440 | /** | 1446 | /** |
1441 | * igb_irq_enable - Enable default interrupt generation settings | 1447 | * igb_irq_enable - Enable default interrupt generation settings |
1442 | * @adapter: board private structure | 1448 | * @adapter: board private structure |
1443 | **/ | 1449 | **/ |
1444 | static void igb_irq_enable(struct igb_adapter *adapter) | 1450 | static void igb_irq_enable(struct igb_adapter *adapter) |
1445 | { | 1451 | { |
@@ -1488,13 +1494,12 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) | |||
1488 | } | 1494 | } |
1489 | 1495 | ||
1490 | /** | 1496 | /** |
1491 | * igb_release_hw_control - release control of the h/w to f/w | 1497 | * igb_release_hw_control - release control of the h/w to f/w |
1492 | * @adapter: address of board private structure | 1498 | * @adapter: address of board private structure |
1493 | * | ||
1494 | * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. | ||
1495 | * For ASF and Pass Through versions of f/w this means that the | ||
1496 | * driver is no longer loaded. | ||
1497 | * | 1499 | * |
1500 | * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. | ||
1501 | * For ASF and Pass Through versions of f/w this means that the | ||
1502 | * driver is no longer loaded. | ||
1498 | **/ | 1503 | **/ |
1499 | static void igb_release_hw_control(struct igb_adapter *adapter) | 1504 | static void igb_release_hw_control(struct igb_adapter *adapter) |
1500 | { | 1505 | { |
@@ -1508,13 +1513,12 @@ static void igb_release_hw_control(struct igb_adapter *adapter) | |||
1508 | } | 1513 | } |
1509 | 1514 | ||
1510 | /** | 1515 | /** |
1511 | * igb_get_hw_control - get control of the h/w from f/w | 1516 | * igb_get_hw_control - get control of the h/w from f/w |
1512 | * @adapter: address of board private structure | 1517 | * @adapter: address of board private structure |
1513 | * | ||
1514 | * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. | ||
1515 | * For ASF and Pass Through versions of f/w this means that | ||
1516 | * the driver is loaded. | ||
1517 | * | 1518 | * |
1519 | * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. | ||
1520 | * For ASF and Pass Through versions of f/w this means that | ||
1521 | * the driver is loaded. | ||
1518 | **/ | 1522 | **/ |
1519 | static void igb_get_hw_control(struct igb_adapter *adapter) | 1523 | static void igb_get_hw_control(struct igb_adapter *adapter) |
1520 | { | 1524 | { |
@@ -1528,8 +1532,8 @@ static void igb_get_hw_control(struct igb_adapter *adapter) | |||
1528 | } | 1532 | } |
1529 | 1533 | ||
1530 | /** | 1534 | /** |
1531 | * igb_configure - configure the hardware for RX and TX | 1535 | * igb_configure - configure the hardware for RX and TX |
1532 | * @adapter: private board structure | 1536 | * @adapter: private board structure |
1533 | **/ | 1537 | **/ |
1534 | static void igb_configure(struct igb_adapter *adapter) | 1538 | static void igb_configure(struct igb_adapter *adapter) |
1535 | { | 1539 | { |
@@ -1552,7 +1556,8 @@ static void igb_configure(struct igb_adapter *adapter) | |||
1552 | 1556 | ||
1553 | /* call igb_desc_unused which always leaves | 1557 | /* call igb_desc_unused which always leaves |
1554 | * at least 1 descriptor unused to make sure | 1558 | * at least 1 descriptor unused to make sure |
1555 | * next_to_use != next_to_clean */ | 1559 | * next_to_use != next_to_clean |
1560 | */ | ||
1556 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1561 | for (i = 0; i < adapter->num_rx_queues; i++) { |
1557 | struct igb_ring *ring = adapter->rx_ring[i]; | 1562 | struct igb_ring *ring = adapter->rx_ring[i]; |
1558 | igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); | 1563 | igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); |
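
The "at least 1 descriptor unused" guarantee the comment relies on comes from the helper's arithmetic: sacrificing one slot is what lets next_to_use == next_to_clean unambiguously mean an empty ring. A sketch consistent with the igb ring indexing:

    static inline u16 igb_desc_unused(struct igb_ring *ring)
    {
            /* one slot always stays empty so a full ring never wraps
             * next_to_use back onto next_to_clean
             */
            if (ring->next_to_clean > ring->next_to_use)
                    return ring->next_to_clean - ring->next_to_use - 1;

            return ring->count + ring->next_to_clean - ring->next_to_use - 1;
    }
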
@@ -1560,8 +1565,8 @@ static void igb_configure(struct igb_adapter *adapter) | |||
1560 | } | 1565 | } |
1561 | 1566 | ||
1562 | /** | 1567 | /** |
1563 | * igb_power_up_link - Power up the phy/serdes link | 1568 | * igb_power_up_link - Power up the phy/serdes link |
1564 | * @adapter: address of board private structure | 1569 | * @adapter: address of board private structure |
1565 | **/ | 1570 | **/ |
1566 | void igb_power_up_link(struct igb_adapter *adapter) | 1571 | void igb_power_up_link(struct igb_adapter *adapter) |
1567 | { | 1572 | { |
@@ -1574,8 +1579,8 @@ void igb_power_up_link(struct igb_adapter *adapter) | |||
1574 | } | 1579 | } |
1575 | 1580 | ||
1576 | /** | 1581 | /** |
1577 | * igb_power_down_link - Power down the phy/serdes link | 1582 | * igb_power_down_link - Power down the phy/serdes link |
1578 | * @adapter: address of board private structure | 1583 | * @adapter: address of board private structure |
1579 | */ | 1584 | */ |
1580 | static void igb_power_down_link(struct igb_adapter *adapter) | 1585 | static void igb_power_down_link(struct igb_adapter *adapter) |
1581 | { | 1586 | { |
@@ -1586,8 +1591,8 @@ static void igb_power_down_link(struct igb_adapter *adapter) | |||
1586 | } | 1591 | } |
1587 | 1592 | ||
1588 | /** | 1593 | /** |
1589 | * igb_up - Open the interface and prepare it to handle traffic | 1594 | * igb_up - Open the interface and prepare it to handle traffic |
1590 | * @adapter: board private structure | 1595 | * @adapter: board private structure |
1591 | **/ | 1596 | **/ |
1592 | int igb_up(struct igb_adapter *adapter) | 1597 | int igb_up(struct igb_adapter *adapter) |
1593 | { | 1598 | { |
@@ -1635,7 +1640,8 @@ void igb_down(struct igb_adapter *adapter) | |||
1635 | int i; | 1640 | int i; |
1636 | 1641 | ||
1637 | /* signal that we're down so the interrupt handler does not | 1642 | /* signal that we're down so the interrupt handler does not |
1638 | * reschedule our watchdog timer */ | 1643 | * reschedule our watchdog timer |
1644 | */ | ||
1639 | set_bit(__IGB_DOWN, &adapter->state); | 1645 | set_bit(__IGB_DOWN, &adapter->state); |
1640 | 1646 | ||
1641 | /* disable receives in the hardware */ | 1647 | /* disable receives in the hardware */ |
@@ -1731,14 +1737,16 @@ void igb_reset(struct igb_adapter *adapter) | |||
1731 | * rounded up to the next 1KB and expressed in KB. Likewise, | 1737 | * rounded up to the next 1KB and expressed in KB. Likewise, |
1732 | * the Rx FIFO should be large enough to accommodate at least | 1738 | * the Rx FIFO should be large enough to accommodate at least |
1733 | * one full receive packet and is similarly rounded up and | 1739 | * one full receive packet and is similarly rounded up and |
1734 | * expressed in KB. */ | 1740 | * expressed in KB. |
1741 | */ | ||
1735 | pba = rd32(E1000_PBA); | 1742 | pba = rd32(E1000_PBA); |
1736 | /* upper 16 bits hold the Tx packet buffer allocation size in KB */ | 1743 | /* upper 16 bits hold the Tx packet buffer allocation size in KB */ |
1737 | tx_space = pba >> 16; | 1744 | tx_space = pba >> 16; |
1738 | /* lower 16 bits hold the Rx packet buffer allocation size in KB */ | 1745 | /* lower 16 bits hold the Rx packet buffer allocation size in KB */ |
1739 | pba &= 0xffff; | 1746 | pba &= 0xffff; |
1740 | /* the tx fifo also stores 16 bytes of information about the tx | 1747 | /* the Tx fifo also stores 16 bytes of information about the Tx |
1741 | * but don't include ethernet FCS because hardware appends it */ | 1748 | * but don't include ethernet FCS because hardware appends it |
1749 | */ | ||
1742 | min_tx_space = (adapter->max_frame_size + | 1750 | min_tx_space = (adapter->max_frame_size + |
1743 | sizeof(union e1000_adv_tx_desc) - | 1751 | sizeof(union e1000_adv_tx_desc) - |
1744 | ETH_FCS_LEN) * 2; | 1752 | ETH_FCS_LEN) * 2; |
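
With illustrative numbers for a standard 1500-byte MTU (max_frame_size = 1522, a 16-byte advanced Tx descriptor, ETH_FCS_LEN = 4), the sizing above works out as:

    min_tx_space = (1522 + 16 - 4) * 2;   /* = 3068 bytes */
    /* per the surrounding comment this is then rounded up to the
     * next 1 KB and expressed in KB: 3068 -> 3072 bytes -> 3 KB
     */

The factor of two leaves room for two maximum-sized frames, each with its descriptor, minus the FCS bytes that hardware appends on its own.
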
@@ -1751,13 +1759,15 @@ void igb_reset(struct igb_adapter *adapter) | |||
1751 | 1759 | ||
1752 | /* If current Tx allocation is less than the min Tx FIFO size, | 1760 | /* If current Tx allocation is less than the min Tx FIFO size, |
1753 | * and the min Tx FIFO size is less than the current Rx FIFO | 1761 | * and the min Tx FIFO size is less than the current Rx FIFO |
1754 | * allocation, take space away from current Rx allocation */ | 1762 | * allocation, take space away from current Rx allocation |
1763 | */ | ||
1755 | if (tx_space < min_tx_space && | 1764 | if (tx_space < min_tx_space && |
1756 | ((min_tx_space - tx_space) < pba)) { | 1765 | ((min_tx_space - tx_space) < pba)) { |
1757 | pba = pba - (min_tx_space - tx_space); | 1766 | pba = pba - (min_tx_space - tx_space); |
1758 | 1767 | ||
1759 | /* if short on rx space, rx wins and must trump tx | 1768 | /* if short on Rx space, Rx wins and must trump Tx |
1760 | * adjustment */ | 1769 | * adjustment |
1770 | */ | ||
1761 | if (pba < min_rx_space) | 1771 | if (pba < min_rx_space) |
1762 | pba = min_rx_space; | 1772 | pba = min_rx_space; |
1763 | } | 1773 | } |
@@ -1769,7 +1779,8 @@ void igb_reset(struct igb_adapter *adapter) | |||
1769 | * (or the size used for early receive) above it in the Rx FIFO. | 1779 | * (or the size used for early receive) above it in the Rx FIFO. |
1770 | * Set it to the lower of: | 1780 | * Set it to the lower of: |
1771 | * - 90% of the Rx FIFO size, or | 1781 | * - 90% of the Rx FIFO size, or |
1772 | * - the full Rx FIFO size minus one full frame */ | 1782 | * - the full Rx FIFO size minus one full frame |
1783 | */ | ||
1773 | hwm = min(((pba << 10) * 9 / 10), | 1784 | hwm = min(((pba << 10) * 9 / 10), |
1774 | ((pba << 10) - 2 * adapter->max_frame_size)); | 1785 | ((pba << 10) - 2 * adapter->max_frame_size)); |
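
Concretely, with illustrative values of pba = 34 KB of Rx FIFO and max_frame_size = 1522:

    hwm = min((34 << 10) * 9 / 10,       /* 31334: 90% of the Rx FIFO */
              (34 << 10) - 2 * 1522);    /* 31772: FIFO minus frame headroom */
        /* = 31334 */

The 90% bound governs for ordinary frames; with jumbo frames (say 9234 bytes) the second term, 34816 - 2 * 9234 = 16348, becomes the binding one.
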
1775 | 1786 | ||
@@ -1800,8 +1811,7 @@ void igb_reset(struct igb_adapter *adapter) | |||
1800 | if (hw->mac.ops.init_hw(hw)) | 1811 | if (hw->mac.ops.init_hw(hw)) |
1801 | dev_err(&pdev->dev, "Hardware Error\n"); | 1812 | dev_err(&pdev->dev, "Hardware Error\n"); |
1802 | 1813 | ||
1803 | /* | 1814 | /* Flow control settings reset on hardware reset, so guarantee flow |
1804 | * Flow control settings reset on hardware reset, so guarantee flow | ||
1805 | * control is off when forcing speed. | 1815 | * control is off when forcing speed. |
1806 | */ | 1816 | */ |
1807 | if (!hw->mac.autoneg) | 1817 | if (!hw->mac.autoneg) |
@@ -1837,9 +1847,8 @@ void igb_reset(struct igb_adapter *adapter) | |||
1837 | static netdev_features_t igb_fix_features(struct net_device *netdev, | 1847 | static netdev_features_t igb_fix_features(struct net_device *netdev, |
1838 | netdev_features_t features) | 1848 | netdev_features_t features) |
1839 | { | 1849 | { |
1840 | /* | 1850 | /* Since there is no support for separate Rx/Tx vlan accel |
1841 | * Since there is no support for separate rx/tx vlan accel | 1851 | * enable/disable make sure Tx flag is always in same state as Rx. |
1842 | * enable/disable make sure tx flag is always in same state as rx. | ||
1843 | */ | 1852 | */ |
1844 | if (features & NETIF_F_HW_VLAN_RX) | 1853 | if (features & NETIF_F_HW_VLAN_RX) |
1845 | features |= NETIF_F_HW_VLAN_TX; | 1854 | features |= NETIF_F_HW_VLAN_TX; |
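
Only the Rx-implies-Tx direction is visible in this hunk; the complementary branch clears the Tx flag when Rx acceleration is switched off, so the two can never diverge. A sketch of the whole fixup under that assumption:

    static netdev_features_t igb_fix_features(struct net_device *netdev,
                                              netdev_features_t features)
    {
            /* Rx and Tx VLAN acceleration must toggle together */
            if (features & NETIF_F_HW_VLAN_RX)
                    features |= NETIF_F_HW_VLAN_TX;
            else
                    features &= ~NETIF_F_HW_VLAN_TX;

            return features;
    }
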
@@ -1898,7 +1907,6 @@ static const struct net_device_ops igb_netdev_ops = { | |||
1898 | /** | 1907 | /** |
1899 | * igb_set_fw_version - Configure version string for ethtool | 1908 | * igb_set_fw_version - Configure version string for ethtool |
1900 | * @adapter: adapter struct | 1909 | * @adapter: adapter struct |
1901 | * | ||
1902 | **/ | 1910 | **/ |
1903 | void igb_set_fw_version(struct igb_adapter *adapter) | 1911 | void igb_set_fw_version(struct igb_adapter *adapter) |
1904 | { | 1912 | { |
@@ -1934,10 +1942,10 @@ void igb_set_fw_version(struct igb_adapter *adapter) | |||
1934 | return; | 1942 | return; |
1935 | } | 1943 | } |
1936 | 1944 | ||
1937 | /* igb_init_i2c - Init I2C interface | 1945 | /** |
1946 | * igb_init_i2c - Init I2C interface | ||
1938 | * @adapter: pointer to adapter structure | 1947 | * @adapter: pointer to adapter structure |
1939 | * | 1948 | **/ |
1940 | */ | ||
1941 | static s32 igb_init_i2c(struct igb_adapter *adapter) | 1949 | static s32 igb_init_i2c(struct igb_adapter *adapter) |
1942 | { | 1950 | { |
1943 | s32 status = E1000_SUCCESS; | 1951 | s32 status = E1000_SUCCESS; |
@@ -1962,15 +1970,15 @@ static s32 igb_init_i2c(struct igb_adapter *adapter) | |||
1962 | } | 1970 | } |
1963 | 1971 | ||
1964 | /** | 1972 | /** |
1965 | * igb_probe - Device Initialization Routine | 1973 | * igb_probe - Device Initialization Routine |
1966 | * @pdev: PCI device information struct | 1974 | * @pdev: PCI device information struct |
1967 | * @ent: entry in igb_pci_tbl | 1975 | * @ent: entry in igb_pci_tbl |
1968 | * | 1976 | * |
1969 | * Returns 0 on success, negative on failure | 1977 | * Returns 0 on success, negative on failure |
1970 | * | 1978 | * |
1971 | * igb_probe initializes an adapter identified by a pci_dev structure. | 1979 | * igb_probe initializes an adapter identified by a pci_dev structure. |
1972 | * The OS initialization, configuring of the adapter private structure, | 1980 | * The OS initialization, configuring of the adapter private structure, |
1973 | * and a hardware reset occur. | 1981 | * and a hardware reset occur. |
1974 | **/ | 1982 | **/ |
1975 | static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 1983 | static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1976 | { | 1984 | { |
@@ -2007,18 +2015,19 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2007 | } else { | 2015 | } else { |
2008 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); | 2016 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
2009 | if (err) { | 2017 | if (err) { |
2010 | err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | 2018 | err = dma_set_coherent_mask(&pdev->dev, |
2019 | DMA_BIT_MASK(32)); | ||
2011 | if (err) { | 2020 | if (err) { |
2012 | dev_err(&pdev->dev, "No usable DMA " | 2021 | dev_err(&pdev->dev, |
2013 | "configuration, aborting\n"); | 2022 | "No usable DMA configuration, aborting\n"); |
2014 | goto err_dma; | 2023 | goto err_dma; |
2015 | } | 2024 | } |
2016 | } | 2025 | } |
2017 | } | 2026 | } |
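
The ladder above first asks for a 64-bit streaming DMA mask and, failing that, drops both the streaming and coherent masks to 32 bits. On kernels newer than this commit the same probe can be written more compactly; a sketch, assuming dma_set_mask_and_coherent() is available (it sets both masks in one call):

    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (err) {
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            if (err) {
                    dev_err(&pdev->dev,
                            "No usable DMA configuration, aborting\n");
                    goto err_dma;
            }
    }

The two-step form in the diff is the idiom of this era and roughly equivalent in effect; it simply predates the combined helper.
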
2018 | 2027 | ||
2019 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, | 2028 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, |
2020 | IORESOURCE_MEM), | 2029 | IORESOURCE_MEM), |
2021 | igb_driver_name); | 2030 | igb_driver_name); |
2022 | if (err) | 2031 | if (err) |
2023 | goto err_pci_reg; | 2032 | goto err_pci_reg; |
2024 | 2033 | ||
@@ -2096,8 +2105,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2096 | dev_info(&pdev->dev, | 2105 | dev_info(&pdev->dev, |
2097 | "PHY reset is blocked due to SOL/IDER session.\n"); | 2106 | "PHY reset is blocked due to SOL/IDER session.\n"); |
2098 | 2107 | ||
2099 | /* | 2108 | /* features is initialized to 0 in allocation, it might have bits |
2100 | * features is initialized to 0 in allocation, it might have bits | ||
2101 | * set by igb_sw_init so we should use an or instead of an | 2109 | * set by igb_sw_init so we should use an or instead of an |
2102 | * assignment. | 2110 | * assignment. |
2103 | */ | 2111 | */ |
@@ -2141,11 +2149,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2141 | adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); | 2149 | adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); |
2142 | 2150 | ||
2143 | /* before reading the NVM, reset the controller to put the device in a | 2151 | /* before reading the NVM, reset the controller to put the device in a |
2144 | * known good starting state */ | 2152 | * known good starting state |
2153 | */ | ||
2145 | hw->mac.ops.reset_hw(hw); | 2154 | hw->mac.ops.reset_hw(hw); |
2146 | 2155 | ||
2147 | /* | 2156 | /* make sure the NVM is good, i211 parts have special NVM that |
2148 | * make sure the NVM is good, i211 parts have special NVM that | ||
2149 | * doesn't contain a checksum | 2157 | * doesn't contain a checksum |
2150 | */ | 2158 | */ |
2151 | if (hw->mac.type != e1000_i211) { | 2159 | if (hw->mac.type != e1000_i211) { |
@@ -2172,9 +2180,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2172 | igb_set_fw_version(adapter); | 2180 | igb_set_fw_version(adapter); |
2173 | 2181 | ||
2174 | setup_timer(&adapter->watchdog_timer, igb_watchdog, | 2182 | setup_timer(&adapter->watchdog_timer, igb_watchdog, |
2175 | (unsigned long) adapter); | 2183 | (unsigned long) adapter); |
2176 | setup_timer(&adapter->phy_info_timer, igb_update_phy_info, | 2184 | setup_timer(&adapter->phy_info_timer, igb_update_phy_info, |
2177 | (unsigned long) adapter); | 2185 | (unsigned long) adapter); |
2178 | 2186 | ||
2179 | INIT_WORK(&adapter->reset_task, igb_reset_task); | 2187 | INIT_WORK(&adapter->reset_task, igb_reset_task); |
2180 | INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); | 2188 | INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); |
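
setup_timer() wires these timers to the old-style kernel timer API, whose callback receives the unsigned long cookie given as the third argument. A minimal sketch of the watchdog side, assuming the handler simply defers to the work item registered just below:

    static void igb_watchdog(unsigned long data)
    {
            struct igb_adapter *adapter = (struct igb_adapter *)data;

            /* timers fire in softirq context; push the real work
             * out to process context
             */
            schedule_work(&adapter->watchdog_task);
    }
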
@@ -2196,8 +2204,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2196 | /* Check the NVM for wake support on non-port A ports */ | 2204 | /* Check the NVM for wake support on non-port A ports */ |
2197 | if (hw->mac.type >= e1000_82580) | 2205 | if (hw->mac.type >= e1000_82580) |
2198 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + | 2206 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + |
2199 | NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, | 2207 | NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, |
2200 | &eeprom_data); | 2208 | &eeprom_data); |
2201 | else if (hw->bus.func == 1) | 2209 | else if (hw->bus.func == 1) |
2202 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | 2210 | hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); |
2203 | 2211 | ||
@@ -2206,7 +2214,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2206 | 2214 | ||
2207 | /* now that we have the eeprom settings, apply the special cases where | 2215 | /* now that we have the eeprom settings, apply the special cases where |
2208 | * the eeprom may be wrong or the board simply won't support wake on | 2216 | * the eeprom may be wrong or the board simply won't support wake on |
2209 | * lan on a particular port */ | 2217 | * lan on a particular port |
2218 | */ | ||
2210 | switch (pdev->device) { | 2219 | switch (pdev->device) { |
2211 | case E1000_DEV_ID_82575GB_QUAD_COPPER: | 2220 | case E1000_DEV_ID_82575GB_QUAD_COPPER: |
2212 | adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; | 2221 | adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; |
@@ -2215,7 +2224,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2215 | case E1000_DEV_ID_82576_FIBER: | 2224 | case E1000_DEV_ID_82576_FIBER: |
2216 | case E1000_DEV_ID_82576_SERDES: | 2225 | case E1000_DEV_ID_82576_SERDES: |
2217 | /* Wake events only supported on port A for dual fiber | 2226 | /* Wake events only supported on port A for dual fiber |
2218 | * regardless of eeprom setting */ | 2227 | * regardless of eeprom setting |
2228 | */ | ||
2219 | if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) | 2229 | if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) |
2220 | adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; | 2230 | adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; |
2221 | break; | 2231 | break; |
@@ -2285,8 +2295,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2285 | if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { | 2295 | if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { |
2286 | u16 ets_word; | 2296 | u16 ets_word; |
2287 | 2297 | ||
2288 | /* | 2298 | /* Read the NVM to determine if this i350 device supports an |
2289 | * Read the NVM to determine if this i350 device supports an | ||
2290 | * external thermal sensor. | 2299 | * external thermal sensor. |
2291 | */ | 2300 | */ |
2292 | hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); | 2301 | hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); |
@@ -2310,7 +2319,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2310 | netdev->name, | 2319 | netdev->name, |
2311 | ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : | 2320 | ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : |
2312 | (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : | 2321 | (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : |
2313 | "unknown"), | 2322 | "unknown"), |
2314 | ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : | 2323 | ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : |
2315 | (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : | 2324 | (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : |
2316 | (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : | 2325 | (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : |
@@ -2355,7 +2364,7 @@ err_ioremap: | |||
2355 | free_netdev(netdev); | 2364 | free_netdev(netdev); |
2356 | err_alloc_etherdev: | 2365 | err_alloc_etherdev: |
2357 | pci_release_selected_regions(pdev, | 2366 | pci_release_selected_regions(pdev, |
2358 | pci_select_bars(pdev, IORESOURCE_MEM)); | 2367 | pci_select_bars(pdev, IORESOURCE_MEM)); |
2359 | err_pci_reg: | 2368 | err_pci_reg: |
2360 | err_dma: | 2369 | err_dma: |
2361 | pci_disable_device(pdev); | 2370 | pci_disable_device(pdev); |
@@ -2455,26 +2464,24 @@ out: | |||
2455 | } | 2464 | } |
2456 | 2465 | ||
2457 | #endif | 2466 | #endif |
2458 | /* | 2467 | /** |
2459 | * igb_remove_i2c - Cleanup I2C interface | 2468 | * igb_remove_i2c - Cleanup I2C interface |
2460 | * @adapter: pointer to adapter structure | 2469 | * @adapter: pointer to adapter structure |
2461 | * | 2470 | **/ |
2462 | */ | ||
2463 | static void igb_remove_i2c(struct igb_adapter *adapter) | 2471 | static void igb_remove_i2c(struct igb_adapter *adapter) |
2464 | { | 2472 | { |
2465 | |||
2466 | /* free the adapter bus structure */ | 2473 | /* free the adapter bus structure */ |
2467 | i2c_del_adapter(&adapter->i2c_adap); | 2474 | i2c_del_adapter(&adapter->i2c_adap); |
2468 | } | 2475 | } |
2469 | 2476 | ||
2470 | /** | 2477 | /** |
2471 | * igb_remove - Device Removal Routine | 2478 | * igb_remove - Device Removal Routine |
2472 | * @pdev: PCI device information struct | 2479 | * @pdev: PCI device information struct |
2473 | * | 2480 | * |
2474 | * igb_remove is called by the PCI subsystem to alert the driver | 2481 | * igb_remove is called by the PCI subsystem to alert the driver |
2475 | * that it should release a PCI device. This could be caused by a | 2482 | * that it should release a PCI device. This could be caused by a |
2476 | * Hot-Plug event, or because the driver is going to be removed from | 2483 | * Hot-Plug event, or because the driver is going to be removed from |
2477 | * memory. | 2484 | * memory. |
2478 | **/ | 2485 | **/ |
2479 | static void igb_remove(struct pci_dev *pdev) | 2486 | static void igb_remove(struct pci_dev *pdev) |
2480 | { | 2487 | { |
@@ -2488,8 +2495,7 @@ static void igb_remove(struct pci_dev *pdev) | |||
2488 | #endif | 2495 | #endif |
2489 | igb_remove_i2c(adapter); | 2496 | igb_remove_i2c(adapter); |
2490 | igb_ptp_stop(adapter); | 2497 | igb_ptp_stop(adapter); |
2491 | /* | 2498 | /* The watchdog timer may be rescheduled, so explicitly |
2492 | * The watchdog timer may be rescheduled, so explicitly | ||
2493 | * disable watchdog from being rescheduled. | 2499 | * disable watchdog from being rescheduled. |
2494 | */ | 2500 | */ |
2495 | set_bit(__IGB_DOWN, &adapter->state); | 2501 | set_bit(__IGB_DOWN, &adapter->state); |
@@ -2509,7 +2515,8 @@ static void igb_remove(struct pci_dev *pdev) | |||
2509 | #endif | 2515 | #endif |
2510 | 2516 | ||
2511 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 2517 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
2512 | * would have already happened in close and is redundant. */ | 2518 | * would have already happened in close and is redundant. |
2519 | */ | ||
2513 | igb_release_hw_control(adapter); | 2520 | igb_release_hw_control(adapter); |
2514 | 2521 | ||
2515 | unregister_netdev(netdev); | 2522 | unregister_netdev(netdev); |
@@ -2524,7 +2531,7 @@ static void igb_remove(struct pci_dev *pdev) | |||
2524 | if (hw->flash_address) | 2531 | if (hw->flash_address) |
2525 | iounmap(hw->flash_address); | 2532 | iounmap(hw->flash_address); |
2526 | pci_release_selected_regions(pdev, | 2533 | pci_release_selected_regions(pdev, |
2527 | pci_select_bars(pdev, IORESOURCE_MEM)); | 2534 | pci_select_bars(pdev, IORESOURCE_MEM)); |
2528 | 2535 | ||
2529 | kfree(adapter->shadow_vfta); | 2536 | kfree(adapter->shadow_vfta); |
2530 | free_netdev(netdev); | 2537 | free_netdev(netdev); |
@@ -2535,13 +2542,13 @@ static void igb_remove(struct pci_dev *pdev) | |||
2535 | } | 2542 | } |
2536 | 2543 | ||
2537 | /** | 2544 | /** |
2538 | * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space | 2545 | * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space |
2539 | * @adapter: board private structure to initialize | 2546 | * @adapter: board private structure to initialize |
2540 | * | 2547 | * |
2541 | * This function initializes the vf specific data storage and then attempts to | 2548 | * This function initializes the vf specific data storage and then attempts to |
2542 | * allocate the VFs. The reason for ordering it this way is because it is much | 2549 | * allocate the VFs. The reason for ordering it this way is because it is much |
2543 | * more expensive time-wise to disable SR-IOV than it is to allocate and free | 2550 | * more expensive time-wise to disable SR-IOV than it is to allocate and free |
2544 | * the memory for the VFs. | 2551 | * the memory for the VFs. |
2545 | **/ | 2552 | **/ |
2546 | static void igb_probe_vfs(struct igb_adapter *adapter) | 2553 | static void igb_probe_vfs(struct igb_adapter *adapter) |
2547 | { | 2554 | { |
@@ -2601,8 +2608,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter) | |||
2601 | /* Device supports enough interrupts without queue pairing. */ | 2608 | /* Device supports enough interrupts without queue pairing. */ |
2602 | break; | 2609 | break; |
2603 | case e1000_82576: | 2610 | case e1000_82576: |
2604 | /* | 2611 | /* If VFs are going to be allocated with RSS queues then we |
2605 | * If VFs are going to be allocated with RSS queues then we | ||
2606 | * should pair the queues in order to conserve interrupts due | 2612 | * should pair the queues in order to conserve interrupts due |
2607 | * to limited supply. | 2613 | * to limited supply. |
2608 | */ | 2614 | */ |
@@ -2614,8 +2620,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter) | |||
2614 | case e1000_i350: | 2620 | case e1000_i350: |
2615 | case e1000_i210: | 2621 | case e1000_i210: |
2616 | default: | 2622 | default: |
2617 | /* | 2623 | /* If rss_queues > half of max_rss_queues, pair the queues in |
2618 | * If rss_queues > half of max_rss_queues, pair the queues in | ||
2619 | * order to conserve interrupts due to limited supply. | 2624 | * order to conserve interrupts due to limited supply. |
2620 | */ | 2625 | */ |
2621 | if (adapter->rss_queues > (max_rss_queues / 2)) | 2626 | if (adapter->rss_queues > (max_rss_queues / 2)) |
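
To make the pairing rule concrete (numbers illustrative): with max_rss_queues = 8 and rss_queues = 8, the test 8 > 8/2 holds, so the body sets IGB_FLAG_QUEUE_PAIRS and each MSI-X vector then services one Tx/Rx ring pair, halving the vector demand; at rss_queues = 4 the comparison fails and Tx and Rx keep separate vectors:

    if (adapter->rss_queues > (max_rss_queues / 2))
            adapter->flags |= IGB_FLAG_QUEUE_PAIRS;   /* share one vector per ring pair */
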
@@ -2625,12 +2630,12 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter) | |||
2625 | } | 2630 | } |
2626 | 2631 | ||
2627 | /** | 2632 | /** |
2628 | * igb_sw_init - Initialize general software structures (struct igb_adapter) | 2633 | * igb_sw_init - Initialize general software structures (struct igb_adapter) |
2629 | * @adapter: board private structure to initialize | 2634 | * @adapter: board private structure to initialize |
2630 | * | 2635 | * |
2631 | * igb_sw_init initializes the Adapter private data structure. | 2636 | * igb_sw_init initializes the Adapter private data structure. |
2632 | * Fields are initialized based on PCI device information and | 2637 | * Fields are initialized based on PCI device information and |
2633 | * OS network device settings (MTU size). | 2638 | * OS network device settings (MTU size). |
2634 | **/ | 2639 | **/ |
2635 | static int igb_sw_init(struct igb_adapter *adapter) | 2640 | static int igb_sw_init(struct igb_adapter *adapter) |
2636 | { | 2641 | { |
@@ -2700,16 +2705,16 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
2700 | } | 2705 | } |
2701 | 2706 | ||
2702 | /** | 2707 | /** |
2703 | * igb_open - Called when a network interface is made active | 2708 | * igb_open - Called when a network interface is made active |
2704 | * @netdev: network interface device structure | 2709 | * @netdev: network interface device structure |
2705 | * | 2710 | * |
2706 | * Returns 0 on success, negative value on failure | 2711 | * Returns 0 on success, negative value on failure |
2707 | * | 2712 | * |
2708 | * The open entry point is called when a network interface is made | 2713 | * The open entry point is called when a network interface is made |
2709 | * active by the system (IFF_UP). At this point all resources needed | 2714 | * active by the system (IFF_UP). At this point all resources needed |
2710 | * for transmit and receive operations are allocated, the interrupt | 2715 | * for transmit and receive operations are allocated, the interrupt |
2711 | * handler is registered with the OS, the watchdog timer is started, | 2716 | * handler is registered with the OS, the watchdog timer is started, |
2712 | * and the stack is notified that the interface is ready. | 2717 | * and the stack is notified that the interface is ready. |
2713 | **/ | 2718 | **/ |
2714 | static int __igb_open(struct net_device *netdev, bool resuming) | 2719 | static int __igb_open(struct net_device *netdev, bool resuming) |
2715 | { | 2720 | { |
@@ -2745,7 +2750,8 @@ static int __igb_open(struct net_device *netdev, bool resuming) | |||
2745 | /* before we allocate an interrupt, we must be ready to handle it. | 2750 | /* before we allocate an interrupt, we must be ready to handle it. |
2746 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | 2751 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt |
2747 | * as soon as we call pci_request_irq, so we have to setup our | 2752 | * as soon as we call pci_request_irq, so we have to setup our |
2748 | * clean_rx handler before we do so. */ | 2753 | * clean_rx handler before we do so. |
2754 | */ | ||
2749 | igb_configure(adapter); | 2755 | igb_configure(adapter); |
2750 | 2756 | ||
2751 | err = igb_request_irq(adapter); | 2757 | err = igb_request_irq(adapter); |
@@ -2814,15 +2820,15 @@ static int igb_open(struct net_device *netdev) | |||
2814 | } | 2820 | } |
2815 | 2821 | ||
2816 | /** | 2822 | /** |
2817 | * igb_close - Disables a network interface | 2823 | * igb_close - Disables a network interface |
2818 | * @netdev: network interface device structure | 2824 | * @netdev: network interface device structure |
2819 | * | 2825 | * |
2820 | * Returns 0, this is not allowed to fail | 2826 | * Returns 0, this is not allowed to fail |
2821 | * | 2827 | * |
2822 | * The close entry point is called when an interface is de-activated | 2828 | * The close entry point is called when an interface is de-activated |
2823 | * by the OS. The hardware is still under the driver's control, but | 2829 | * by the OS. The hardware is still under the driver's control, but |
2824 | * needs to be disabled. A global MAC reset is issued to stop the | 2830 | * needs to be disabled. A global MAC reset is issued to stop the |
2825 | * hardware, and all transmit and receive resources are freed. | 2831 | * hardware, and all transmit and receive resources are freed. |
2826 | **/ | 2832 | **/ |
2827 | static int __igb_close(struct net_device *netdev, bool suspending) | 2833 | static int __igb_close(struct net_device *netdev, bool suspending) |
2828 | { | 2834 | { |
@@ -2851,10 +2857,10 @@ static int igb_close(struct net_device *netdev) | |||
2851 | } | 2857 | } |
2852 | 2858 | ||
2853 | /** | 2859 | /** |
2854 | * igb_setup_tx_resources - allocate Tx resources (Descriptors) | 2860 | * igb_setup_tx_resources - allocate Tx resources (Descriptors) |
2855 | * @tx_ring: tx descriptor ring (for a specific queue) to setup | 2861 | * @tx_ring: tx descriptor ring (for a specific queue) to setup |
2856 | * | 2862 | * |
2857 | * Return 0 on success, negative on failure | 2863 | * Return 0 on success, negative on failure |
2858 | **/ | 2864 | **/ |
2859 | int igb_setup_tx_resources(struct igb_ring *tx_ring) | 2865 | int igb_setup_tx_resources(struct igb_ring *tx_ring) |
2860 | { | 2866 | { |
@@ -2889,11 +2895,11 @@ err: | |||
2889 | } | 2895 | } |
2890 | 2896 | ||
2891 | /** | 2897 | /** |
2892 | * igb_setup_all_tx_resources - wrapper to allocate Tx resources | 2898 | * igb_setup_all_tx_resources - wrapper to allocate Tx resources |
2893 | * (Descriptors) for all queues | 2899 | * (Descriptors) for all queues |
2894 | * @adapter: board private structure | 2900 | * @adapter: board private structure |
2895 | * | 2901 | * |
2896 | * Return 0 on success, negative on failure | 2902 | * Return 0 on success, negative on failure |
2897 | **/ | 2903 | **/ |
2898 | static int igb_setup_all_tx_resources(struct igb_adapter *adapter) | 2904 | static int igb_setup_all_tx_resources(struct igb_adapter *adapter) |
2899 | { | 2905 | { |
@@ -2915,8 +2921,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) | |||
2915 | } | 2921 | } |
2916 | 2922 | ||
2917 | /** | 2923 | /** |
2918 | * igb_setup_tctl - configure the transmit control registers | 2924 | * igb_setup_tctl - configure the transmit control registers |
2919 | * @adapter: Board private structure | 2925 | * @adapter: Board private structure |
2920 | **/ | 2926 | **/ |
2921 | void igb_setup_tctl(struct igb_adapter *adapter) | 2927 | void igb_setup_tctl(struct igb_adapter *adapter) |
2922 | { | 2928 | { |
@@ -2941,11 +2947,11 @@ void igb_setup_tctl(struct igb_adapter *adapter) | |||
2941 | } | 2947 | } |
2942 | 2948 | ||
2943 | /** | 2949 | /** |
2944 | * igb_configure_tx_ring - Configure transmit ring after Reset | 2950 | * igb_configure_tx_ring - Configure transmit ring after Reset |
2945 | * @adapter: board private structure | 2951 | * @adapter: board private structure |
2946 | * @ring: tx ring to configure | 2952 | * @ring: tx ring to configure |
2947 | * | 2953 | * |
2948 | * Configure a transmit ring after a reset. | 2954 | * Configure a transmit ring after a reset. |
2949 | **/ | 2955 | **/ |
2950 | void igb_configure_tx_ring(struct igb_adapter *adapter, | 2956 | void igb_configure_tx_ring(struct igb_adapter *adapter, |
2951 | struct igb_ring *ring) | 2957 | struct igb_ring *ring) |
@@ -2961,9 +2967,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, | |||
2961 | mdelay(10); | 2967 | mdelay(10); |
2962 | 2968 | ||
2963 | wr32(E1000_TDLEN(reg_idx), | 2969 | wr32(E1000_TDLEN(reg_idx), |
2964 | ring->count * sizeof(union e1000_adv_tx_desc)); | 2970 | ring->count * sizeof(union e1000_adv_tx_desc)); |
2965 | wr32(E1000_TDBAL(reg_idx), | 2971 | wr32(E1000_TDBAL(reg_idx), |
2966 | tdba & 0x00000000ffffffffULL); | 2972 | tdba & 0x00000000ffffffffULL); |
2967 | wr32(E1000_TDBAH(reg_idx), tdba >> 32); | 2973 | wr32(E1000_TDBAH(reg_idx), tdba >> 32); |
2968 | 2974 | ||
2969 | ring->tail = hw->hw_addr + E1000_TDT(reg_idx); | 2975 | ring->tail = hw->hw_addr + E1000_TDT(reg_idx); |
@@ -2979,10 +2985,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, | |||
2979 | } | 2985 | } |
2980 | 2986 | ||
2981 | /** | 2987 | /** |
2982 | * igb_configure_tx - Configure transmit Unit after Reset | 2988 | * igb_configure_tx - Configure transmit Unit after Reset |
2983 | * @adapter: board private structure | 2989 | * @adapter: board private structure |
2984 | * | 2990 | * |
2985 | * Configure the Tx unit of the MAC after a reset. | 2991 | * Configure the Tx unit of the MAC after a reset. |
2986 | **/ | 2992 | **/ |
2987 | static void igb_configure_tx(struct igb_adapter *adapter) | 2993 | static void igb_configure_tx(struct igb_adapter *adapter) |
2988 | { | 2994 | { |
@@ -2993,10 +2999,10 @@ static void igb_configure_tx(struct igb_adapter *adapter) | |||
2993 | } | 2999 | } |
2994 | 3000 | ||
2995 | /** | 3001 | /** |
2996 | * igb_setup_rx_resources - allocate Rx resources (Descriptors) | 3002 | * igb_setup_rx_resources - allocate Rx resources (Descriptors) |
2997 | * @rx_ring: rx descriptor ring (for a specific queue) to setup | 3003 | * @rx_ring: Rx descriptor ring (for a specific queue) to setup |
2998 | * | 3004 | * |
2999 | * Returns 0 on success, negative on failure | 3005 | * Returns 0 on success, negative on failure |
3000 | **/ | 3006 | **/ |
3001 | int igb_setup_rx_resources(struct igb_ring *rx_ring) | 3007 | int igb_setup_rx_resources(struct igb_ring *rx_ring) |
3002 | { | 3008 | { |
@@ -3032,11 +3038,11 @@ err: | |||
3032 | } | 3038 | } |
3033 | 3039 | ||
3034 | /** | 3040 | /** |
3035 | * igb_setup_all_rx_resources - wrapper to allocate Rx resources | 3041 | * igb_setup_all_rx_resources - wrapper to allocate Rx resources |
3036 | * (Descriptors) for all queues | 3042 | * (Descriptors) for all queues |
3037 | * @adapter: board private structure | 3043 | * @adapter: board private structure |
3038 | * | 3044 | * |
3039 | * Return 0 on success, negative on failure | 3045 | * Return 0 on success, negative on failure |
3040 | **/ | 3046 | **/ |
3041 | static int igb_setup_all_rx_resources(struct igb_adapter *adapter) | 3047 | static int igb_setup_all_rx_resources(struct igb_adapter *adapter) |
3042 | { | 3048 | { |
@@ -3058,8 +3064,8 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter) | |||
3058 | } | 3064 | } |
3059 | 3065 | ||
3060 | /** | 3066 | /** |
3061 | * igb_setup_mrqc - configure the multiple receive queue control registers | 3067 | * igb_setup_mrqc - configure the multiple receive queue control registers |
3062 | * @adapter: Board private structure | 3068 | * @adapter: Board private structure |
3063 | **/ | 3069 | **/ |
3064 | static void igb_setup_mrqc(struct igb_adapter *adapter) | 3070 | static void igb_setup_mrqc(struct igb_adapter *adapter) |
3065 | { | 3071 | { |
@@ -3092,8 +3098,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) | |||
3092 | break; | 3098 | break; |
3093 | } | 3099 | } |
3094 | 3100 | ||
3095 | /* | 3101 | /* Populate the indirection table 4 entries at a time. To do this |
3096 | * Populate the indirection table 4 entries at a time. To do this | ||
3097 | * we are generating the results for n and n+2 and then interleaving | 3102 | * we are generating the results for n and n+2 and then interleaving |
3098 | * those with the results with n+1 and n+3. | 3103 | * those with the results with n+1 and n+3. |
3099 | */ | 3104 | */ |
@@ -3109,8 +3114,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) | |||
3109 | wr32(E1000_RETA(j), reta); | 3114 | wr32(E1000_RETA(j), reta); |
3110 | } | 3115 | } |
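
Each 32-bit RETA register holds four one-byte redirection entries, which is why the loop writes the table "4 entries at a time". A sketch of the generate-and-interleave trick the comment describes (mask and shift handling simplified, so treat this as illustrative rather than the literal driver arithmetic):

    for (j = 0; j < 32; j++) {
            /* first pass computes entries n and n+2 in the two
             * 16-bit halves of the word
             */
            u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
            u32 reta = (base & 0x07800780) >> (7 - shift);

            /* second pass computes n+1 and n+3 and interleaves them */
            base += 0x00010001 * num_rx_queues;
            reta |= (base & 0x07800780) << (1 + shift);

            wr32(E1000_RETA(j), reta);
    }
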
3111 | 3116 | ||
3112 | /* | 3117 | /* Disable raw packet checksumming so that RSS hash is placed in |
3113 | * Disable raw packet checksumming so that RSS hash is placed in | ||
3114 | * descriptor on writeback. No need to enable TCP/UDP/IP checksum | 3118 | * descriptor on writeback. No need to enable TCP/UDP/IP checksum |
3115 | * offloads as they are enabled by default | 3119 | * offloads as they are enabled by default |
3116 | */ | 3120 | */ |
@@ -3140,7 +3144,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) | |||
3140 | 3144 | ||
3141 | /* If VMDq is enabled then we set the appropriate mode for that, else | 3145 | /* If VMDq is enabled then we set the appropriate mode for that, else |
3142 | * we default to RSS so that an RSS hash is calculated per packet even | 3146 | * we default to RSS so that an RSS hash is calculated per packet even |
3143 | * if we are only using one queue */ | 3147 | * if we are only using one queue |
3148 | */ | ||
3144 | if (adapter->vfs_allocated_count) { | 3149 | if (adapter->vfs_allocated_count) { |
3145 | if (hw->mac.type > e1000_82575) { | 3150 | if (hw->mac.type > e1000_82575) { |
3146 | /* Set the default pool for the PF's first queue */ | 3151 | /* Set the default pool for the PF's first queue */ |
@@ -3165,8 +3170,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) | |||
3165 | } | 3170 | } |
3166 | 3171 | ||
3167 | /** | 3172 | /** |
3168 | * igb_setup_rctl - configure the receive control registers | 3173 | * igb_setup_rctl - configure the receive control registers |
3169 | * @adapter: Board private structure | 3174 | * @adapter: Board private structure |
3170 | **/ | 3175 | **/ |
3171 | void igb_setup_rctl(struct igb_adapter *adapter) | 3176 | void igb_setup_rctl(struct igb_adapter *adapter) |
3172 | { | 3177 | { |
@@ -3181,8 +3186,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) | |||
3181 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | | 3186 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | |
3182 | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); | 3187 | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); |
3183 | 3188 | ||
3184 | /* | 3189 | /* enable stripping of CRC. It's unlikely this will break BMC |
3185 | * enable stripping of CRC. It's unlikely this will break BMC | ||
3186 | * redirection as it did with e1000. Newer features require | 3190 | * redirection as it did with e1000. Newer features require |
3187 | * that the HW strips the CRC. | 3191 | * that the HW strips the CRC. |
3188 | */ | 3192 | */ |
@@ -3209,7 +3213,8 @@ void igb_setup_rctl(struct igb_adapter *adapter) | |||
3209 | /* This is useful for sniffing bad packets. */ | 3213 | /* This is useful for sniffing bad packets. */ |
3210 | if (adapter->netdev->features & NETIF_F_RXALL) { | 3214 | if (adapter->netdev->features & NETIF_F_RXALL) { |
3211 | /* UPE and MPE will be handled by normal PROMISC logic | 3215 | /* UPE and MPE will be handled by normal PROMISC logic |
3212 | * in e1000e_set_rx_mode */ | 3216 | * in e1000e_set_rx_mode |
3217 | */ | ||
3213 | rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ | 3218 | rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ |
3214 | E1000_RCTL_BAM | /* RX All Bcast Pkts */ | 3219 | E1000_RCTL_BAM | /* RX All Bcast Pkts */ |
3215 | E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ | 3220 | E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ |
@@ -3232,7 +3237,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, | |||
3232 | u32 vmolr; | 3237 | u32 vmolr; |
3233 | 3238 | ||
3234 | /* if it isn't the PF check to see if VFs are enabled and | 3239 | /* if it isn't the PF check to see if VFs are enabled and |
3235 | * increase the size to support vlan tags */ | 3240 | * increase the size to support vlan tags |
3241 | */ | ||
3236 | if (vfn < adapter->vfs_allocated_count && | 3242 | if (vfn < adapter->vfs_allocated_count && |
3237 | adapter->vf_data[vfn].vlans_enabled) | 3243 | adapter->vf_data[vfn].vlans_enabled) |
3238 | size += VLAN_TAG_SIZE; | 3244 | size += VLAN_TAG_SIZE; |
@@ -3246,10 +3252,10 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, | |||
3246 | } | 3252 | } |
3247 | 3253 | ||
3248 | /** | 3254 | /** |
3249 | * igb_rlpml_set - set maximum receive packet size | 3255 | * igb_rlpml_set - set maximum receive packet size |
3250 | * @adapter: board private structure | 3256 | * @adapter: board private structure |
3251 | * | 3257 | * |
3252 | * Configure maximum receivable packet size. | 3258 | * Configure maximum receivable packet size. |
3253 | **/ | 3259 | **/ |
3254 | static void igb_rlpml_set(struct igb_adapter *adapter) | 3260 | static void igb_rlpml_set(struct igb_adapter *adapter) |
3255 | { | 3261 | { |
@@ -3259,8 +3265,7 @@ static void igb_rlpml_set(struct igb_adapter *adapter) | |||
3259 | 3265 | ||
3260 | if (pf_id) { | 3266 | if (pf_id) { |
3261 | igb_set_vf_rlpml(adapter, max_frame_size, pf_id); | 3267 | igb_set_vf_rlpml(adapter, max_frame_size, pf_id); |
3262 | /* | 3268 | /* If we're in VMDQ or SR-IOV mode, then set global RLPML |
3263 | * If we're in VMDQ or SR-IOV mode, then set global RLPML | ||
3264 | * to our max jumbo frame size, in case we need to enable | 3269 | * to our max jumbo frame size, in case we need to enable |
3265 | * jumbo frames on one of the rings later. | 3270 | * jumbo frames on one of the rings later. |
3266 | * This will not pass over-length frames into the default | 3271 | * This will not pass over-length frames into the default |
@@ -3278,17 +3283,16 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, | |||
3278 | struct e1000_hw *hw = &adapter->hw; | 3283 | struct e1000_hw *hw = &adapter->hw; |
3279 | u32 vmolr; | 3284 | u32 vmolr; |
3280 | 3285 | ||
3281 | /* | 3286 | /* This register exists only on 82576 and newer so if we are older then |
3282 | * This register exists only on 82576 and newer so if we are older then | ||
3283 | * we should exit and do nothing | 3287 | * we should exit and do nothing |
3284 | */ | 3288 | */ |
3285 | if (hw->mac.type < e1000_82576) | 3289 | if (hw->mac.type < e1000_82576) |
3286 | return; | 3290 | return; |
3287 | 3291 | ||
3288 | vmolr = rd32(E1000_VMOLR(vfn)); | 3292 | vmolr = rd32(E1000_VMOLR(vfn)); |
3289 | vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ | 3293 | vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ |
3290 | if (aupe) | 3294 | if (aupe) |
3291 | vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ | 3295 | vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ |
3292 | else | 3296 | else |
3293 | vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ | 3297 | vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ |
3294 | 3298 | ||
@@ -3297,25 +3301,24 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, | |||
3297 | 3301 | ||
3298 | if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) | 3302 | if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) |
3299 | vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ | 3303 | vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ |
3300 | /* | 3304 | /* for VMDq only allow the VFs and pool 0 to accept broadcast and |
3301 | * for VMDq only allow the VFs and pool 0 to accept broadcast and | ||
3302 | * multicast packets | 3305 | * multicast packets |
3303 | */ | 3306 | */ |
3304 | if (vfn <= adapter->vfs_allocated_count) | 3307 | if (vfn <= adapter->vfs_allocated_count) |
3305 | vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ | 3308 | vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ |
3306 | 3309 | ||
3307 | wr32(E1000_VMOLR(vfn), vmolr); | 3310 | wr32(E1000_VMOLR(vfn), vmolr); |
3308 | } | 3311 | } |
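igb_set_vmolr above is a plain read-modify-write of the per-pool VMOLR register: always strip VLAN tags, accept untagged frames only when aupe is set, and add the RSS and broadcast bits depending on the pool. A compilable sketch of the bit handling; the bit positions below are illustrative placeholders, not the driver's real E1000_VMOLR_* values (those live in e1000_defines.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only. */
#define VMOLR_AUPE    (1u << 24) /* accept untagged packets */
#define VMOLR_BAM     (1u << 27) /* accept broadcast */
#define VMOLR_RSSE    (1u << 16) /* enable RSS */
#define VMOLR_STRVLAN (1u << 30) /* strip VLAN tags on receive */

static uint32_t set_vmolr(uint32_t vmolr, bool aupe, bool default_pool,
			  bool multi_rss)
{
	vmolr |= VMOLR_STRVLAN;       /* always strip VLAN tags */
	if (aupe)
		vmolr |= VMOLR_AUPE;  /* accept untagged packets */
	else
		vmolr &= ~VMOLR_AUPE; /* tagged packets only */
	if (multi_rss && default_pool)
		vmolr |= VMOLR_RSSE;  /* RSS only on the PF's default pool */
	vmolr |= VMOLR_BAM;           /* broadcast: VF pools and pool 0 */
	return vmolr;
}

int main(void)
{
	printf("0x%08x\n", set_vmolr(0, true, true, true));
	return 0;
}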
3309 | 3312 | ||
3310 | /** | 3313 | /** |
3311 | * igb_configure_rx_ring - Configure a receive ring after Reset | 3314 | * igb_configure_rx_ring - Configure a receive ring after Reset |
3312 | * @adapter: board private structure | 3315 | * @adapter: board private structure |
3313 | * @ring: receive ring to be configured | 3316 | * @ring: receive ring to be configured |
3314 | * | 3317 | * |
3315 | * Configure the Rx unit of the MAC after a reset. | 3318 | * Configure the Rx unit of the MAC after a reset. |
3316 | **/ | 3319 | **/ |
3317 | void igb_configure_rx_ring(struct igb_adapter *adapter, | 3320 | void igb_configure_rx_ring(struct igb_adapter *adapter, |
3318 | struct igb_ring *ring) | 3321 | struct igb_ring *ring) |
3319 | { | 3322 | { |
3320 | struct e1000_hw *hw = &adapter->hw; | 3323 | struct e1000_hw *hw = &adapter->hw; |
3321 | u64 rdba = ring->dma; | 3324 | u64 rdba = ring->dma; |
@@ -3330,7 +3333,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, | |||
3330 | rdba & 0x00000000ffffffffULL); | 3333 | rdba & 0x00000000ffffffffULL); |
3331 | wr32(E1000_RDBAH(reg_idx), rdba >> 32); | 3334 | wr32(E1000_RDBAH(reg_idx), rdba >> 32); |
3332 | wr32(E1000_RDLEN(reg_idx), | 3335 | wr32(E1000_RDLEN(reg_idx), |
3333 | ring->count * sizeof(union e1000_adv_rx_desc)); | 3336 | ring->count * sizeof(union e1000_adv_rx_desc)); |
3334 | 3337 | ||
3335 | /* initialize head and tail */ | 3338 | /* initialize head and tail */ |
3336 | ring->tail = hw->hw_addr + E1000_RDT(reg_idx); | 3339 | ring->tail = hw->hw_addr + E1000_RDT(reg_idx); |
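The RDBAL/RDBAH writes in this hunk program a single 64-bit descriptor-ring DMA address through two 32-bit registers, and RDLEN takes the ring length in bytes (advanced descriptors are 16 bytes each). The arithmetic in isolation, with a hypothetical address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rdba = 0x0000000123456000ULL; /* hypothetical DMA address */
	uint32_t count = 256;                  /* descriptors in the ring */
	uint32_t desc_size = 16;               /* sizeof(union e1000_adv_rx_desc) */

	uint32_t rdbal = (uint32_t)(rdba & 0x00000000ffffffffULL);
	uint32_t rdbah = (uint32_t)(rdba >> 32);
	uint32_t rdlen = count * desc_size;

	printf("RDBAL=0x%08" PRIx32 " RDBAH=0x%08" PRIx32 " RDLEN=%" PRIu32 "\n",
	       rdbal, rdbah, rdlen);
	return 0;
}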
@@ -3376,10 +3379,10 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter, | |||
3376 | } | 3379 | } |
3377 | 3380 | ||
3378 | /** | 3381 | /** |
3379 | * igb_configure_rx - Configure receive Unit after Reset | 3382 | * igb_configure_rx - Configure receive Unit after Reset |
3380 | * @adapter: board private structure | 3383 | * @adapter: board private structure |
3381 | * | 3384 | * |
3382 | * Configure the Rx unit of the MAC after a reset. | 3385 | * Configure the Rx unit of the MAC after a reset. |
3383 | **/ | 3386 | **/ |
3384 | static void igb_configure_rx(struct igb_adapter *adapter) | 3387 | static void igb_configure_rx(struct igb_adapter *adapter) |
3385 | { | 3388 | { |
@@ -3390,10 +3393,11 @@ static void igb_configure_rx(struct igb_adapter *adapter) | |||
3390 | 3393 | ||
3391 | /* set the correct pool for the PF default MAC address in entry 0 */ | 3394 | /* set the correct pool for the PF default MAC address in entry 0 */ |
3392 | igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, | 3395 | igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, |
3393 | adapter->vfs_allocated_count); | 3396 | adapter->vfs_allocated_count); |
3394 | 3397 | ||
3395 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 3398 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
3396 | * the Base and Length of the Rx Descriptor Ring */ | 3399 | * the Base and Length of the Rx Descriptor Ring |
3400 | */ | ||
3397 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3401 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3398 | struct igb_ring *rx_ring = adapter->rx_ring[i]; | 3402 | struct igb_ring *rx_ring = adapter->rx_ring[i]; |
3399 | igb_set_rx_buffer_len(adapter, rx_ring); | 3403 | igb_set_rx_buffer_len(adapter, rx_ring); |
@@ -3402,10 +3406,10 @@ static void igb_configure_rx(struct igb_adapter *adapter) | |||
3402 | } | 3406 | } |
3403 | 3407 | ||
3404 | /** | 3408 | /** |
3405 | * igb_free_tx_resources - Free Tx Resources per Queue | 3409 | * igb_free_tx_resources - Free Tx Resources per Queue |
3406 | * @tx_ring: Tx descriptor ring for a specific queue | 3410 | * @tx_ring: Tx descriptor ring for a specific queue |
3407 | * | 3411 | * |
3408 | * Free all transmit software resources | 3412 | * Free all transmit software resources |
3409 | **/ | 3413 | **/ |
3410 | void igb_free_tx_resources(struct igb_ring *tx_ring) | 3414 | void igb_free_tx_resources(struct igb_ring *tx_ring) |
3411 | { | 3415 | { |
@@ -3425,10 +3429,10 @@ void igb_free_tx_resources(struct igb_ring *tx_ring) | |||
3425 | } | 3429 | } |
3426 | 3430 | ||
3427 | /** | 3431 | /** |
3428 | * igb_free_all_tx_resources - Free Tx Resources for All Queues | 3432 | * igb_free_all_tx_resources - Free Tx Resources for All Queues |
3429 | * @adapter: board private structure | 3433 | * @adapter: board private structure |
3430 | * | 3434 | * |
3431 | * Free all transmit software resources | 3435 | * Free all transmit software resources |
3432 | **/ | 3436 | **/ |
3433 | static void igb_free_all_tx_resources(struct igb_adapter *adapter) | 3437 | static void igb_free_all_tx_resources(struct igb_adapter *adapter) |
3434 | { | 3438 | { |
@@ -3461,8 +3465,8 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring, | |||
3461 | } | 3465 | } |
3462 | 3466 | ||
3463 | /** | 3467 | /** |
3464 | * igb_clean_tx_ring - Free Tx Buffers | 3468 | * igb_clean_tx_ring - Free Tx Buffers |
3465 | * @tx_ring: ring to be cleaned | 3469 | * @tx_ring: ring to be cleaned |
3466 | **/ | 3470 | **/ |
3467 | static void igb_clean_tx_ring(struct igb_ring *tx_ring) | 3471 | static void igb_clean_tx_ring(struct igb_ring *tx_ring) |
3468 | { | 3472 | { |
@@ -3492,8 +3496,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) | |||
3492 | } | 3496 | } |
3493 | 3497 | ||
3494 | /** | 3498 | /** |
3495 | * igb_clean_all_tx_rings - Free Tx Buffers for all queues | 3499 | * igb_clean_all_tx_rings - Free Tx Buffers for all queues |
3496 | * @adapter: board private structure | 3500 | * @adapter: board private structure |
3497 | **/ | 3501 | **/ |
3498 | static void igb_clean_all_tx_rings(struct igb_adapter *adapter) | 3502 | static void igb_clean_all_tx_rings(struct igb_adapter *adapter) |
3499 | { | 3503 | { |
@@ -3504,10 +3508,10 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) | |||
3504 | } | 3508 | } |
3505 | 3509 | ||
3506 | /** | 3510 | /** |
3507 | * igb_free_rx_resources - Free Rx Resources | 3511 | * igb_free_rx_resources - Free Rx Resources |
3508 | * @rx_ring: ring to clean the resources from | 3512 | * @rx_ring: ring to clean the resources from |
3509 | * | 3513 | * |
3510 | * Free all receive software resources | 3514 | * Free all receive software resources |
3511 | **/ | 3515 | **/ |
3512 | void igb_free_rx_resources(struct igb_ring *rx_ring) | 3516 | void igb_free_rx_resources(struct igb_ring *rx_ring) |
3513 | { | 3517 | { |
@@ -3527,10 +3531,10 @@ void igb_free_rx_resources(struct igb_ring *rx_ring) | |||
3527 | } | 3531 | } |
3528 | 3532 | ||
3529 | /** | 3533 | /** |
3530 | * igb_free_all_rx_resources - Free Rx Resources for All Queues | 3534 | * igb_free_all_rx_resources - Free Rx Resources for All Queues |
3531 | * @adapter: board private structure | 3535 | * @adapter: board private structure |
3532 | * | 3536 | * |
3533 | * Free all receive software resources | 3537 | * Free all receive software resources |
3534 | **/ | 3538 | **/ |
3535 | static void igb_free_all_rx_resources(struct igb_adapter *adapter) | 3539 | static void igb_free_all_rx_resources(struct igb_adapter *adapter) |
3536 | { | 3540 | { |
@@ -3541,8 +3545,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) | |||
3541 | } | 3545 | } |
3542 | 3546 | ||
3543 | /** | 3547 | /** |
3544 | * igb_clean_rx_ring - Free Rx Buffers per Queue | 3548 | * igb_clean_rx_ring - Free Rx Buffers per Queue |
3545 | * @rx_ring: ring to free buffers from | 3549 | * @rx_ring: ring to free buffers from |
3546 | **/ | 3550 | **/ |
3547 | static void igb_clean_rx_ring(struct igb_ring *rx_ring) | 3551 | static void igb_clean_rx_ring(struct igb_ring *rx_ring) |
3548 | { | 3552 | { |
@@ -3584,8 +3588,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) | |||
3584 | } | 3588 | } |
3585 | 3589 | ||
3586 | /** | 3590 | /** |
3587 | * igb_clean_all_rx_rings - Free Rx Buffers for all queues | 3591 | * igb_clean_all_rx_rings - Free Rx Buffers for all queues |
3588 | * @adapter: board private structure | 3592 | * @adapter: board private structure |
3589 | **/ | 3593 | **/ |
3590 | static void igb_clean_all_rx_rings(struct igb_adapter *adapter) | 3594 | static void igb_clean_all_rx_rings(struct igb_adapter *adapter) |
3591 | { | 3595 | { |
@@ -3596,11 +3600,11 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter) | |||
3596 | } | 3600 | } |
3597 | 3601 | ||
3598 | /** | 3602 | /** |
3599 | * igb_set_mac - Change the Ethernet Address of the NIC | 3603 | * igb_set_mac - Change the Ethernet Address of the NIC |
3600 | * @netdev: network interface device structure | 3604 | * @netdev: network interface device structure |
3601 | * @p: pointer to an address structure | 3605 | * @p: pointer to an address structure |
3602 | * | 3606 | * |
3603 | * Returns 0 on success, negative on failure | 3607 | * Returns 0 on success, negative on failure |
3604 | **/ | 3608 | **/ |
3605 | static int igb_set_mac(struct net_device *netdev, void *p) | 3609 | static int igb_set_mac(struct net_device *netdev, void *p) |
3606 | { | 3610 | { |
@@ -3616,19 +3620,19 @@ static int igb_set_mac(struct net_device *netdev, void *p) | |||
3616 | 3620 | ||
3617 | /* set the correct pool for the new PF MAC address in entry 0 */ | 3621 | /* set the correct pool for the new PF MAC address in entry 0 */ |
3618 | igb_rar_set_qsel(adapter, hw->mac.addr, 0, | 3622 | igb_rar_set_qsel(adapter, hw->mac.addr, 0, |
3619 | adapter->vfs_allocated_count); | 3623 | adapter->vfs_allocated_count); |
3620 | 3624 | ||
3621 | return 0; | 3625 | return 0; |
3622 | } | 3626 | } |
3623 | 3627 | ||
3624 | /** | 3628 | /** |
3625 | * igb_write_mc_addr_list - write multicast addresses to MTA | 3629 | * igb_write_mc_addr_list - write multicast addresses to MTA |
3626 | * @netdev: network interface device structure | 3630 | * @netdev: network interface device structure |
3627 | * | 3631 | * |
3628 | * Writes multicast address list to the MTA hash table. | 3632 | * Writes multicast address list to the MTA hash table. |
3629 | * Returns: -ENOMEM on failure | 3633 | * Returns: -ENOMEM on failure |
3630 | * 0 on no addresses written | 3634 | * 0 on no addresses written |
3631 | * X on writing X addresses to MTA | 3635 | * X on writing X addresses to MTA |
3632 | **/ | 3636 | **/ |
3633 | static int igb_write_mc_addr_list(struct net_device *netdev) | 3637 | static int igb_write_mc_addr_list(struct net_device *netdev) |
3634 | { | 3638 | { |
@@ -3661,13 +3665,13 @@ static int igb_write_mc_addr_list(struct net_device *netdev) | |||
3661 | } | 3665 | } |
3662 | 3666 | ||
3663 | /** | 3667 | /** |
3664 | * igb_write_uc_addr_list - write unicast addresses to RAR table | 3668 | * igb_write_uc_addr_list - write unicast addresses to RAR table |
3665 | * @netdev: network interface device structure | 3669 | * @netdev: network interface device structure |
3666 | * | 3670 | * |
3667 | * Writes unicast address list to the RAR table. | 3671 | * Writes unicast address list to the RAR table. |
3668 | * Returns: -ENOMEM on failure/insufficient address space | 3672 | * Returns: -ENOMEM on failure/insufficient address space |
3669 | * 0 on no addresses written | 3673 | * 0 on no addresses written |
3670 | * X on writing X addresses to the RAR table | 3674 | * X on writing X addresses to the RAR table |
3671 | **/ | 3675 | **/ |
3672 | static int igb_write_uc_addr_list(struct net_device *netdev) | 3676 | static int igb_write_uc_addr_list(struct net_device *netdev) |
3673 | { | 3677 | { |
@@ -3688,8 +3692,8 @@ static int igb_write_uc_addr_list(struct net_device *netdev) | |||
3688 | if (!rar_entries) | 3692 | if (!rar_entries) |
3689 | break; | 3693 | break; |
3690 | igb_rar_set_qsel(adapter, ha->addr, | 3694 | igb_rar_set_qsel(adapter, ha->addr, |
3691 | rar_entries--, | 3695 | rar_entries--, |
3692 | vfn); | 3696 | vfn); |
3693 | count++; | 3697 | count++; |
3694 | } | 3698 | } |
3695 | } | 3699 | } |
@@ -3704,13 +3708,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev) | |||
3704 | } | 3708 | } |
3705 | 3709 | ||
3706 | /** | 3710 | /** |
3707 | * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set | 3711 | * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set |
3708 | * @netdev: network interface device structure | 3712 | * @netdev: network interface device structure |
3709 | * | 3713 | * |
3710 | * The set_rx_mode entry point is called whenever the unicast or multicast | 3714 | * The set_rx_mode entry point is called whenever the unicast or multicast |
3711 | * address lists or the network interface flags are updated. This routine is | 3715 | * address lists or the network interface flags are updated. This routine is |
3712 | * responsible for configuring the hardware for proper unicast, multicast, | 3716 | * responsible for configuring the hardware for proper unicast, multicast, |
3713 | * promiscuous mode, and all-multi behavior. | 3717 | * promiscuous mode, and all-multi behavior. |
3714 | **/ | 3718 | **/ |
3715 | static void igb_set_rx_mode(struct net_device *netdev) | 3719 | static void igb_set_rx_mode(struct net_device *netdev) |
3716 | { | 3720 | { |
@@ -3734,8 +3738,7 @@ static void igb_set_rx_mode(struct net_device *netdev) | |||
3734 | rctl |= E1000_RCTL_MPE; | 3738 | rctl |= E1000_RCTL_MPE; |
3735 | vmolr |= E1000_VMOLR_MPME; | 3739 | vmolr |= E1000_VMOLR_MPME; |
3736 | } else { | 3740 | } else { |
3737 | /* | 3741 | /* Write addresses to the MTA, if the attempt fails |
3738 | * Write addresses to the MTA, if the attempt fails | ||
3739 | * then we should just turn on promiscuous mode so | 3742 | * then we should just turn on promiscuous mode so |
3740 | * that we can at least receive multicast traffic | 3743 | * that we can at least receive multicast traffic |
3741 | */ | 3744 | */ |
@@ -3747,8 +3750,7 @@ static void igb_set_rx_mode(struct net_device *netdev) | |||
3747 | vmolr |= E1000_VMOLR_ROMPE; | 3750 | vmolr |= E1000_VMOLR_ROMPE; |
3748 | } | 3751 | } |
3749 | } | 3752 | } |
3750 | /* | 3753 | /* Write addresses to available RAR registers, if there is not |
3751 | * Write addresses to available RAR registers, if there is not | ||
3752 | * sufficient space to store all the addresses then enable | 3754 | * sufficient space to store all the addresses then enable |
3753 | * unicast promiscuous mode | 3755 | * unicast promiscuous mode |
3754 | */ | 3756 | */ |
@@ -3761,8 +3763,7 @@ static void igb_set_rx_mode(struct net_device *netdev) | |||
3761 | } | 3763 | } |
3762 | wr32(E1000_RCTL, rctl); | 3764 | wr32(E1000_RCTL, rctl); |
3763 | 3765 | ||
3764 | /* | 3766 | /* In order to support SR-IOV and eventually VMDq it is necessary to set |
3765 | * In order to support SR-IOV and eventually VMDq it is necessary to set | ||
3766 | * the VMOLR to enable the appropriate modes. Without this workaround | 3767 | * the VMOLR to enable the appropriate modes. Without this workaround |
3767 | * we will have issues with VLAN tag stripping not being done for frames | 3768 | * we will have issues with VLAN tag stripping not being done for frames |
3768 | * that are only arriving because we are the default pool | 3769 | * that are only arriving because we are the default pool |
@@ -3771,7 +3772,7 @@ static void igb_set_rx_mode(struct net_device *netdev) | |||
3771 | return; | 3772 | return; |
3772 | 3773 | ||
3773 | vmolr |= rd32(E1000_VMOLR(vfn)) & | 3774 | vmolr |= rd32(E1000_VMOLR(vfn)) & |
3774 | ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); | 3775 | ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); |
3775 | wr32(E1000_VMOLR(vfn), vmolr); | 3776 | wr32(E1000_VMOLR(vfn), vmolr); |
3776 | igb_restore_vf_multicasts(adapter); | 3777 | igb_restore_vf_multicasts(adapter); |
3777 | } | 3778 | } |
@@ -3816,7 +3817,8 @@ static void igb_spoof_check(struct igb_adapter *adapter) | |||
3816 | } | 3817 | } |
3817 | 3818 | ||
3818 | /* Need to wait a few seconds after link up to get diagnostic information from | 3819 | /* Need to wait a few seconds after link up to get diagnostic information from |
3819 | * the phy */ | 3820 | * the phy |
3821 | */ | ||
3820 | static void igb_update_phy_info(unsigned long data) | 3822 | static void igb_update_phy_info(unsigned long data) |
3821 | { | 3823 | { |
3822 | struct igb_adapter *adapter = (struct igb_adapter *) data; | 3824 | struct igb_adapter *adapter = (struct igb_adapter *) data; |
@@ -3824,8 +3826,8 @@ static void igb_update_phy_info(unsigned long data) | |||
3824 | } | 3826 | } |
3825 | 3827 | ||
3826 | /** | 3828 | /** |
3827 | * igb_has_link - check shared code for link and determine up/down | 3829 | * igb_has_link - check shared code for link and determine up/down |
3828 | * @adapter: pointer to driver private info | 3830 | * @adapter: pointer to driver private info |
3829 | **/ | 3831 | **/ |
3830 | bool igb_has_link(struct igb_adapter *adapter) | 3832 | bool igb_has_link(struct igb_adapter *adapter) |
3831 | { | 3833 | { |
@@ -3878,8 +3880,8 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) | |||
3878 | } | 3880 | } |
3879 | 3881 | ||
3880 | /** | 3882 | /** |
3881 | * igb_watchdog - Timer Call-back | 3883 | * igb_watchdog - Timer Call-back |
3882 | * @data: pointer to adapter cast into an unsigned long | 3884 | * @data: pointer to adapter cast into an unsigned long |
3883 | **/ | 3885 | **/ |
3884 | static void igb_watchdog(unsigned long data) | 3886 | static void igb_watchdog(unsigned long data) |
3885 | { | 3887 | { |
@@ -3891,8 +3893,8 @@ static void igb_watchdog(unsigned long data) | |||
3891 | static void igb_watchdog_task(struct work_struct *work) | 3893 | static void igb_watchdog_task(struct work_struct *work) |
3892 | { | 3894 | { |
3893 | struct igb_adapter *adapter = container_of(work, | 3895 | struct igb_adapter *adapter = container_of(work, |
3894 | struct igb_adapter, | 3896 | struct igb_adapter, |
3895 | watchdog_task); | 3897 | watchdog_task); |
3896 | struct e1000_hw *hw = &adapter->hw; | 3898 | struct e1000_hw *hw = &adapter->hw; |
3897 | struct net_device *netdev = adapter->netdev; | 3899 | struct net_device *netdev = adapter->netdev; |
3898 | u32 link; | 3900 | u32 link; |
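igb_watchdog_task, whose continuation lines are realigned above, recovers its adapter with container_of, the usual kernel idiom for walking from an embedded member (here the work_struct) back to the enclosing structure. A self-contained illustration of the pointer arithmetic, using a simplified stand-in for the kernel macro:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of: subtract the member's offset from the
 * member pointer to recover the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct adapter {
	int id;
	struct work_struct watchdog_task;
};

static void watchdog_task(struct work_struct *work)
{
	struct adapter *a = container_of(work, struct adapter, watchdog_task);
	printf("adapter %d\n", a->id);
}

int main(void)
{
	struct adapter a = { .id = 7 };
	watchdog_task(&a.watchdog_task); /* prints "adapter 7" */
	return 0;
}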
@@ -3906,8 +3908,8 @@ static void igb_watchdog_task(struct work_struct *work) | |||
3906 | if (!netif_carrier_ok(netdev)) { | 3908 | if (!netif_carrier_ok(netdev)) { |
3907 | u32 ctrl; | 3909 | u32 ctrl; |
3908 | hw->mac.ops.get_speed_and_duplex(hw, | 3910 | hw->mac.ops.get_speed_and_duplex(hw, |
3909 | &adapter->link_speed, | 3911 | &adapter->link_speed, |
3910 | &adapter->link_duplex); | 3912 | &adapter->link_duplex); |
3911 | 3913 | ||
3912 | ctrl = rd32(E1000_CTRL); | 3914 | ctrl = rd32(E1000_CTRL); |
3913 | /* Link status message must follow this format */ | 3915 | /* Link status message must follow this format */ |
@@ -3990,7 +3992,8 @@ static void igb_watchdog_task(struct work_struct *work) | |||
3990 | /* We've lost link, so the controller stops DMA, | 3992 | /* We've lost link, so the controller stops DMA, |
3991 | * but we've got queued Tx work that's never going | 3993 | * but we've got queued Tx work that's never going |
3992 | * to get done, so reset controller to flush Tx. | 3994 | * to get done, so reset controller to flush Tx. |
3993 | * (Do the reset outside of interrupt context). */ | 3995 | * (Do the reset outside of interrupt context). |
3996 | */ | ||
3994 | if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { | 3997 | if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { |
3995 | adapter->tx_timeout_count++; | 3998 | adapter->tx_timeout_count++; |
3996 | schedule_work(&adapter->reset_task); | 3999 | schedule_work(&adapter->reset_task); |
@@ -4003,7 +4006,7 @@ static void igb_watchdog_task(struct work_struct *work) | |||
4003 | set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); | 4006 | set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); |
4004 | } | 4007 | } |
4005 | 4008 | ||
4006 | /* Cause software interrupt to ensure rx ring is cleaned */ | 4009 | /* Cause software interrupt to ensure Rx ring is cleaned */ |
4007 | if (adapter->msix_entries) { | 4010 | if (adapter->msix_entries) { |
4008 | u32 eics = 0; | 4011 | u32 eics = 0; |
4009 | for (i = 0; i < adapter->num_q_vectors; i++) | 4012 | for (i = 0; i < adapter->num_q_vectors; i++) |
@@ -4030,20 +4033,20 @@ enum latency_range { | |||
4030 | }; | 4033 | }; |
4031 | 4034 | ||
4032 | /** | 4035 | /** |
4033 | * igb_update_ring_itr - update the dynamic ITR value based on packet size | 4036 | * igb_update_ring_itr - update the dynamic ITR value based on packet size |
4037 | * @q_vector: pointer to q_vector | ||
4034 | * | 4038 | * |
4035 | * Stores a new ITR value based strictly on packet size. This | 4039 | * Stores a new ITR value based strictly on packet size. This |
4036 | * algorithm is less sophisticated than that used in igb_update_itr, | 4040 | * algorithm is less sophisticated than that used in igb_update_itr, |
4037 | * due to the difficulty of synchronizing statistics across multiple | 4041 | * due to the difficulty of synchronizing statistics across multiple |
4038 | * receive rings. The divisors and thresholds used by this function | 4042 | * receive rings. The divisors and thresholds used by this function |
4039 | * were determined based on theoretical maximum wire speed and testing | 4043 | * were determined based on theoretical maximum wire speed and testing |
4040 | * data, in order to minimize response time while increasing bulk | 4044 | * data, in order to minimize response time while increasing bulk |
4041 | * throughput. | 4045 | * throughput. |
4042 | * This functionality is controlled by the InterruptThrottleRate module | 4046 | * This functionality is controlled by the InterruptThrottleRate module |
4043 | * parameter (see igb_param.c) | 4047 | * parameter (see igb_param.c) |
4044 | * NOTE: This function is called only when operating in a multiqueue | 4048 | * NOTE: This function is called only when operating in a multiqueue |
4045 | * receive environment. | 4049 | * receive environment. |
4046 | * @q_vector: pointer to q_vector | ||
4047 | **/ | 4050 | **/ |
4048 | static void igb_update_ring_itr(struct igb_q_vector *q_vector) | 4051 | static void igb_update_ring_itr(struct igb_q_vector *q_vector) |
4049 | { | 4052 | { |
@@ -4104,20 +4107,21 @@ clear_counts: | |||
4104 | } | 4107 | } |
4105 | 4108 | ||
4106 | /** | 4109 | /** |
4107 | * igb_update_itr - update the dynamic ITR value based on statistics | 4110 | * igb_update_itr - update the dynamic ITR value based on statistics |
4108 | * Stores a new ITR value based on packets and byte | 4111 | * @q_vector: pointer to q_vector |
4109 | * counts during the last interrupt. The advantage of per interrupt | 4112 | * @ring_container: ring info to update the itr for |
4110 | * computation is faster updates and more accurate ITR for the current | 4113 | * |
4111 | * traffic pattern. Constants in this function were computed | 4114 | * Stores a new ITR value based on packets and byte |
4112 | * based on theoretical maximum wire speed and thresholds were set based | 4115 | * counts during the last interrupt. The advantage of per interrupt |
4113 | * on testing data as well as attempting to minimize response time | 4116 | * computation is faster updates and more accurate ITR for the current |
4114 | * while increasing bulk throughput. | 4117 | * traffic pattern. Constants in this function were computed |
4115 | * This functionality is controlled by the InterruptThrottleRate module | 4118 | * based on theoretical maximum wire speed and thresholds were set based |
4116 | * parameter (see igb_param.c) | 4119 | * on testing data as well as attempting to minimize response time |
4117 | * NOTE: These calculations are only valid when operating in a single- | 4120 | * while increasing bulk throughput. |
4118 | * queue environment. | 4121 | * This functionality is controlled by the InterruptThrottleRate module |
4119 | * @q_vector: pointer to q_vector | 4122 | * parameter (see igb_param.c) |
4120 | * @ring_container: ring info to update the itr for | 4123 | * NOTE: These calculations are only valid when operating in a single- |
4124 | * queue environment. | ||
4121 | **/ | 4125 | **/ |
4122 | static void igb_update_itr(struct igb_q_vector *q_vector, | 4126 | static void igb_update_itr(struct igb_q_vector *q_vector, |
4123 | struct igb_ring_container *ring_container) | 4127 | struct igb_ring_container *ring_container) |
@@ -4215,12 +4219,12 @@ set_itr_now: | |||
4215 | if (new_itr != q_vector->itr_val) { | 4219 | if (new_itr != q_vector->itr_val) { |
4216 | /* this attempts to bias the interrupt rate towards Bulk | 4220 | /* this attempts to bias the interrupt rate towards Bulk |
4217 | * by adding intermediate steps when interrupt rate is | 4221 | * by adding intermediate steps when interrupt rate is |
4218 | * increasing */ | 4222 | * increasing |
4223 | */ | ||
4219 | new_itr = new_itr > q_vector->itr_val ? | 4224 | new_itr = new_itr > q_vector->itr_val ? |
4220 | max((new_itr * q_vector->itr_val) / | 4225 | max((new_itr * q_vector->itr_val) / |
4221 | (new_itr + (q_vector->itr_val >> 2)), | 4226 | (new_itr + (q_vector->itr_val >> 2)), |
4222 | new_itr) : | 4227 | new_itr) : new_itr; |
4223 | new_itr; | ||
4224 | /* Don't write the value here; it resets the adapter's | 4228 | /* Don't write the value here; it resets the adapter's |
4225 | * internal timer, and causes us to delay far longer than | 4229 | * internal timer, and causes us to delay far longer than |
4226 | * we should between interrupts. Instead, we write the ITR | 4230 | * we should between interrupts. Instead, we write the ITR |
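The conditional reflowed above inserts an intermediate ITR value instead of jumping straight to new_itr when the value is rising. A direct userspace transcription of the expression, handy for checking what it yields for sample inputs (the values below are arbitrary, not measured driver data):

#include <stdint.h>
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Verbatim form of the expression in the hunk above: when new_itr
 * exceeds the current itr_val, propose max(new*old / (new + old/4),
 * new) rather than adopting new_itr in one step. */
static uint32_t damp_itr(uint32_t new_itr, uint32_t itr_val)
{
	return new_itr > itr_val ?
	       max((new_itr * itr_val) / (new_itr + (itr_val >> 2)), new_itr) :
	       new_itr;
}

int main(void)
{
	printf("%u\n", damp_itr(980, 196)); /* increasing case: 980 */
	printf("%u\n", damp_itr(196, 980)); /* decreasing: passed through, 196 */
	return 0;
}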
@@ -4347,8 +4351,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) | |||
4347 | default: | 4351 | default: |
4348 | if (unlikely(net_ratelimit())) { | 4352 | if (unlikely(net_ratelimit())) { |
4349 | dev_warn(tx_ring->dev, | 4353 | dev_warn(tx_ring->dev, |
4350 | "partial checksum but proto=%x!\n", | 4354 | "partial checksum but proto=%x!\n", |
4351 | first->protocol); | 4355 | first->protocol); |
4352 | } | 4356 | } |
4353 | break; | 4357 | break; |
4354 | } | 4358 | } |
@@ -4371,8 +4375,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) | |||
4371 | default: | 4375 | default: |
4372 | if (unlikely(net_ratelimit())) { | 4376 | if (unlikely(net_ratelimit())) { |
4373 | dev_warn(tx_ring->dev, | 4377 | dev_warn(tx_ring->dev, |
4374 | "partial checksum but l4 proto=%x!\n", | 4378 | "partial checksum but l4 proto=%x!\n", |
4375 | l4_hdr); | 4379 | l4_hdr); |
4376 | } | 4380 | } |
4377 | break; | 4381 | break; |
4378 | } | 4382 | } |
@@ -4524,8 +4528,7 @@ static void igb_tx_map(struct igb_ring *tx_ring, | |||
4524 | /* set the timestamp */ | 4528 | /* set the timestamp */ |
4525 | first->time_stamp = jiffies; | 4529 | first->time_stamp = jiffies; |
4526 | 4530 | ||
4527 | /* | 4531 | /* Force memory writes to complete before letting h/w know there |
4528 | * Force memory writes to complete before letting h/w know there | ||
4529 | * are new descriptors to fetch. (Only applicable for weak-ordered | 4532 | * are new descriptors to fetch. (Only applicable for weak-ordered |
4530 | * memory model archs, such as IA-64). | 4533 | * memory model archs, such as IA-64). |
4531 | * | 4534 | * |
@@ -4546,7 +4549,8 @@ static void igb_tx_map(struct igb_ring *tx_ring, | |||
4546 | writel(i, tx_ring->tail); | 4549 | writel(i, tx_ring->tail); |
4547 | 4550 | ||
4548 | /* we need this if more than one processor can write to our tail | 4551 | /* we need this if more than one processor can write to our tail |
4549 | * at a time, it syncronizes IO on IA64/Altix systems */ | 4552 | * at a time, it synchronizes IO on IA64/Altix systems |
4553 | */ | ||
4550 | mmiowb(); | 4554 | mmiowb(); |
4551 | 4555 | ||
4552 | return; | 4556 | return; |
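The two comments fixed in this hunk are both about ordering: descriptor writes must be globally visible before the tail-register write that hands them to hardware, which the driver ensures with wmb() and, on IA64/Altix, mmiowb(). A portable C11 analogue of the publish side, using a release store where the kernel uses wmb(); this is a sketch of the ordering idea, not the driver's code:

#include <stdatomic.h>
#include <stdint.h>

struct desc { uint64_t addr; uint32_t len; };

static struct desc ring[256];
static _Atomic uint32_t tail; /* stands in for the hardware tail register */

static void publish(uint32_t i, uint64_t dma, uint32_t len)
{
	ring[i].addr = dma; /* fill the descriptor first ... */
	ring[i].len = len;
	/* ... then publish: the release store plays the role of wmb(),
	 * ordering the descriptor stores before the tail update. */
	atomic_store_explicit(&tail, (i + 1) % 256, memory_order_release);
}

int main(void)
{
	publish(0, 0x1000, 64);
	return 0;
}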
@@ -4576,11 +4580,13 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) | |||
4576 | 4580 | ||
4577 | /* Herbert's original patch had: | 4581 | /* Herbert's original patch had: |
4578 | * smp_mb__after_netif_stop_queue(); | 4582 | * smp_mb__after_netif_stop_queue(); |
4579 | * but since that doesn't exist yet, just open code it. */ | 4583 | * but since that doesn't exist yet, just open code it. |
4584 | */ | ||
4580 | smp_mb(); | 4585 | smp_mb(); |
4581 | 4586 | ||
4582 | /* We need to check again in case another CPU has just | 4587 | /* We need to check again in case another CPU has just |
4583 | * made room available. */ | 4588 | * made room available. |
4589 | */ | ||
4584 | if (igb_desc_unused(tx_ring) < size) | 4590 | if (igb_desc_unused(tx_ring) < size) |
4585 | return -EBUSY; | 4591 | return -EBUSY; |
4586 | 4592 | ||
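__igb_maybe_stop_tx, touched in this hunk, uses the classic stop-then-recheck pattern: stop the queue, issue a full barrier (the open-coded smp_mb() the comment mentions), then re-read the free-descriptor count in case a completion raced in. A userspace sketch of the same shape with C11 atomics (hypothetical names, simplified state):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int queue_stopped;
static _Atomic unsigned int unused_descs; /* updated by the cleanup path */

static int maybe_stop_tx(unsigned int size)
{
	atomic_store(&queue_stopped, 1);

	/* open-coded smp_mb(): order the store above against the load below */
	atomic_thread_fence(memory_order_seq_cst);

	/* check again in case another CPU has just made room available */
	if (atomic_load(&unused_descs) < size)
		return -1; /* -EBUSY in the driver: stay stopped */

	atomic_store(&queue_stopped, 0); /* room appeared: restart the queue */
	return 0;
}

int main(void)
{
	atomic_store(&unused_descs, 8);
	printf("%d\n", maybe_stop_tx(21)); /* -1: too few free descriptors */
	return 0;
}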
@@ -4706,8 +4712,7 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, | |||
4706 | return NETDEV_TX_OK; | 4712 | return NETDEV_TX_OK; |
4707 | } | 4713 | } |
4708 | 4714 | ||
4709 | /* | 4715 | /* The minimum packet size with TCTL.PSP set is 17 so pad the skb |
4710 | * The minimum packet size with TCTL.PSP set is 17 so pad the skb | ||
4711 | * in order to meet this minimum size requirement. | 4716 | * in order to meet this minimum size requirement. |
4712 | */ | 4717 | */ |
4713 | if (unlikely(skb->len < 17)) { | 4718 | if (unlikely(skb->len < 17)) { |
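The check above enforces the 17-byte minimum frame length required when TCTL.PSP (pad short packets) is set; undersized skbs are zero-padded before transmit. A sketch of the padding step over a plain buffer, standing in for skb_pad():

#include <stdio.h>
#include <string.h>

#define MIN_TX_LEN 17 /* minimum frame length with TCTL.PSP set */

static int pad_to_min(unsigned char *buf, size_t *len, size_t cap)
{
	if (*len >= MIN_TX_LEN)
		return 0;
	if (cap < MIN_TX_LEN)
		return -1; /* skb_pad() would grow the buffer here */
	memset(buf + *len, 0, MIN_TX_LEN - *len); /* zero-fill the tail */
	*len = MIN_TX_LEN;
	return 0;
}

int main(void)
{
	unsigned char frame[32] = { 0xde, 0xad };
	size_t len = 2;

	pad_to_min(frame, &len, sizeof(frame));
	printf("%zu\n", len); /* 17 */
	return 0;
}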
@@ -4721,8 +4726,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, | |||
4721 | } | 4726 | } |
4722 | 4727 | ||
4723 | /** | 4728 | /** |
4724 | * igb_tx_timeout - Respond to a Tx Hang | 4729 | * igb_tx_timeout - Respond to a Tx Hang |
4725 | * @netdev: network interface device structure | 4730 | * @netdev: network interface device structure |
4726 | **/ | 4731 | **/ |
4727 | static void igb_tx_timeout(struct net_device *netdev) | 4732 | static void igb_tx_timeout(struct net_device *netdev) |
4728 | { | 4733 | { |
@@ -4751,13 +4756,12 @@ static void igb_reset_task(struct work_struct *work) | |||
4751 | } | 4756 | } |
4752 | 4757 | ||
4753 | /** | 4758 | /** |
4754 | * igb_get_stats64 - Get System Network Statistics | 4759 | * igb_get_stats64 - Get System Network Statistics |
4755 | * @netdev: network interface device structure | 4760 | * @netdev: network interface device structure |
4756 | * @stats: rtnl_link_stats64 pointer | 4761 | * @stats: rtnl_link_stats64 pointer |
4757 | * | ||
4758 | **/ | 4762 | **/ |
4759 | static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, | 4763 | static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, |
4760 | struct rtnl_link_stats64 *stats) | 4764 | struct rtnl_link_stats64 *stats) |
4761 | { | 4765 | { |
4762 | struct igb_adapter *adapter = netdev_priv(netdev); | 4766 | struct igb_adapter *adapter = netdev_priv(netdev); |
4763 | 4767 | ||
@@ -4770,11 +4774,11 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, | |||
4770 | } | 4774 | } |
4771 | 4775 | ||
4772 | /** | 4776 | /** |
4773 | * igb_change_mtu - Change the Maximum Transfer Unit | 4777 | * igb_change_mtu - Change the Maximum Transfer Unit |
4774 | * @netdev: network interface device structure | 4778 | * @netdev: network interface device structure |
4775 | * @new_mtu: new value for maximum frame size | 4779 | * @new_mtu: new value for maximum frame size |
4776 | * | 4780 | * |
4777 | * Returns 0 on success, negative on failure | 4781 | * Returns 0 on success, negative on failure |
4778 | **/ | 4782 | **/ |
4779 | static int igb_change_mtu(struct net_device *netdev, int new_mtu) | 4783 | static int igb_change_mtu(struct net_device *netdev, int new_mtu) |
4780 | { | 4784 | { |
@@ -4817,10 +4821,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) | |||
4817 | } | 4821 | } |
4818 | 4822 | ||
4819 | /** | 4823 | /** |
4820 | * igb_update_stats - Update the board statistics counters | 4824 | * igb_update_stats - Update the board statistics counters |
4821 | * @adapter: board private structure | 4825 | * @adapter: board private structure |
4822 | **/ | 4826 | **/ |
4823 | |||
4824 | void igb_update_stats(struct igb_adapter *adapter, | 4827 | void igb_update_stats(struct igb_adapter *adapter, |
4825 | struct rtnl_link_stats64 *net_stats) | 4828 | struct rtnl_link_stats64 *net_stats) |
4826 | { | 4829 | { |
@@ -4835,8 +4838,7 @@ void igb_update_stats(struct igb_adapter *adapter, | |||
4835 | 4838 | ||
4836 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF | 4839 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF |
4837 | 4840 | ||
4838 | /* | 4841 | /* Prevent stats update while adapter is being reset, or if the pci |
4839 | * Prevent stats update while adapter is being reset, or if the pci | ||
4840 | * connection is down. | 4842 | * connection is down. |
4841 | */ | 4843 | */ |
4842 | if (adapter->link_speed == 0) | 4844 | if (adapter->link_speed == 0) |
@@ -4970,7 +4972,8 @@ void igb_update_stats(struct igb_adapter *adapter, | |||
4970 | /* Rx Errors */ | 4972 | /* Rx Errors */ |
4971 | 4973 | ||
4972 | /* RLEC on some newer hardware can be incorrect so build | 4974 | /* RLEC on some newer hardware can be incorrect so build |
4973 | * our own version based on RUC and ROC */ | 4975 | * our own version based on RUC and ROC |
4976 | */ | ||
4974 | net_stats->rx_errors = adapter->stats.rxerrc + | 4977 | net_stats->rx_errors = adapter->stats.rxerrc + |
4975 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 4978 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
4976 | adapter->stats.ruc + adapter->stats.roc + | 4979 | adapter->stats.ruc + adapter->stats.roc + |
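As the comment notes, RLEC misreports on some newer parts, so rx_errors is rebuilt from the individual error counters. The aggregation, extracted into a compilable form using the same field names as the driver's stats structure:

#include <stdint.h>
#include <stdio.h>

struct hw_stats {
	uint64_t rxerrc;   /* receive error count */
	uint64_t crcerrs;  /* CRC errors */
	uint64_t algnerrc; /* alignment errors */
	uint64_t ruc;      /* receive undersize count (RUC) */
	uint64_t roc;      /* receive oversize count (ROC) */
	uint64_t cexterr;  /* carrier extension errors */
};

/* RLEC (receive length error count) is unreliable, so rebuild the
 * length-error contribution from RUC and ROC instead. */
static uint64_t rx_errors(const struct hw_stats *s)
{
	return s->rxerrc + s->crcerrs + s->algnerrc +
	       s->ruc + s->roc + s->cexterr;
}

int main(void)
{
	struct hw_stats s = { .crcerrs = 3, .ruc = 1, .roc = 2 };
	printf("%llu\n", (unsigned long long)rx_errors(&s)); /* 6 */
	return 0;
}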
@@ -5029,7 +5032,8 @@ static irqreturn_t igb_msix_other(int irq, void *data) | |||
5029 | adapter->stats.doosync++; | 5032 | adapter->stats.doosync++; |
5030 | /* The DMA Out of Sync is also indication of a spoof event | 5033 | /* The DMA Out of Sync is also indication of a spoof event |
5031 | * in IOV mode. Check the Wrong VM Behavior register to | 5034 | * in IOV mode. Check the Wrong VM Behavior register to |
5032 | * see if it is really a spoof event. */ | 5035 | * see if it is really a spoof event. |
5036 | */ | ||
5033 | igb_check_wvbr(adapter); | 5037 | igb_check_wvbr(adapter); |
5034 | } | 5038 | } |
5035 | 5039 | ||
@@ -5103,8 +5107,7 @@ static void igb_update_tx_dca(struct igb_adapter *adapter, | |||
5103 | if (hw->mac.type != e1000_82575) | 5107 | if (hw->mac.type != e1000_82575) |
5104 | txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; | 5108 | txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; |
5105 | 5109 | ||
5106 | /* | 5110 | /* We can enable relaxed ordering for reads, but not writes when |
5107 | * We can enable relaxed ordering for reads, but not writes when | ||
5108 | * DCA is enabled. This is due to a known issue in some chipsets | 5111 | * DCA is enabled. This is due to a known issue in some chipsets |
5109 | * which will cause the DCA tag to be cleared. | 5112 | * which will cause the DCA tag to be cleared. |
5110 | */ | 5113 | */ |
@@ -5125,8 +5128,7 @@ static void igb_update_rx_dca(struct igb_adapter *adapter, | |||
5125 | if (hw->mac.type != e1000_82575) | 5128 | if (hw->mac.type != e1000_82575) |
5126 | rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; | 5129 | rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; |
5127 | 5130 | ||
5128 | /* | 5131 | /* We can enable relaxed ordering for reads, but not writes when |
5129 | * We can enable relaxed ordering for reads, but not writes when | ||
5130 | * DCA is enabled. This is due to a known issue in some chipsets | 5132 | * DCA is enabled. This is due to a known issue in some chipsets |
5131 | * which will cause the DCA tag to be cleared. | 5133 | * which will cause the DCA tag to be cleared. |
5132 | */ | 5134 | */ |
@@ -5195,7 +5197,8 @@ static int __igb_notify_dca(struct device *dev, void *data) | |||
5195 | case DCA_PROVIDER_REMOVE: | 5197 | case DCA_PROVIDER_REMOVE: |
5196 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { | 5198 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { |
5197 | /* without this a class_device is left | 5199 | /* without this a class_device is left |
5198 | * hanging around in the sysfs model */ | 5200 | * hanging around in the sysfs model |
5201 | */ | ||
5199 | dca_remove_requester(dev); | 5202 | dca_remove_requester(dev); |
5200 | dev_info(&pdev->dev, "DCA disabled\n"); | 5203 | dev_info(&pdev->dev, "DCA disabled\n"); |
5201 | adapter->flags &= ~IGB_FLAG_DCA_ENABLED; | 5204 | adapter->flags &= ~IGB_FLAG_DCA_ENABLED; |
@@ -5208,12 +5211,12 @@ static int __igb_notify_dca(struct device *dev, void *data) | |||
5208 | } | 5211 | } |
5209 | 5212 | ||
5210 | static int igb_notify_dca(struct notifier_block *nb, unsigned long event, | 5213 | static int igb_notify_dca(struct notifier_block *nb, unsigned long event, |
5211 | void *p) | 5214 | void *p) |
5212 | { | 5215 | { |
5213 | int ret_val; | 5216 | int ret_val; |
5214 | 5217 | ||
5215 | ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, | 5218 | ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, |
5216 | __igb_notify_dca); | 5219 | __igb_notify_dca); |
5217 | 5220 | ||
5218 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; | 5221 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; |
5219 | } | 5222 | } |
@@ -5285,7 +5288,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) | |||
5285 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; | 5288 | struct vf_data_storage *vf_data = &adapter->vf_data[vf]; |
5286 | 5289 | ||
5287 | vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | | 5290 | vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | |
5288 | IGB_VF_FLAG_MULTI_PROMISC); | 5291 | IGB_VF_FLAG_MULTI_PROMISC); |
5289 | vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); | 5292 | vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); |
5290 | 5293 | ||
5291 | if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { | 5294 | if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { |
@@ -5293,8 +5296,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) | |||
5293 | vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; | 5296 | vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; |
5294 | *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; | 5297 | *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; |
5295 | } else { | 5298 | } else { |
5296 | /* | 5299 | /* if we have hashes and we are clearing a multicast promisc |
5297 | * if we have hashes and we are clearing a multicast promisc | ||
5298 | * flag we need to write the hashes to the MTA as this step | 5300 | * flag we need to write the hashes to the MTA as this step |
5299 | * was previously skipped | 5301 | * was previously skipped |
5300 | */ | 5302 | */ |
@@ -5315,7 +5317,6 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) | |||
5315 | return -EINVAL; | 5317 | return -EINVAL; |
5316 | 5318 | ||
5317 | return 0; | 5319 | return 0; |
5318 | |||
5319 | } | 5320 | } |
5320 | 5321 | ||
5321 | static int igb_set_vf_multicasts(struct igb_adapter *adapter, | 5322 | static int igb_set_vf_multicasts(struct igb_adapter *adapter, |
@@ -5522,22 +5523,20 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, | |||
5522 | "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); | 5523 | "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); |
5523 | if (test_bit(__IGB_DOWN, &adapter->state)) { | 5524 | if (test_bit(__IGB_DOWN, &adapter->state)) { |
5524 | dev_warn(&adapter->pdev->dev, | 5525 | dev_warn(&adapter->pdev->dev, |
5525 | "The VF VLAN has been set," | 5526 | "The VF VLAN has been set, but the PF device is not up.\n"); |
5526 | " but the PF device is not up.\n"); | ||
5527 | dev_warn(&adapter->pdev->dev, | 5527 | dev_warn(&adapter->pdev->dev, |
5528 | "Bring the PF device up before" | 5528 | "Bring the PF device up before attempting to use the VF device.\n"); |
5529 | " attempting to use the VF device.\n"); | ||
5530 | } | 5529 | } |
5531 | } else { | 5530 | } else { |
5532 | igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, | 5531 | igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, |
5533 | false, vf); | 5532 | false, vf); |
5534 | igb_set_vmvir(adapter, vlan, vf); | 5533 | igb_set_vmvir(adapter, vlan, vf); |
5535 | igb_set_vmolr(adapter, vf, true); | 5534 | igb_set_vmolr(adapter, vf, true); |
5536 | adapter->vf_data[vf].pf_vlan = 0; | 5535 | adapter->vf_data[vf].pf_vlan = 0; |
5537 | adapter->vf_data[vf].pf_qos = 0; | 5536 | adapter->vf_data[vf].pf_qos = 0; |
5538 | } | 5537 | } |
5539 | out: | 5538 | out: |
5540 | return err; | 5539 | return err; |
5541 | } | 5540 | } |
5542 | 5541 | ||
5543 | static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) | 5542 | static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) |
@@ -5615,8 +5614,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) | |||
5615 | 5614 | ||
5616 | static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) | 5615 | static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) |
5617 | { | 5616 | { |
5618 | /* | 5617 | /* The VF MAC Address is stored in a packed array of bytes |
5619 | * The VF MAC Address is stored in a packed array of bytes | ||
5620 | * starting at the second 32 bit word of the msg array | 5618 | * starting at the second 32 bit word of the msg array |
5621 | */ | 5619 | */ |
5622 | unsigned char *addr = (char *)&msg[1]; | 5620 | unsigned char *addr = (char *)&msg[1]; |
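The comment cleaned up here documents the mailbox layout: the VF's MAC address sits packed in bytes starting at the second 32-bit word of the message. A small round-trip demonstrating that layout (the message contents below are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t msg[3] = { 0 }; /* msg[0] = opcode, msg[1..2] = MAC bytes */
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };

	memcpy(&msg[1], mac, 6); /* VF side: pack the address */

	const uint8_t *addr = (const uint8_t *)&msg[1]; /* PF side: unpack */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}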
@@ -5665,11 +5663,9 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) | |||
5665 | if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) | 5663 | if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) |
5666 | return; | 5664 | return; |
5667 | 5665 | ||
5668 | /* | 5666 | /* until the vf completes a reset it should not be |
5669 | * until the vf completes a reset it should not be | ||
5670 | * allowed to start any configuration. | 5667 | * allowed to start any configuration. |
5671 | */ | 5668 | */ |
5672 | |||
5673 | if (msgbuf[0] == E1000_VF_RESET) { | 5669 | if (msgbuf[0] == E1000_VF_RESET) { |
5674 | igb_vf_reset_msg(adapter, vf); | 5670 | igb_vf_reset_msg(adapter, vf); |
5675 | return; | 5671 | return; |
@@ -5689,9 +5685,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) | |||
5689 | retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); | 5685 | retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); |
5690 | else | 5686 | else |
5691 | dev_warn(&pdev->dev, | 5687 | dev_warn(&pdev->dev, |
5692 | "VF %d attempted to override administratively " | 5688 | "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", |
5693 | "set MAC address\nReload the VF driver to " | 5689 | vf); |
5694 | "resume operations\n", vf); | ||
5695 | break; | 5690 | break; |
5696 | case E1000_VF_SET_PROMISC: | 5691 | case E1000_VF_SET_PROMISC: |
5697 | retval = igb_set_vf_promisc(adapter, msgbuf, vf); | 5692 | retval = igb_set_vf_promisc(adapter, msgbuf, vf); |
@@ -5706,9 +5701,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) | |||
5706 | retval = -1; | 5701 | retval = -1; |
5707 | if (vf_data->pf_vlan) | 5702 | if (vf_data->pf_vlan) |
5708 | dev_warn(&pdev->dev, | 5703 | dev_warn(&pdev->dev, |
5709 | "VF %d attempted to override administratively " | 5704 | "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", |
5710 | "set VLAN tag\nReload the VF driver to " | 5705 | vf); |
5711 | "resume operations\n", vf); | ||
5712 | else | 5706 | else |
5713 | retval = igb_set_vf_vlan(adapter, msgbuf, vf); | 5707 | retval = igb_set_vf_vlan(adapter, msgbuf, vf); |
5714 | break; | 5708 | break; |
@@ -5777,9 +5771,9 @@ static void igb_set_uta(struct igb_adapter *adapter) | |||
5777 | } | 5771 | } |
5778 | 5772 | ||
5779 | /** | 5773 | /** |
5780 | * igb_intr_msi - Interrupt Handler | 5774 | * igb_intr_msi - Interrupt Handler |
5781 | * @irq: interrupt number | 5775 | * @irq: interrupt number |
5782 | * @data: pointer to a network interface device structure | 5776 | * @data: pointer to a network interface device structure |
5783 | **/ | 5777 | **/ |
5784 | static irqreturn_t igb_intr_msi(int irq, void *data) | 5778 | static irqreturn_t igb_intr_msi(int irq, void *data) |
5785 | { | 5779 | { |
@@ -5822,9 +5816,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data) | |||
5822 | } | 5816 | } |
5823 | 5817 | ||
5824 | /** | 5818 | /** |
5825 | * igb_intr - Legacy Interrupt Handler | 5819 | * igb_intr - Legacy Interrupt Handler |
5826 | * @irq: interrupt number | 5820 | * @irq: interrupt number |
5827 | * @data: pointer to a network interface device structure | 5821 | * @data: pointer to a network interface device structure |
5828 | **/ | 5822 | **/ |
5829 | static irqreturn_t igb_intr(int irq, void *data) | 5823 | static irqreturn_t igb_intr(int irq, void *data) |
5830 | { | 5824 | { |
@@ -5832,11 +5826,13 @@ static irqreturn_t igb_intr(int irq, void *data) | |||
5832 | struct igb_q_vector *q_vector = adapter->q_vector[0]; | 5826 | struct igb_q_vector *q_vector = adapter->q_vector[0]; |
5833 | struct e1000_hw *hw = &adapter->hw; | 5827 | struct e1000_hw *hw = &adapter->hw; |
5834 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No | 5828 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No |
5835 | * need for the IMC write */ | 5829 | * need for the IMC write |
5830 | */ | ||
5836 | u32 icr = rd32(E1000_ICR); | 5831 | u32 icr = rd32(E1000_ICR); |
5837 | 5832 | ||
5838 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | 5833 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is |
5839 | * not set, then the adapter didn't send an interrupt */ | 5834 | * not set, then the adapter didn't send an interrupt |
5835 | */ | ||
5840 | if (!(icr & E1000_ICR_INT_ASSERTED)) | 5836 | if (!(icr & E1000_ICR_INT_ASSERTED)) |
5841 | return IRQ_NONE; | 5837 | return IRQ_NONE; |
5842 | 5838 | ||
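Both comments in this hunk describe legacy (INTx) interrupt handling: reading ICR auto-masks further interrupts, and a clear INT_ASSERTED bit means the shared line fired for some other device. The test itself, with INT_ASSERTED at bit 31 as in the E1000_ICR_INT_ASSERTED define:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ICR_INT_ASSERTED 0x80000000u /* matches E1000_ICR_INT_ASSERTED */

/* Reading ICR already auto-masked interrupts; if INT_ASSERTED is
 * clear, the interrupt belongs to another device on the line. */
static bool irq_is_ours(uint32_t icr)
{
	return (icr & ICR_INT_ASSERTED) != 0;
}

int main(void)
{
	printf("%d\n", irq_is_ours(0x80000004u)); /* 1: handle it */
	printf("%d\n", irq_is_ours(0x00000004u)); /* 0: IRQ_NONE */
	return 0;
}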
@@ -5895,15 +5891,15 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector) | |||
5895 | } | 5891 | } |
5896 | 5892 | ||
5897 | /** | 5893 | /** |
5898 | * igb_poll - NAPI Rx polling callback | 5894 | * igb_poll - NAPI Rx polling callback |
5899 | * @napi: napi polling structure | 5895 | * @napi: napi polling structure |
5900 | * @budget: count of how many packets we should handle | 5896 | * @budget: count of how many packets we should handle |
5901 | **/ | 5897 | **/ |
5902 | static int igb_poll(struct napi_struct *napi, int budget) | 5898 | static int igb_poll(struct napi_struct *napi, int budget) |
5903 | { | 5899 | { |
5904 | struct igb_q_vector *q_vector = container_of(napi, | 5900 | struct igb_q_vector *q_vector = container_of(napi, |
5905 | struct igb_q_vector, | 5901 | struct igb_q_vector, |
5906 | napi); | 5902 | napi); |
5907 | bool clean_complete = true; | 5903 | bool clean_complete = true; |
5908 | 5904 | ||
5909 | #ifdef CONFIG_IGB_DCA | 5905 | #ifdef CONFIG_IGB_DCA |
@@ -5928,10 +5924,10 @@ static int igb_poll(struct napi_struct *napi, int budget) | |||
5928 | } | 5924 | } |
5929 | 5925 | ||
5930 | /** | 5926 | /** |
5931 | * igb_clean_tx_irq - Reclaim resources after transmit completes | 5927 | * igb_clean_tx_irq - Reclaim resources after transmit completes |
5932 | * @q_vector: pointer to q_vector containing needed info | 5928 | * @q_vector: pointer to q_vector containing needed info |
5933 | * | 5929 | * |
5934 | * returns true if ring is completely cleaned | 5930 | * returns true if ring is completely cleaned |
5935 | **/ | 5931 | **/ |
5936 | static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) | 5932 | static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) |
5937 | { | 5933 | { |
@@ -6037,7 +6033,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) | |||
6037 | struct e1000_hw *hw = &adapter->hw; | 6033 | struct e1000_hw *hw = &adapter->hw; |
6038 | 6034 | ||
6039 | /* Detect a transmit hang in hardware, this serializes the | 6035 | /* Detect a transmit hang in hardware, this serializes the |
6040 | * check with the clearing of time_stamp and movement of i */ | 6036 | * check with the clearing of time_stamp and movement of i |
6037 | */ | ||
6041 | clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); | 6038 | clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); |
6042 | if (tx_buffer->next_to_watch && | 6039 | if (tx_buffer->next_to_watch && |
6043 | time_after(jiffies, tx_buffer->time_stamp + | 6040 | time_after(jiffies, tx_buffer->time_stamp + |
@@ -6076,8 +6073,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) | |||
6076 | 6073 | ||
6077 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) | 6074 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) |
6078 | if (unlikely(total_packets && | 6075 | if (unlikely(total_packets && |
6079 | netif_carrier_ok(tx_ring->netdev) && | 6076 | netif_carrier_ok(tx_ring->netdev) && |
6080 | igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { | 6077 | igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { |
6081 | /* Make sure that anybody stopping the queue after this | 6078 | /* Make sure that anybody stopping the queue after this |
6082 | * sees the new next_to_clean. | 6079 | * sees the new next_to_clean. |
6083 | */ | 6080 | */ |
@@ -6098,11 +6095,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) | |||
6098 | } | 6095 | } |
6099 | 6096 | ||
6100 | /** | 6097 | /** |
6101 | * igb_reuse_rx_page - page flip buffer and store it back on the ring | 6098 | * igb_reuse_rx_page - page flip buffer and store it back on the ring |
6102 | * @rx_ring: rx descriptor ring to store buffers on | 6099 | * @rx_ring: rx descriptor ring to store buffers on |
6103 | * @old_buff: donor buffer to have page reused | 6100 | * @old_buff: donor buffer to have page reused |
6104 | * | 6101 | * |
6105 | * Synchronizes page for reuse by the adapter | 6102 | * Synchronizes page for reuse by the adapter |
6106 | **/ | 6103 | **/ |
6107 | static void igb_reuse_rx_page(struct igb_ring *rx_ring, | 6104 | static void igb_reuse_rx_page(struct igb_ring *rx_ring, |
6108 | struct igb_rx_buffer *old_buff) | 6105 | struct igb_rx_buffer *old_buff) |
@@ -6162,19 +6159,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, | |||
6162 | } | 6159 | } |
6163 | 6160 | ||
6164 | /** | 6161 | /** |
6165 | * igb_add_rx_frag - Add contents of Rx buffer to sk_buff | 6162 | * igb_add_rx_frag - Add contents of Rx buffer to sk_buff |
6166 | * @rx_ring: rx descriptor ring to transact packets on | 6163 | * @rx_ring: rx descriptor ring to transact packets on |
6167 | * @rx_buffer: buffer containing page to add | 6164 | * @rx_buffer: buffer containing page to add |
6168 | * @rx_desc: descriptor containing length of buffer written by hardware | 6165 | * @rx_desc: descriptor containing length of buffer written by hardware |
6169 | * @skb: sk_buff to place the data into | 6166 | * @skb: sk_buff to place the data into |
6170 | * | 6167 | * |
6171 | * This function will add the data contained in rx_buffer->page to the skb. | 6168 | * This function will add the data contained in rx_buffer->page to the skb. |
6172 | * This is done either through a direct copy if the data in the buffer is | 6169 | * This is done either through a direct copy if the data in the buffer is |
6173 | * less than the skb header size, otherwise it will just attach the page as | 6170 | * less than the skb header size, otherwise it will just attach the page as |
6174 | * a frag to the skb. | 6171 | * a frag to the skb. |
6175 | * | 6172 | * |
6176 | * The function will then update the page offset if necessary and return | 6173 | * The function will then update the page offset if necessary and return |
6177 | * true if the buffer can be reused by the adapter. | 6174 | * true if the buffer can be reused by the adapter. |
6178 | **/ | 6175 | **/ |
6179 | static bool igb_add_rx_frag(struct igb_ring *rx_ring, | 6176 | static bool igb_add_rx_frag(struct igb_ring *rx_ring, |
6180 | struct igb_rx_buffer *rx_buffer, | 6177 | struct igb_rx_buffer *rx_buffer, |
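igb_add_rx_frag's docstring, realigned above, describes a size cutoff: payloads small enough for the skb's linear area are copied so the page can be recycled at once, while larger ones are attached as page fragments. A simplified sketch of that decision; the real function also handles page-reuse accounting and timestamp headers, omitted here:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define RX_HDR_LEN 256 /* mirrors IGB_RX_HDR_LEN, the skb header budget */

/* Small payloads are copied so the page can be recycled immediately;
 * larger ones would be attached via skb_add_rx_frag() instead. */
static bool add_rx_frag(unsigned char *skb_data, const unsigned char *page_va,
			size_t size)
{
	if (size <= RX_HDR_LEN) {
		memcpy(skb_data, page_va, size);
		return true;  /* page still ours: reusable */
	}
	/* driver: skb_add_rx_frag(skb, nr_frags, page, offset, size, ...) */
	return false; /* page handed to the skb (reuse needs a page flip) */
}

int main(void)
{
	unsigned char hdr[RX_HDR_LEN];
	unsigned char page[2048] = { 1, 2, 3 };

	printf("%d\n", add_rx_frag(hdr, page, 60));   /* 1: copied */
	printf("%d\n", add_rx_frag(hdr, page, 1500)); /* 0: attached as frag */
	return 0;
}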
@@ -6317,8 +6314,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, | |||
6317 | return NULL; | 6314 | return NULL; |
6318 | } | 6315 | } |
6319 | 6316 | ||
6320 | /* | 6317 | /* we will be copying header into skb->data in |
6321 | * we will be copying header into skb->data in | ||
6322 | * pskb_may_pull so it is in our interest to prefetch | 6318 | * pskb_may_pull so it is in our interest to prefetch |
6323 | * it now to avoid a possible cache miss | 6319 | * it now to avoid a possible cache miss |
6324 | */ | 6320 | */ |
@@ -6366,8 +6362,7 @@ static inline void igb_rx_checksum(struct igb_ring *ring, | |||
6366 | if (igb_test_staterr(rx_desc, | 6362 | if (igb_test_staterr(rx_desc, |
6367 | E1000_RXDEXT_STATERR_TCPE | | 6363 | E1000_RXDEXT_STATERR_TCPE | |
6368 | E1000_RXDEXT_STATERR_IPE)) { | 6364 | E1000_RXDEXT_STATERR_IPE)) { |
6369 | /* | 6365 | /* work around errata with sctp packets where the TCPE aka |
6370 | * work around errata with sctp packets where the TCPE aka | ||
6371 | * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) | 6366 | * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) |
6372 | * packets, (aka let the stack check the crc32c) | 6367 | * packets, (aka let the stack check the crc32c) |
6373 | */ | 6368 | */ |
@@ -6398,15 +6393,15 @@ static inline void igb_rx_hash(struct igb_ring *ring, | |||
6398 | } | 6393 | } |
6399 | 6394 | ||
6400 | /** | 6395 | /** |
6401 | * igb_is_non_eop - process handling of non-EOP buffers | 6396 | * igb_is_non_eop - process handling of non-EOP buffers |
6402 | * @rx_ring: Rx ring being processed | 6397 | * @rx_ring: Rx ring being processed |
6403 | * @rx_desc: Rx descriptor for current buffer | 6398 | * @rx_desc: Rx descriptor for current buffer |
6404 | * @skb: current socket buffer containing buffer in progress | 6399 | * @skb: current socket buffer containing buffer in progress |
6405 | * | 6400 | * |
6406 | * This function updates next to clean. If the buffer is an EOP buffer | 6401 | * This function updates next to clean. If the buffer is an EOP buffer |
6407 | * this function exits returning false, otherwise it will place the | 6402 | * this function exits returning false, otherwise it will place the |
6408 | * sk_buff in the next buffer to be chained and return true indicating | 6403 | * sk_buff in the next buffer to be chained and return true indicating |
6409 | * that this is in fact a non-EOP buffer. | 6404 | * that this is in fact a non-EOP buffer. |
6410 | **/ | 6405 | **/ |
6411 | static bool igb_is_non_eop(struct igb_ring *rx_ring, | 6406 | static bool igb_is_non_eop(struct igb_ring *rx_ring, |
6412 | union e1000_adv_rx_desc *rx_desc) | 6407 | union e1000_adv_rx_desc *rx_desc) |
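igb_is_non_eop, documented above, advances next_to_clean with wraparound and reports whether more buffers belong to the frame in progress. A compilable mirror of that logic, with EOP at bit 1 as in E1000_RXD_STAT_EOP:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RXD_STAT_EOP 0x02 /* end-of-packet, per E1000_RXD_STAT_EOP */

/* Advance next_to_clean (wrapping at the ring size) and report
 * whether the descriptor does NOT end the current frame. */
static bool is_non_eop(uint32_t *next_to_clean, uint32_t count,
		       uint32_t staterr)
{
	*next_to_clean += 1;
	if (*next_to_clean == count)
		*next_to_clean = 0;
	return !(staterr & RXD_STAT_EOP);
}

int main(void)
{
	uint32_t ntc = 255;

	printf("%d %u\n", is_non_eop(&ntc, 256, 0x01), ntc); /* 1 0 */
	printf("%d %u\n", is_non_eop(&ntc, 256, 0x03), ntc); /* 0 1 */
	return 0;
}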
@@ -6426,15 +6421,15 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring, | |||
6426 | } | 6421 | } |
6427 | 6422 | ||
6428 | /** | 6423 | /** |
6429 | * igb_get_headlen - determine size of header for LRO/GRO | 6424 | * igb_get_headlen - determine size of header for LRO/GRO |
6430 | * @data: pointer to the start of the headers | 6425 | * @data: pointer to the start of the headers |
6431 | * @max_len: total length of section to find headers in | 6426 | * @max_len: total length of section to find headers in |
6432 | * | 6427 | * |
6433 | * This function is meant to determine the length of headers that will | 6428 | * This function is meant to determine the length of headers that will |
6434 | * be recognized by hardware for LRO, and GRO offloads. The main | 6429 | * be recognized by hardware for LRO, and GRO offloads. The main |
6435 | * motivation of doing this is to only perform one pull for IPv4 TCP | 6430 | * motivation of doing this is to only perform one pull for IPv4 TCP |
6436 | * packets so that we can do basic things like calculating the gso_size | 6431 | * packets so that we can do basic things like calculating the gso_size |
6437 | * based on the average data per packet. | 6432 | * based on the average data per packet. |
6438 | **/ | 6433 | **/ |
6439 | static unsigned int igb_get_headlen(unsigned char *data, | 6434 | static unsigned int igb_get_headlen(unsigned char *data, |
6440 | unsigned int max_len) | 6435 | unsigned int max_len) |
@@ -6521,8 +6516,7 @@ static unsigned int igb_get_headlen(unsigned char *data, | |||
6521 | hdr.network += sizeof(struct udphdr); | 6516 | hdr.network += sizeof(struct udphdr); |
6522 | } | 6517 | } |
6523 | 6518 | ||
6524 | /* | 6519 | /* If everything has gone correctly hdr.network should be the |
6525 | * If everything has gone correctly hdr.network should be the | ||
6526 | * data section of the packet and will be the end of the header. | 6520 | * data section of the packet and will be the end of the header. |
6527 | * If not then it probably represents the end of the last recognized | 6521 | * If not then it probably represents the end of the last recognized |
6528 | * header. | 6522 | * header. |
@@ -6534,17 +6528,17 @@ static unsigned int igb_get_headlen(unsigned char *data, | |||
6534 | } | 6528 | } |
6535 | 6529 | ||
6536 | /** | 6530 | /** |
6537 | * igb_pull_tail - igb specific version of skb_pull_tail | 6531 | * igb_pull_tail - igb specific version of skb_pull_tail |
6538 | * @rx_ring: rx descriptor ring packet is being transacted on | 6532 | * @rx_ring: rx descriptor ring packet is being transacted on |
6539 | * @rx_desc: pointer to the EOP Rx descriptor | 6533 | * @rx_desc: pointer to the EOP Rx descriptor |
6540 | * @skb: pointer to current skb being adjusted | 6534 | * @skb: pointer to current skb being adjusted |
6541 | * | 6535 | * |
6542 | * This function is an igb specific version of __pskb_pull_tail. The | 6536 | * This function is an igb specific version of __pskb_pull_tail. The |
6543 | * main difference between this version and the original function is that | 6537 | * main difference between this version and the original function is that |
6544 | * this function can make several assumptions about the state of things | 6538 | * this function can make several assumptions about the state of things |
6545 | * that allow for significant optimizations versus the standard function. | 6539 | * that allow for significant optimizations versus the standard function. |
6546 | * As a result we can do things like drop a frag and maintain an accurate | 6540 | * As a result we can do things like drop a frag and maintain an accurate |
6547 | * truesize for the skb. | 6541 | * truesize for the skb. |
6548 | */ | 6542 | */ |
6549 | static void igb_pull_tail(struct igb_ring *rx_ring, | 6543 | static void igb_pull_tail(struct igb_ring *rx_ring, |
6550 | union e1000_adv_rx_desc *rx_desc, | 6544 | union e1000_adv_rx_desc *rx_desc, |
@@ -6554,8 +6548,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring, | |||
6554 | unsigned char *va; | 6548 | unsigned char *va; |
6555 | unsigned int pull_len; | 6549 | unsigned int pull_len; |
6556 | 6550 | ||
6557 | /* | 6551 | /* it is valid to use page_address instead of kmap since we are |
6558 | * it is valid to use page_address instead of kmap since we are | ||
6559 | * working with pages allocated out of the lomem pool per | 6552 | * working with pages allocated out of the lomem pool per |
6560 | * alloc_page(GFP_ATOMIC) | 6553 | * alloc_page(GFP_ATOMIC) |
6561 | */ | 6554 | */ |
@@ -6575,8 +6568,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring, | |||
6575 | va += IGB_TS_HDR_LEN; | 6568 | va += IGB_TS_HDR_LEN; |
6576 | } | 6569 | } |
6577 | 6570 | ||
6578 | /* | 6571 | /* we need the header to contain the greater of either ETH_HLEN or |
6579 | * we need the header to contain the greater of either ETH_HLEN or | ||
6580 | * 60 bytes if the skb->len is less than 60 for skb_pad. | 6572 | * 60 bytes if the skb->len is less than 60 for skb_pad. |
6581 | */ | 6573 | */ |
6582 | pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); | 6574 | pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); |
@@ -6592,24 +6584,23 @@ static void igb_pull_tail(struct igb_ring *rx_ring, | |||
6592 | } | 6584 | } |
6593 | 6585 | ||
6594 | /** | 6586 | /** |
6595 | * igb_cleanup_headers - Correct corrupted or empty headers | 6587 | * igb_cleanup_headers - Correct corrupted or empty headers |
6596 | * @rx_ring: rx descriptor ring packet is being transacted on | 6588 | * @rx_ring: rx descriptor ring packet is being transacted on |
6597 | * @rx_desc: pointer to the EOP Rx descriptor | 6589 | * @rx_desc: pointer to the EOP Rx descriptor |
6598 | * @skb: pointer to current skb being fixed | 6590 | * @skb: pointer to current skb being fixed |
6599 | * | 6591 | * |
6600 | * Address the case where we are pulling data in on pages only | 6592 | * Address the case where we are pulling data in on pages only |
6601 | * and as such no data is present in the skb header. | 6593 | * and as such no data is present in the skb header. |
6602 | * | 6594 | * |
6603 | * In addition if skb is not at least 60 bytes we need to pad it so that | 6595 | * In addition if skb is not at least 60 bytes we need to pad it so that |
6604 | * it is large enough to qualify as a valid Ethernet frame. | 6596 | * it is large enough to qualify as a valid Ethernet frame. |
6605 | * | 6597 | * |
6606 | * Returns true if an error was encountered and skb was freed. | 6598 | * Returns true if an error was encountered and skb was freed. |
6607 | **/ | 6599 | **/ |
6608 | static bool igb_cleanup_headers(struct igb_ring *rx_ring, | 6600 | static bool igb_cleanup_headers(struct igb_ring *rx_ring, |
6609 | union e1000_adv_rx_desc *rx_desc, | 6601 | union e1000_adv_rx_desc *rx_desc, |
6610 | struct sk_buff *skb) | 6602 | struct sk_buff *skb) |
6611 | { | 6603 | { |
6612 | |||
6613 | if (unlikely((igb_test_staterr(rx_desc, | 6604 | if (unlikely((igb_test_staterr(rx_desc, |
6614 | E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { | 6605 | E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { |
6615 | struct net_device *netdev = rx_ring->netdev; | 6606 | struct net_device *netdev = rx_ring->netdev; |
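
The 60-byte rule referenced in the kernel-doc is the Ethernet minimum frame length excluding CRC (ETH_ZLEN). A plain-C stand-in for the skb_pad()/skb_put() sequence the driver uses, assuming the caller guarantees ETH_ZLEN bytes of room in the buffer:

#include <string.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length without CRC */

static unsigned int pad_runt_frame(unsigned char *frame, unsigned int len)
{
	if (len < ETH_ZLEN) {
		memset(frame + len, 0, ETH_ZLEN - len);	/* zero-fill tail */
		len = ETH_ZLEN;
	}
	return len;
}
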
@@ -6636,14 +6627,14 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, | |||
6636 | } | 6627 | } |
6637 | 6628 | ||
6638 | /** | 6629 | /** |
6639 | * igb_process_skb_fields - Populate skb header fields from Rx descriptor | 6630 | * igb_process_skb_fields - Populate skb header fields from Rx descriptor |
6640 | * @rx_ring: rx descriptor ring packet is being transacted on | 6631 | * @rx_ring: rx descriptor ring packet is being transacted on |
6641 | * @rx_desc: pointer to the EOP Rx descriptor | 6632 | * @rx_desc: pointer to the EOP Rx descriptor |
6642 | * @skb: pointer to current skb being populated | 6633 | * @skb: pointer to current skb being populated |
6643 | * | 6634 | * |
6644 | * This function checks the ring, descriptor, and packet information in | 6635 | * This function checks the ring, descriptor, and packet information in |
6645 | * order to populate the hash, checksum, VLAN, timestamp, protocol, and | 6636 | * order to populate the hash, checksum, VLAN, timestamp, protocol, and |
6646 | * other fields within the skb. | 6637 | * other fields within the skb. |
6647 | **/ | 6638 | **/ |
6648 | static void igb_process_skb_fields(struct igb_ring *rx_ring, | 6639 | static void igb_process_skb_fields(struct igb_ring *rx_ring, |
6649 | union e1000_adv_rx_desc *rx_desc, | 6640 | union e1000_adv_rx_desc *rx_desc, |
@@ -6774,8 +6765,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, | |||
6774 | /* map page for use */ | 6765 | /* map page for use */ |
6775 | dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); | 6766 | dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); |
6776 | 6767 | ||
6777 | /* | 6768 | /* if mapping failed free memory back to system since |
6778 | * if mapping failed free memory back to system since | ||
6779 | * there isn't much point in holding memory we can't use | 6769 | * there isn't much point in holding memory we can't use |
6780 | */ | 6770 | */ |
6781 | if (dma_mapping_error(rx_ring->dev, dma)) { | 6771 | if (dma_mapping_error(rx_ring->dev, dma)) { |
@@ -6801,8 +6791,8 @@ static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) | |||
6801 | } | 6791 | } |
6802 | 6792 | ||
6803 | /** | 6793 | /** |
6804 | * igb_alloc_rx_buffers - Replace used receive buffers; packet split | 6794 | * igb_alloc_rx_buffers - Replace used receive buffers; packet split |
6805 | * @adapter: address of board private structure | 6795 | * @adapter: address of board private structure |
6806 | **/ | 6796 | **/ |
6807 | void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) | 6797 | void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) |
6808 | { | 6798 | { |
@@ -6822,8 +6812,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) | |||
6822 | if (!igb_alloc_mapped_page(rx_ring, bi)) | 6812 | if (!igb_alloc_mapped_page(rx_ring, bi)) |
6823 | break; | 6813 | break; |
6824 | 6814 | ||
6825 | /* | 6815 | /* Refresh the desc even if buffer_addrs didn't change |
6826 | * Refresh the desc even if buffer_addrs didn't change | ||
6827 | * because each write-back erases this info. | 6816 | * because each write-back erases this info. |
6828 | */ | 6817 | */ |
6829 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + | 6818 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + |
@@ -6854,8 +6843,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) | |||
6854 | /* update next to alloc since we have filled the ring */ | 6843 | /* update next to alloc since we have filled the ring */ |
6855 | rx_ring->next_to_alloc = i; | 6844 | rx_ring->next_to_alloc = i; |
6856 | 6845 | ||
6857 | /* | 6846 | /* Force memory writes to complete before letting h/w |
6858 | * Force memory writes to complete before letting h/w | ||
6859 | * know there are new descriptors to fetch. (Only | 6847 | * know there are new descriptors to fetch. (Only |
6860 | * applicable for weak-ordered memory model archs, | 6848 | * applicable for weak-ordered memory model archs, |
6861 | * such as IA-64). | 6849 | * such as IA-64). |
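
The barrier comment above is the classic producer-side publish pattern: all descriptor writes must be globally visible before the tail write tells the NIC to fetch them. A userspace analogue using a C11 release store in place of the driver's wmb() followed by the tail register write; types and names here are illustrative stand-ins:

#include <stdatomic.h>
#include <stdint.h>

struct desc { uint64_t pkt_addr; };

static struct desc ring[256];
static _Atomic uint32_t tail;

static void publish_descriptor(uint32_t i, uint64_t dma_addr)
{
	ring[i].pkt_addr = dma_addr;	/* plain write: fill descriptor */

	/* Release ordering guarantees the descriptor contents are
	 * visible before the consumer observes the new tail value.
	 */
	atomic_store_explicit(&tail, i, memory_order_release);
}
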
@@ -7016,7 +7004,8 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) | |||
7016 | mac->autoneg = 0; | 7004 | mac->autoneg = 0; |
7017 | 7005 | ||
7018 | /* Make sure dplx is at most 1 bit and lsb of speed is not set | 7006 | /* Make sure dplx is at most 1 bit and lsb of speed is not set |
7019 | * for the switch() below to work */ | 7007 | * for the switch() below to work |
7008 | */ | ||
7020 | if ((spd & 1) || (dplx & ~1)) | 7009 | if ((spd & 1) || (dplx & ~1)) |
7021 | goto err_inval; | 7010 | goto err_inval; |
7022 | 7011 | ||
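
The reason for this bit check is the dispatch that follows: the switch() the comment refers to selects on spd + dplx, which only encodes the pair unambiguously if duplex occupies bit 0 alone and speed leaves bit 0 clear. The ethtool constants (SPEED_100 is 100, DUPLEX_HALF is 0, DUPLEX_FULL is 1) satisfy that, and the check rejects anything else. A compilable sketch of the idea:

#include <stdio.h>

#define SPEED_100	100
#define DUPLEX_HALF	0
#define DUPLEX_FULL	1

static const char *spd_dplx_name(unsigned int spd, unsigned int dplx)
{
	if ((spd & 1) || (dplx & ~1))
		return "invalid";	/* the sum would be ambiguous */
	switch (spd + dplx) {
	case SPEED_100 + DUPLEX_HALF:	return "100 Mb/s half";
	case SPEED_100 + DUPLEX_FULL:	return "100 Mb/s full";
	default:			return "unsupported";
	}
}

int main(void)
{
	printf("%s\n", spd_dplx_name(SPEED_100, DUPLEX_FULL));
	return 0;
}
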
@@ -7131,7 +7120,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
7131 | igb_power_up_link(adapter); | 7120 | igb_power_up_link(adapter); |
7132 | 7121 | ||
7133 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 7122 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
7134 | * would have already happened in close and is redundant. */ | 7123 | * would have already happened in close and is redundant. |
7124 | */ | ||
7135 | igb_release_hw_control(adapter); | 7125 | igb_release_hw_control(adapter); |
7136 | 7126 | ||
7137 | pci_disable_device(pdev); | 7127 | pci_disable_device(pdev); |
@@ -7193,7 +7183,8 @@ static int igb_resume(struct device *dev) | |||
7193 | igb_reset(adapter); | 7183 | igb_reset(adapter); |
7194 | 7184 | ||
7195 | /* let the f/w know that the h/w is now under the control of the | 7185 | /* let the f/w know that the h/w is now under the control of the |
7196 | * driver. */ | 7186 | * driver. |
7187 | */ | ||
7197 | igb_get_hw_control(adapter); | 7188 | igb_get_hw_control(adapter); |
7198 | 7189 | ||
7199 | wr32(E1000_WUS, ~0); | 7190 | wr32(E1000_WUS, ~0); |
@@ -7329,8 +7320,7 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) | |||
7329 | } | 7320 | } |
7330 | 7321 | ||
7331 | #ifdef CONFIG_NET_POLL_CONTROLLER | 7322 | #ifdef CONFIG_NET_POLL_CONTROLLER |
7332 | /* | 7323 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
7333 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
7334 | * without having to re-enable interrupts. It's not called while | 7324 | * without having to re-enable interrupts. It's not called while |
7335 | * the interrupt routine is executing. | 7325 | * the interrupt routine is executing. |
7336 | */ | 7326 | */ |
@@ -7353,13 +7343,13 @@ static void igb_netpoll(struct net_device *netdev) | |||
7353 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 7343 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
7354 | 7344 | ||
7355 | /** | 7345 | /** |
7356 | * igb_io_error_detected - called when PCI error is detected | 7346 | * igb_io_error_detected - called when PCI error is detected |
7357 | * @pdev: Pointer to PCI device | 7347 | * @pdev: Pointer to PCI device |
7358 | * @state: The current pci connection state | 7348 | * @state: The current pci connection state |
7359 | * | 7349 | * |
7360 | * This function is called after a PCI bus error affecting | 7350 | * This function is called after a PCI bus error affecting |
7361 | * this device has been detected. | 7351 | * this device has been detected. |
7362 | */ | 7352 | **/ |
7363 | static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, | 7353 | static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, |
7364 | pci_channel_state_t state) | 7354 | pci_channel_state_t state) |
7365 | { | 7355 | { |
@@ -7380,12 +7370,12 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, | |||
7380 | } | 7370 | } |
7381 | 7371 | ||
7382 | /** | 7372 | /** |
7383 | * igb_io_slot_reset - called after the pci bus has been reset. | 7373 | * igb_io_slot_reset - called after the pci bus has been reset. |
7384 | * @pdev: Pointer to PCI device | 7374 | * @pdev: Pointer to PCI device |
7385 | * | 7375 | * |
7386 | * Restart the card from scratch, as if from a cold-boot. Implementation | 7376 | * Restart the card from scratch, as if from a cold-boot. Implementation |
7387 | * resembles the first-half of the igb_resume routine. | 7377 | * resembles the first-half of the igb_resume routine. |
7388 | */ | 7378 | **/ |
7389 | static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) | 7379 | static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) |
7390 | { | 7380 | { |
7391 | struct net_device *netdev = pci_get_drvdata(pdev); | 7381 | struct net_device *netdev = pci_get_drvdata(pdev); |
@@ -7413,8 +7403,9 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) | |||
7413 | 7403 | ||
7414 | err = pci_cleanup_aer_uncorrect_error_status(pdev); | 7404 | err = pci_cleanup_aer_uncorrect_error_status(pdev); |
7415 | if (err) { | 7405 | if (err) { |
7416 | dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " | 7406 | dev_err(&pdev->dev, |
7417 | "failed 0x%0x\n", err); | 7407 | "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", |
7408 | err); | ||
7418 | /* non-fatal, continue */ | 7409 | /* non-fatal, continue */ |
7419 | } | 7410 | } |
7420 | 7411 | ||
@@ -7422,12 +7413,12 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) | |||
7422 | } | 7413 | } |
7423 | 7414 | ||
7424 | /** | 7415 | /** |
7425 | * igb_io_resume - called when traffic can start flowing again. | 7416 | * igb_io_resume - called when traffic can start flowing again. |
7426 | * @pdev: Pointer to PCI device | 7417 | * @pdev: Pointer to PCI device |
7427 | * | 7418 | * |
7428 | * This callback is called when the error recovery driver tells us that | 7419 | * This callback is called when the error recovery driver tells us that |
7429 | * its OK to resume normal operation. Implementation resembles the | 7420 | * its OK to resume normal operation. Implementation resembles the |
7430 | * second-half of the igb_resume routine. | 7421 | * second-half of the igb_resume routine. |
7431 | */ | 7422 | */ |
7432 | static void igb_io_resume(struct pci_dev *pdev) | 7423 | static void igb_io_resume(struct pci_dev *pdev) |
7433 | { | 7424 | { |
@@ -7444,12 +7435,13 @@ static void igb_io_resume(struct pci_dev *pdev) | |||
7444 | netif_device_attach(netdev); | 7435 | netif_device_attach(netdev); |
7445 | 7436 | ||
7446 | /* let the f/w know that the h/w is now under the control of the | 7437 | /* let the f/w know that the h/w is now under the control of the |
7447 | * driver. */ | 7438 | * driver. |
7439 | */ | ||
7448 | igb_get_hw_control(adapter); | 7440 | igb_get_hw_control(adapter); |
7449 | } | 7441 | } |
7450 | 7442 | ||
7451 | static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, | 7443 | static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, |
7452 | u8 qsel) | 7444 | u8 qsel) |
7453 | { | 7445 | { |
7454 | u32 rar_low, rar_high; | 7446 | u32 rar_low, rar_high; |
7455 | struct e1000_hw *hw = &adapter->hw; | 7447 | struct e1000_hw *hw = &adapter->hw; |
@@ -7458,7 +7450,7 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, | |||
7458 | * from network order (big endian) to little endian | 7450 | * from network order (big endian) to little endian |
7459 | */ | 7451 | */ |
7460 | rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | | 7452 | rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | |
7461 | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); | 7453 | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); |
7462 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); | 7454 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); |
7463 | 7455 | ||
7464 | /* Indicate to hardware the Address is Valid. */ | 7456 | /* Indicate to hardware the Address is Valid. */ |
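
The byte-order conversion described by the surrounding comment can be checked standalone: the first four MAC bytes land in the low register and the last two in the low half of the high register, after which the driver ORs in the "address valid" bit. A runnable check of the same packing, using an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint32_t rar_low  = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
			    ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	uint32_t rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	/* prints RAL=0xaa211b00 RAH=0x0000ccbb */
	printf("RAL=0x%08x RAH=0x%08x\n", rar_low, rar_high);
	return 0;
}
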
@@ -7476,11 +7468,12 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, | |||
7476 | } | 7468 | } |
7477 | 7469 | ||
7478 | static int igb_set_vf_mac(struct igb_adapter *adapter, | 7470 | static int igb_set_vf_mac(struct igb_adapter *adapter, |
7479 | int vf, unsigned char *mac_addr) | 7471 | int vf, unsigned char *mac_addr) |
7480 | { | 7472 | { |
7481 | struct e1000_hw *hw = &adapter->hw; | 7473 | struct e1000_hw *hw = &adapter->hw; |
7482 | /* VF MAC addresses start at end of receive addresses and moves | 7474 | /* VF MAC addresses start at end of receive addresses and moves |
7483 | * torwards the first, as a result a collision should not be possible */ | 7475 | * towards the first, as a result a collision should not be possible |
7476 | */ | ||
7484 | int rar_entry = hw->mac.rar_entry_count - (vf + 1); | 7477 | int rar_entry = hw->mac.rar_entry_count - (vf + 1); |
7485 | 7478 | ||
7486 | memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); | 7479 | memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); |
@@ -7497,13 +7490,13 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) | |||
7497 | return -EINVAL; | 7490 | return -EINVAL; |
7498 | adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; | 7491 | adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; |
7499 | dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); | 7492 | dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); |
7500 | dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" | 7493 | dev_info(&adapter->pdev->dev, |
7501 | " change effective."); | 7494 | "Reload the VF driver to make this change effective."); |
7502 | if (test_bit(__IGB_DOWN, &adapter->state)) { | 7495 | if (test_bit(__IGB_DOWN, &adapter->state)) { |
7503 | dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," | 7496 | dev_warn(&adapter->pdev->dev, |
7504 | " but the PF device is not up.\n"); | 7497 | "The VF MAC address has been set, but the PF device is not up.\n"); |
7505 | dev_warn(&adapter->pdev->dev, "Bring the PF device up before" | 7498 | dev_warn(&adapter->pdev->dev, |
7506 | " attempting to use the VF device.\n"); | 7499 | "Bring the PF device up before attempting to use the VF device.\n"); |
7507 | } | 7500 | } |
7508 | return igb_set_vf_mac(adapter, vf, mac); | 7501 | return igb_set_vf_mac(adapter, vf, mac); |
7509 | } | 7502 | } |
@@ -7530,19 +7523,19 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, | |||
7530 | /* Calculate the rate factor values to set */ | 7523 | /* Calculate the rate factor values to set */ |
7531 | rf_int = link_speed / tx_rate; | 7524 | rf_int = link_speed / tx_rate; |
7532 | rf_dec = (link_speed - (rf_int * tx_rate)); | 7525 | rf_dec = (link_speed - (rf_int * tx_rate)); |
7533 | rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; | 7526 | rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / |
7527 | tx_rate; | ||
7534 | 7528 | ||
7535 | bcnrc_val = E1000_RTTBCNRC_RS_ENA; | 7529 | bcnrc_val = E1000_RTTBCNRC_RS_ENA; |
7536 | bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) & | 7530 | bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & |
7537 | E1000_RTTBCNRC_RF_INT_MASK); | 7531 | E1000_RTTBCNRC_RF_INT_MASK); |
7538 | bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); | 7532 | bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); |
7539 | } else { | 7533 | } else { |
7540 | bcnrc_val = 0; | 7534 | bcnrc_val = 0; |
7541 | } | 7535 | } |
7542 | 7536 | ||
7543 | wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ | 7537 | wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ |
7544 | /* | 7538 | /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM |
7545 | * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM | ||
7546 | * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. | 7539 | * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. |
7547 | */ | 7540 | */ |
7548 | wr32(E1000_RTTBCNRM, 0x14); | 7541 | wr32(E1000_RTTBCNRM, 0x14); |
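
The rf_int/rf_dec pair computed above is link_speed / tx_rate expressed in fixed point: an integer part plus a fraction scaled by the RF_INT shift, which the driver's defines set to 14. Worked through standalone, assuming that shift value:

#include <stdio.h>

#define RF_INT_SHIFT 14		/* assumed E1000_RTTBCNRC_RF_INT_SHIFT */

int main(void)
{
	unsigned int link_speed = 1000;		/* Mb/s */
	unsigned int tx_rate = 300;		/* per-VF cap, Mb/s */
	unsigned int rf_int = link_speed / tx_rate;
	unsigned int rf_dec = link_speed - rf_int * tx_rate;

	rf_dec = (rf_dec * (1 << RF_INT_SHIFT)) / tx_rate;

	/* 1000/300 = 3.333...: rf_int = 3, rf_dec = 5461 ~ 0.3333 * 2^14 */
	printf("rf_int=%u rf_dec=%u\n", rf_int, rf_dec);
	return 0;
}
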
@@ -7564,8 +7557,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter) | |||
7564 | reset_rate = true; | 7557 | reset_rate = true; |
7565 | adapter->vf_rate_link_speed = 0; | 7558 | adapter->vf_rate_link_speed = 0; |
7566 | dev_info(&adapter->pdev->dev, | 7559 | dev_info(&adapter->pdev->dev, |
7567 | "Link speed has been changed. VF Transmit " | 7560 | "Link speed has been changed. VF Transmit rate is disabled\n"); |
7568 | "rate is disabled\n"); | ||
7569 | } | 7561 | } |
7570 | 7562 | ||
7571 | for (i = 0; i < adapter->vfs_allocated_count; i++) { | 7563 | for (i = 0; i < adapter->vfs_allocated_count; i++) { |
@@ -7573,8 +7565,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter) | |||
7573 | adapter->vf_data[i].tx_rate = 0; | 7565 | adapter->vf_data[i].tx_rate = 0; |
7574 | 7566 | ||
7575 | igb_set_vf_rate_limit(&adapter->hw, i, | 7567 | igb_set_vf_rate_limit(&adapter->hw, i, |
7576 | adapter->vf_data[i].tx_rate, | 7568 | adapter->vf_data[i].tx_rate, |
7577 | actual_link_speed); | 7569 | actual_link_speed); |
7578 | } | 7570 | } |
7579 | } | 7571 | } |
7580 | 7572 | ||
@@ -7645,7 +7637,7 @@ static void igb_vmm_control(struct igb_adapter *adapter) | |||
7645 | igb_vmdq_set_loopback_pf(hw, true); | 7637 | igb_vmdq_set_loopback_pf(hw, true); |
7646 | igb_vmdq_set_replication_pf(hw, true); | 7638 | igb_vmdq_set_replication_pf(hw, true); |
7647 | igb_vmdq_set_anti_spoofing_pf(hw, true, | 7639 | igb_vmdq_set_anti_spoofing_pf(hw, true, |
7648 | adapter->vfs_allocated_count); | 7640 | adapter->vfs_allocated_count); |
7649 | } else { | 7641 | } else { |
7650 | igb_vmdq_set_loopback_pf(hw, false); | 7642 | igb_vmdq_set_loopback_pf(hw, false); |
7651 | igb_vmdq_set_replication_pf(hw, false); | 7643 | igb_vmdq_set_replication_pf(hw, false); |
@@ -7665,8 +7657,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) | |||
7665 | /* force threshold to 0. */ | 7657 | /* force threshold to 0. */ |
7666 | wr32(E1000_DMCTXTH, 0); | 7658 | wr32(E1000_DMCTXTH, 0); |
7667 | 7659 | ||
7668 | /* | 7660 | /* DMA Coalescing high water mark needs to be greater |
7669 | * DMA Coalescing high water mark needs to be greater | ||
7670 | * than the Rx threshold. Set hwm to PBA - max frame | 7661 | * than the Rx threshold. Set hwm to PBA - max frame |
7671 | * size in 16B units, capping it at PBA - 6KB. | 7662 | * size in 16B units, capping it at PBA - 6KB. |
7672 | */ | 7663 | */ |
@@ -7679,8 +7670,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) | |||
7679 | & E1000_FCRTC_RTH_COAL_MASK); | 7670 | & E1000_FCRTC_RTH_COAL_MASK); |
7680 | wr32(E1000_FCRTC, reg); | 7671 | wr32(E1000_FCRTC, reg); |
7681 | 7672 | ||
7682 | /* | 7673 | /* Set the DMA Coalescing Rx threshold to PBA - 2 * max |
7683 | * Set the DMA Coalescing Rx threshold to PBA - 2 * max | ||
7684 | * frame size, capping it at PBA - 10KB. | 7674 | * frame size, capping it at PBA - 10KB. |
7685 | */ | 7675 | */ |
7686 | dmac_thr = pba - adapter->max_frame_size / 512; | 7676 | dmac_thr = pba - adapter->max_frame_size / 512; |
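
Both thresholds follow the recipe the comments describe: PBA (in KB) minus the max frame size in the register's own units, with a floor. A worked example mirroring the formulas in this function, under the assumption of a 34KB packet buffer and a 1522-byte max frame (the high water mark is kept in 16-byte units, hence 64 per KB):

#include <stdio.h>

int main(void)
{
	unsigned int pba = 34;			/* packet buffer size, KB */
	unsigned int max_frame = 1522;		/* bytes */
	unsigned int hwm, dmac_thr;

	hwm = 64 * pba - max_frame / 16;	/* 16-byte units */
	if (hwm < 64 * (pba - 6))		/* floor at PBA - 6KB */
		hwm = 64 * (pba - 6);

	dmac_thr = pba - max_frame / 512;	/* frame in 512-byte units */
	if (dmac_thr < pba - 10)		/* floor at PBA - 10KB */
		dmac_thr = pba - 10;

	printf("hwm=%u dmac_thr=%u\n", hwm, dmac_thr);	/* 2081, 32 */
	return 0;
}
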
@@ -7701,8 +7691,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) | |||
7701 | reg &= ~E1000_DMACR_DC_BMC2OSW_EN; | 7691 | reg &= ~E1000_DMACR_DC_BMC2OSW_EN; |
7702 | wr32(E1000_DMACR, reg); | 7692 | wr32(E1000_DMACR, reg); |
7703 | 7693 | ||
7704 | /* | 7694 | /* no lower threshold to disable |
7705 | * no lower threshold to disable | ||
7706 | * coalescing(smart fifb)-UTRESH=0 | 7695 | * coalescing(smart fifb)-UTRESH=0 |
7707 | */ | 7696 | */ |
7708 | wr32(E1000_DMCRTRH, 0); | 7697 | wr32(E1000_DMCRTRH, 0); |
@@ -7711,15 +7700,13 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) | |||
7711 | 7700 | ||
7712 | wr32(E1000_DMCTLX, reg); | 7701 | wr32(E1000_DMCTLX, reg); |
7713 | 7702 | ||
7714 | /* | 7703 | /* free space in tx packet buffer to wake from |
7715 | * free space in tx packet buffer to wake from | ||
7716 | * DMA coal | 7704 | * DMA coal |
7717 | */ | 7705 | */ |
7718 | wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - | 7706 | wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - |
7719 | (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); | 7707 | (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); |
7720 | 7708 | ||
7721 | /* | 7709 | /* make low power state decision controlled |
7722 | * make low power state decision controlled | ||
7723 | * by DMA coal | 7710 | * by DMA coal |
7724 | */ | 7711 | */ |
7725 | reg = rd32(E1000_PCIEMISC); | 7712 | reg = rd32(E1000_PCIEMISC); |
@@ -7733,7 +7720,8 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) | |||
7733 | } | 7720 | } |
7734 | } | 7721 | } |
7735 | 7722 | ||
7736 | /* igb_read_i2c_byte - Reads 8 bit word over I2C | 7723 | /** |
7724 | * igb_read_i2c_byte - Reads 8 bit word over I2C | ||
7737 | * @hw: pointer to hardware structure | 7725 | * @hw: pointer to hardware structure |
7738 | * @byte_offset: byte offset to read | 7726 | * @byte_offset: byte offset to read |
7739 | * @dev_addr: device address | 7727 | * @dev_addr: device address |
@@ -7741,9 +7729,9 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) | |||
7741 | * | 7729 | * |
7742 | * Performs byte read operation over I2C interface at | 7730 | * Performs byte read operation over I2C interface at |
7743 | * a specified device address. | 7731 | * a specified device address. |
7744 | */ | 7732 | **/ |
7745 | s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, | 7733 | s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, |
7746 | u8 dev_addr, u8 *data) | 7734 | u8 dev_addr, u8 *data) |
7747 | { | 7735 | { |
7748 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); | 7736 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); |
7749 | struct i2c_client *this_client = adapter->i2c_client; | 7737 | struct i2c_client *this_client = adapter->i2c_client; |
@@ -7770,7 +7758,8 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, | |||
7770 | } | 7758 | } |
7771 | } | 7759 | } |
7772 | 7760 | ||
7773 | /* igb_write_i2c_byte - Writes 8 bit word over I2C | 7761 | /** |
7762 | * igb_write_i2c_byte - Writes 8 bit word over I2C | ||
7774 | * @hw: pointer to hardware structure | 7763 | * @hw: pointer to hardware structure |
7775 | * @byte_offset: byte offset to write | 7764 | * @byte_offset: byte offset to write |
7776 | * @dev_addr: device address | 7765 | * @dev_addr: device address |
@@ -7778,9 +7767,9 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, | |||
7778 | * | 7767 | * |
7779 | * Performs byte write operation over I2C interface at | 7768 | * Performs byte write operation over I2C interface at |
7780 | * a specified device address. | 7769 | * a specified device address. |
7781 | */ | 7770 | **/ |
7782 | s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, | 7771 | s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, |
7783 | u8 dev_addr, u8 data) | 7772 | u8 dev_addr, u8 data) |
7784 | { | 7773 | { |
7785 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); | 7774 | struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); |
7786 | struct i2c_client *this_client = adapter->i2c_client; | 7775 | struct i2c_client *this_client = adapter->i2c_client; |
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0a237507ee85..9f7da269edc3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 |
2 | * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 | ||
3 | * | 2 | * |
4 | * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> | 3 | * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> |
5 | * | 4 | * |
@@ -27,8 +26,7 @@ | |||
27 | #define INCVALUE_MASK 0x7fffffff | 26 | #define INCVALUE_MASK 0x7fffffff |
28 | #define ISGN 0x80000000 | 27 | #define ISGN 0x80000000 |
29 | 28 | ||
30 | /* | 29 | /* The 82580 timesync updates the system timer every 8ns by 8ns, |
31 | * The 82580 timesync updates the system timer every 8ns by 8ns, | ||
32 | * and this update value cannot be reprogrammed. | 30 | * and this update value cannot be reprogrammed. |
33 | * | 31 | * |
34 | * Neither the 82576 nor the 82580 offer registers wide enough to hold | 32 | * Neither the 82576 nor the 82580 offer registers wide enough to hold |
@@ -77,10 +75,7 @@ | |||
77 | #define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) | 75 | #define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) |
78 | #define IGB_NBITS_82580 40 | 76 | #define IGB_NBITS_82580 40 |
79 | 77 | ||
80 | /* | 78 | /* SYSTIM read access for the 82576 */ |
81 | * SYSTIM read access for the 82576 | ||
82 | */ | ||
83 | |||
84 | static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) | 79 | static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) |
85 | { | 80 | { |
86 | struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); | 81 | struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); |
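
The INCVALUE_82576 definition above (a 16ns base period shifted up by IGB_82576_TSYNC_SHIFT, which is 19 in this file, giving 2^23) is what makes the frequency-adjustment arithmetic come out cleanly: the adjfreq path scales ppb by 2^14 / 1953125, which equals ppb * 2^23 / 10^9 because 10^9 = 1953125 * 2^9, so the delta is exactly ppb parts per billion of the base increment. A numeric check of that identity (sign handling, done via a separate flag in the driver, is omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t ppb = 250000;		/* request: speed up by 250 ppm */
	uint64_t base = 16u << 19;	/* INCVALUE_82576 = 2^23 */
	uint64_t delta = ((uint64_t)ppb << 14) / 1953125;

	/* delta / base == 250000 / 1e9, i.e. 250 ppm of the increment */
	printf("base=%llu delta=%llu\n",
	       (unsigned long long)base, (unsigned long long)delta);
	return 0;
}
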
@@ -97,10 +92,7 @@ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) | |||
97 | return val; | 92 | return val; |
98 | } | 93 | } |
99 | 94 | ||
100 | /* | 95 | /* SYSTIM read access for the 82580 */ |
101 | * SYSTIM read access for the 82580 | ||
102 | */ | ||
103 | |||
104 | static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) | 96 | static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) |
105 | { | 97 | { |
106 | struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); | 98 | struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); |
@@ -108,8 +100,7 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) | |||
108 | u64 val; | 100 | u64 val; |
109 | u32 lo, hi, jk; | 101 | u32 lo, hi, jk; |
110 | 102 | ||
111 | /* | 103 | /* The timestamp latches on lowest register read. For the 82580 |
112 | * The timestamp latches on lowest register read. For the 82580 | ||
113 | * the lowest register is SYSTIMR instead of SYSTIML. However we only | 104 | * the lowest register is SYSTIMR instead of SYSTIML. However we only |
114 | * need to provide nanosecond resolution, so we just ignore it. | 105 | * need to provide nanosecond resolution, so we just ignore it. |
115 | */ | 106 | */ |
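
Because the low register read latches the timestamp, read order is load-bearing here: SYSTIML first, then SYSTIMH. A sketch of assembling the sample, folding in the 40-bit width noted by IGB_NBITS_82580 above (in the driver the mask is actually applied through the cyclecounter's mask field rather than in the read itself):

#include <stdint.h>

#define NBITS_82580 40	/* the 82580 SYSTIM counter is 40 bits wide */

/* Caller must read the low half first -- that read latches the high
 * half so the two loads form one consistent sample.
 */
static uint64_t systim_82580(uint32_t lo_read_first, uint32_t hi_read_second)
{
	uint64_t val = ((uint64_t)hi_read_second << 32) | lo_read_first;

	return val & ((1ULL << NBITS_82580) - 1);
}
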
@@ -123,17 +114,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) | |||
123 | return val; | 114 | return val; |
124 | } | 115 | } |
125 | 116 | ||
126 | /* | 117 | /* SYSTIM read access for I210/I211 */ |
127 | * SYSTIM read access for I210/I211 | ||
128 | */ | ||
129 | |||
130 | static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) | 118 | static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) |
131 | { | 119 | { |
132 | struct e1000_hw *hw = &adapter->hw; | 120 | struct e1000_hw *hw = &adapter->hw; |
133 | u32 sec, nsec, jk; | 121 | u32 sec, nsec, jk; |
134 | 122 | ||
135 | /* | 123 | /* The timestamp latches on lowest register read. For I210/I211, the |
136 | * The timestamp latches on lowest register read. For I210/I211, the | ||
137 | * lowest register is SYSTIMR. Since we only need to provide nanosecond | 124 | * lowest register is SYSTIMR. Since we only need to provide nanosecond |
138 | * resolution, we can ignore it. | 125 | * resolution, we can ignore it. |
139 | */ | 126 | */ |
@@ -150,8 +137,7 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter, | |||
150 | { | 137 | { |
151 | struct e1000_hw *hw = &adapter->hw; | 138 | struct e1000_hw *hw = &adapter->hw; |
152 | 139 | ||
153 | /* | 140 | /* Writing the SYSTIMR register is not necessary as it only provides |
154 | * Writing the SYSTIMR register is not necessary as it only provides | ||
155 | * sub-nanosecond resolution. | 141 | * sub-nanosecond resolution. |
156 | */ | 142 | */ |
157 | wr32(E1000_SYSTIML, ts->tv_nsec); | 143 | wr32(E1000_SYSTIML, ts->tv_nsec); |
@@ -207,10 +193,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter, | |||
207 | } | 193 | } |
208 | } | 194 | } |
209 | 195 | ||
210 | /* | 196 | /* PTP clock operations */ |
211 | * PTP clock operations | ||
212 | */ | ||
213 | |||
214 | static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) | 197 | static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) |
215 | { | 198 | { |
216 | struct igb_adapter *igb = container_of(ptp, struct igb_adapter, | 199 | struct igb_adapter *igb = container_of(ptp, struct igb_adapter, |
@@ -387,7 +370,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp, | |||
387 | * | 370 | * |
388 | * This work function polls the TSYNCTXCTL valid bit to determine when a | 371 | * This work function polls the TSYNCTXCTL valid bit to determine when a |
389 | * timestamp has been taken for the current stored skb. | 372 | * timestamp has been taken for the current stored skb. |
390 | */ | 373 | **/ |
391 | void igb_ptp_tx_work(struct work_struct *work) | 374 | void igb_ptp_tx_work(struct work_struct *work) |
392 | { | 375 | { |
393 | struct igb_adapter *adapter = container_of(work, struct igb_adapter, | 376 | struct igb_adapter *adapter = container_of(work, struct igb_adapter, |
@@ -437,7 +420,7 @@ static void igb_ptp_overflow_check(struct work_struct *work) | |||
437 | * dropped an Rx packet that was timestamped when the ring is full. The | 420 | * dropped an Rx packet that was timestamped when the ring is full. The |
438 | * particular error is rare but leaves the device in a state unable to timestamp | 421 | * particular error is rare but leaves the device in a state unable to timestamp |
439 | * any future packets. | 422 | * any future packets. |
440 | */ | 423 | **/ |
441 | void igb_ptp_rx_hang(struct igb_adapter *adapter) | 424 | void igb_ptp_rx_hang(struct igb_adapter *adapter) |
442 | { | 425 | { |
443 | struct e1000_hw *hw = &adapter->hw; | 426 | struct e1000_hw *hw = &adapter->hw; |
@@ -481,7 +464,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter) | |||
481 | * If we were asked to do hardware stamping and such a time stamp is | 464 | * If we were asked to do hardware stamping and such a time stamp is |
482 | * available, then it must have been for this skb here because we only | 465 | * available, then it must have been for this skb here because we only |
483 | * allow only one such packet into the queue. | 466 | * allow only one such packet into the queue. |
484 | */ | 467 | **/ |
485 | void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) | 468 | void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) |
486 | { | 469 | { |
487 | struct e1000_hw *hw = &adapter->hw; | 470 | struct e1000_hw *hw = &adapter->hw; |
@@ -506,15 +489,14 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) | |||
506 | * This function is meant to retrieve a timestamp from the first buffer of an | 489 | * This function is meant to retrieve a timestamp from the first buffer of an |
507 | * incoming frame. The value is stored in little endian format starting on | 490 | * incoming frame. The value is stored in little endian format starting on |
508 | * byte 8. | 491 | * byte 8. |
509 | */ | 492 | **/ |
510 | void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, | 493 | void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, |
511 | unsigned char *va, | 494 | unsigned char *va, |
512 | struct sk_buff *skb) | 495 | struct sk_buff *skb) |
513 | { | 496 | { |
514 | __le64 *regval = (__le64 *)va; | 497 | __le64 *regval = (__le64 *)va; |
515 | 498 | ||
516 | /* | 499 | /* The timestamp is recorded in little endian format. |
517 | * The timestamp is recorded in little endian format. | ||
518 | * DWORD: 0 1 2 3 | 500 | * DWORD: 0 1 2 3 |
519 | * Field: Reserved Reserved SYSTIML SYSTIMH | 501 | * Field: Reserved Reserved SYSTIML SYSTIMH |
520 | */ | 502 | */ |
@@ -529,7 +511,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, | |||
529 | * | 511 | * |
530 | * This function is meant to retrieve a timestamp from the internal registers | 512 | * This function is meant to retrieve a timestamp from the internal registers |
531 | * of the adapter and store it in the skb. | 513 | * of the adapter and store it in the skb. |
532 | */ | 514 | **/ |
533 | void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, | 515 | void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, |
534 | struct sk_buff *skb) | 516 | struct sk_buff *skb) |
535 | { | 517 | { |
@@ -537,8 +519,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, | |||
537 | struct e1000_hw *hw = &adapter->hw; | 519 | struct e1000_hw *hw = &adapter->hw; |
538 | u64 regval; | 520 | u64 regval; |
539 | 521 | ||
540 | /* | 522 | /* If this bit is set, then the RX registers contain the time stamp. No |
541 | * If this bit is set, then the RX registers contain the time stamp. No | ||
542 | * other packet will be time stamped until we read these registers, so | 523 | * other packet will be time stamped until we read these registers, so |
543 | * read the registers to make them available again. Because only one | 524 | * read the registers to make them available again. Because only one |
544 | * packet can be time stamped at a time, we know that the register | 525 | * packet can be time stamped at a time, we know that the register |
@@ -574,7 +555,6 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, | |||
574 | * type has to be specified. Matching the kind of event packet is | 555 | * type has to be specified. Matching the kind of event packet is |
575 | * not supported, with the exception of "all V2 events regardless of | 556 | * not supported, with the exception of "all V2 events regardless of |
576 | * level 2 or 4". | 557 | * level 2 or 4". |
577 | * | ||
578 | **/ | 558 | **/ |
579 | int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, | 559 | int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, |
580 | struct ifreq *ifr, int cmd) | 560 | struct ifreq *ifr, int cmd) |
@@ -655,10 +635,9 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, | |||
655 | return 0; | 635 | return 0; |
656 | } | 636 | } |
657 | 637 | ||
658 | /* | 638 | /* Per-packet timestamping only works if all packets are |
659 | * Per-packet timestamping only works if all packets are | ||
660 | * timestamped, so enable timestamping in all packets as | 639 | * timestamped, so enable timestamping in all packets as |
661 | * long as one rx filter was configured. | 640 | * long as one Rx filter was configured. |
662 | */ | 641 | */ |
663 | if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { | 642 | if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { |
664 | tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; | 643 | tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; |
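
The all-or-nothing behaviour this comment describes means the filter reported back to the caller must be widened as well: on 82580 and later parts, any specific Rx filter request is promoted to timestamping everything. A sketch of that decision, with a reduced enum standing in for the real hwtstamp filter values:

#include <stdbool.h>

enum rx_filter { RX_FILTER_NONE, RX_FILTER_SOME, RX_FILTER_ALL };

static enum rx_filter effective_rx_filter(bool mac_82580_or_newer,
					  enum rx_filter requested)
{
	if (mac_82580_or_newer && requested != RX_FILTER_NONE)
		return RX_FILTER_ALL;	/* per-packet stamping: all or none */
	return requested;
}
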