Diffstat (limited to 'drivers/net/igb')
-rw-r--r--  drivers/net/igb/e1000_82575.c   |  306
-rw-r--r--  drivers/net/igb/e1000_82575.h   |   26
-rw-r--r--  drivers/net/igb/e1000_defines.h |   33
-rw-r--r--  drivers/net/igb/e1000_hw.h      |    8
-rw-r--r--  drivers/net/igb/e1000_mac.c     |  100
-rw-r--r--  drivers/net/igb/e1000_mbx.c     |   82
-rw-r--r--  drivers/net/igb/e1000_mbx.h     |   10
-rw-r--r--  drivers/net/igb/e1000_nvm.c     |   36
-rw-r--r--  drivers/net/igb/e1000_phy.c     |  207
-rw-r--r--  drivers/net/igb/e1000_phy.h     |    4
-rw-r--r--  drivers/net/igb/e1000_regs.h    |   75
-rw-r--r--  drivers/net/igb/igb.h           |  143
-rw-r--r--  drivers/net/igb/igb_ethtool.c   |  690
-rw-r--r--  drivers/net/igb/igb_main.c      | 3273
14 files changed, 2733 insertions(+), 2260 deletions(-)
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index f8f5772557c..5d345e3036a 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -81,6 +81,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
         break;
     case E1000_DEV_ID_82576:
     case E1000_DEV_ID_82576_NS:
+    case E1000_DEV_ID_82576_NS_SERDES:
     case E1000_DEV_ID_82576_FIBER:
     case E1000_DEV_ID_82576_SERDES:
     case E1000_DEV_ID_82576_QUAD_COPPER:
@@ -240,9 +241,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
  **/
 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
 {
-    u16 mask;
+    u16 mask = E1000_SWFW_PHY0_SM;
 
-    mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+    if (hw->bus.func == E1000_FUNC_1)
+        mask = E1000_SWFW_PHY1_SM;
 
     return igb_acquire_swfw_sync_82575(hw, mask);
 }
@@ -256,9 +258,11 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
  **/
 static void igb_release_phy_82575(struct e1000_hw *hw)
 {
-    u16 mask;
+    u16 mask = E1000_SWFW_PHY0_SM;
+
+    if (hw->bus.func == E1000_FUNC_1)
+        mask = E1000_SWFW_PHY1_SM;
 
-    mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
     igb_release_swfw_sync_82575(hw, mask);
 }
 
@@ -274,45 +278,23 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
                                         u16 *data)
 {
-    struct e1000_phy_info *phy = &hw->phy;
-    u32 i, i2ccmd = 0;
+    s32 ret_val = -E1000_ERR_PARAM;
 
     if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
         hw_dbg("PHY Address %u is out of range\n", offset);
-        return -E1000_ERR_PARAM;
+        goto out;
     }
 
-    /*
-     * Set up Op-code, Phy Address, and register address in the I2CCMD
-     * register.  The MAC will take care of interfacing with the
-     * PHY to retrieve the desired data.
-     */
-    i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-              (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
-              (E1000_I2CCMD_OPCODE_READ));
-
-    wr32(E1000_I2CCMD, i2ccmd);
-
-    /* Poll the ready bit to see if the I2C read completed */
-    for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-        udelay(50);
-        i2ccmd = rd32(E1000_I2CCMD);
-        if (i2ccmd & E1000_I2CCMD_READY)
-            break;
-    }
-    if (!(i2ccmd & E1000_I2CCMD_READY)) {
-        hw_dbg("I2CCMD Read did not complete\n");
-        return -E1000_ERR_PHY;
-    }
-    if (i2ccmd & E1000_I2CCMD_ERROR) {
-        hw_dbg("I2CCMD Error bit set\n");
-        return -E1000_ERR_PHY;
-    }
+    ret_val = hw->phy.ops.acquire(hw);
+    if (ret_val)
+        goto out;
 
-    /* Need to byte-swap the 16-bit value. */
-    *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+    ret_val = igb_read_phy_reg_i2c(hw, offset, data);
 
-    return 0;
+    hw->phy.ops.release(hw);
+
+out:
+    return ret_val;
 }
 
 /**
@@ -327,47 +309,24 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
                                          u16 data)
 {
-    struct e1000_phy_info *phy = &hw->phy;
-    u32 i, i2ccmd = 0;
-    u16 phy_data_swapped;
+    s32 ret_val = -E1000_ERR_PARAM;
+
 
     if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
         hw_dbg("PHY Address %d is out of range\n", offset);
-        return -E1000_ERR_PARAM;
+        goto out;
     }
 
-    /* Swap the data bytes for the I2C interface */
-    phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+    ret_val = hw->phy.ops.acquire(hw);
+    if (ret_val)
+        goto out;
 
-    /*
-     * Set up Op-code, Phy Address, and register address in the I2CCMD
-     * register.  The MAC will take care of interfacing with the
-     * PHY to retrieve the desired data.
-     */
-    i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-              (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
-              E1000_I2CCMD_OPCODE_WRITE |
-              phy_data_swapped);
-
-    wr32(E1000_I2CCMD, i2ccmd);
-
-    /* Poll the ready bit to see if the I2C read completed */
-    for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-        udelay(50);
-        i2ccmd = rd32(E1000_I2CCMD);
-        if (i2ccmd & E1000_I2CCMD_READY)
-            break;
-    }
-    if (!(i2ccmd & E1000_I2CCMD_READY)) {
-        hw_dbg("I2CCMD Write did not complete\n");
-        return -E1000_ERR_PHY;
-    }
-    if (i2ccmd & E1000_I2CCMD_ERROR) {
-        hw_dbg("I2CCMD Error bit set\n");
-        return -E1000_ERR_PHY;
-    }
+    ret_val = igb_write_phy_reg_i2c(hw, offset, data);
 
-    return 0;
+    hw->phy.ops.release(hw);
+
+out:
+    return ret_val;
 }
 
 /**
@@ -706,9 +665,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
     s32 ret_val;
     u16 speed, duplex;
 
-    /* SGMII link check is done through the PCS register. */
-    if ((hw->phy.media_type != e1000_media_type_copper) ||
-        (igb_sgmii_active_82575(hw))) {
+    if (hw->phy.media_type != e1000_media_type_copper) {
         ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
                                                      &duplex);
         /*
@@ -723,6 +680,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
 
     return ret_val;
 }
+
 /**
  * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
  * @hw: pointer to the HW structure
@@ -788,13 +746,23 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
 {
     u32 reg;
+    u16 eeprom_data = 0;
 
     if (hw->phy.media_type != e1000_media_type_internal_serdes ||
         igb_sgmii_active_82575(hw))
         return;
 
-    /* if the management interface is not enabled, then power down */
-    if (!igb_enable_mng_pass_thru(hw)) {
+    if (hw->bus.func == E1000_FUNC_0)
+        hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+    else if (hw->bus.func == E1000_FUNC_1)
+        hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+
+    /*
+     * If APM is not enabled in the EEPROM and management interface is
+     * not enabled, then power down.
+     */
+    if (!(eeprom_data & E1000_NVM_APME_82575) &&
+        !igb_enable_mng_pass_thru(hw)) {
         /* Disable PCS to turn off link */
         reg = rd32(E1000_PCS_CFG0);
         reg &= ~E1000_PCS_CFG_PCS_EN;
@@ -908,6 +876,11 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
     for (i = 0; i < mac->mta_reg_count; i++)
         array_wr32(E1000_MTA, i, 0);
 
+    /* Zero out the Unicast HASH table */
+    hw_dbg("Zeroing the UTA\n");
+    for (i = 0; i < mac->uta_reg_count; i++)
+        array_wr32(E1000_UTA, i, 0);
+
     /* Setup link and flow control */
     ret_val = igb_setup_link(hw);
 
@@ -934,7 +907,6 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 {
     u32 ctrl;
     s32 ret_val;
-    bool link;
 
     ctrl = rd32(E1000_CTRL);
     ctrl |= E1000_CTRL_SLU;
@@ -967,53 +939,19 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
     if (ret_val)
         goto out;
 
-    if (hw->mac.autoneg) {
-        /*
-         * Setup autoneg and flow control advertisement
-         * and perform autonegotiation.
-         */
-        ret_val = igb_copper_link_autoneg(hw);
-        if (ret_val)
-            goto out;
-    } else {
-        /*
-         * PHY will be set to 10H, 10F, 100H or 100F
-         * depending on user settings.
-         */
-        hw_dbg("Forcing Speed and Duplex\n");
-        ret_val = hw->phy.ops.force_speed_duplex(hw);
-        if (ret_val) {
-            hw_dbg("Error Forcing Speed and Duplex\n");
-            goto out;
-        }
-    }
-
-    /*
-     * Check link status. Wait up to 100 microseconds for link to become
-     * valid.
-     */
-    ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
-    if (ret_val)
-        goto out;
-
-    if (link) {
-        hw_dbg("Valid link established!!!\n");
-        /* Config the MAC and PHY after link is up */
-        igb_config_collision_dist(hw);
-        ret_val = igb_config_fc_after_link_up(hw);
-    } else {
-        hw_dbg("Unable to establish link!!!\n");
-    }
-
+    ret_val = igb_setup_copper_link(hw);
 out:
     return ret_val;
 }
 
 /**
- * igb_setup_serdes_link_82575 - Setup link for fiber/serdes
+ * igb_setup_serdes_link_82575 - Setup link for serdes
  * @hw: pointer to the HW structure
  *
- * Configures speed and duplex for fiber and serdes links.
+ * Configure the physical coding sub-layer (PCS) link.  The PCS link is
+ * used on copper connections where the serialized gigabit media independent
+ * interface (sgmii), or serdes fiber is being used.  Configures the link
+ * for auto-negotiation or forces speed/duplex.
  **/
 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
 {
@@ -1086,18 +1024,27 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
      */
     if (hw->mac.autoneg || igb_sgmii_active_82575(hw)) {
         /* Set PCS register for autoneg */
         reg |= E1000_PCS_LCTL_FSV_1000 |      /* Force 1000 */
-               E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
+               E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full dplx */
                E1000_PCS_LCTL_AN_ENABLE |     /* Enable Autoneg */
                E1000_PCS_LCTL_AN_RESTART;     /* Restart autoneg */
         hw_dbg("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
     } else {
-        /* Set PCS register for forced speed */
-        reg |= E1000_PCS_LCTL_FLV_LINK_UP |   /* Force link up */
-               E1000_PCS_LCTL_FSV_1000 |      /* Force 1000 */
-               E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
-               E1000_PCS_LCTL_FSD |           /* Force Speed */
-               E1000_PCS_LCTL_FORCE_LINK;     /* Force Link */
+        /* Check for duplex first */
+        if (hw->mac.forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
+            reg |= E1000_PCS_LCTL_FDV_FULL;
+
+        /* No need to check for 1000/full since the spec states that
+         * it requires autoneg to be enabled */
+        /* Now set speed */
+        if (hw->mac.forced_speed_duplex & E1000_ALL_100_SPEED)
+            reg |= E1000_PCS_LCTL_FSV_100;
+
+        /* Force speed and force link */
+        reg |= E1000_PCS_LCTL_FSD |
+               E1000_PCS_LCTL_FORCE_LINK |
+               E1000_PCS_LCTL_FLV_LINK_UP;
+
         hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
     }
 
@@ -1167,9 +1114,18 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
 {
     s32 ret_val = 0;
 
-    if (igb_check_alt_mac_addr(hw))
-        ret_val = igb_read_mac_addr(hw);
+    /*
+     * If there's an alternate MAC address place it in RAR0
+     * so that it will override the Si installed default perm
+     * address.
+     */
+    ret_val = igb_check_alt_mac_addr(hw);
+    if (ret_val)
+        goto out;
+
+    ret_val = igb_read_mac_addr(hw);
 
+out:
     return ret_val;
 }
 
@@ -1181,61 +1137,59 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
  **/
 static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
 {
-    u32 temp;
-
     igb_clear_hw_cntrs_base(hw);
 
-    temp = rd32(E1000_PRC64);
-    temp = rd32(E1000_PRC127);
-    temp = rd32(E1000_PRC255);
-    temp = rd32(E1000_PRC511);
-    temp = rd32(E1000_PRC1023);
-    temp = rd32(E1000_PRC1522);
-    temp = rd32(E1000_PTC64);
-    temp = rd32(E1000_PTC127);
-    temp = rd32(E1000_PTC255);
-    temp = rd32(E1000_PTC511);
-    temp = rd32(E1000_PTC1023);
-    temp = rd32(E1000_PTC1522);
+    rd32(E1000_PRC64);
+    rd32(E1000_PRC127);
+    rd32(E1000_PRC255);
+    rd32(E1000_PRC511);
+    rd32(E1000_PRC1023);
+    rd32(E1000_PRC1522);
+    rd32(E1000_PTC64);
+    rd32(E1000_PTC127);
+    rd32(E1000_PTC255);
+    rd32(E1000_PTC511);
+    rd32(E1000_PTC1023);
+    rd32(E1000_PTC1522);
 
-    temp = rd32(E1000_ALGNERRC);
-    temp = rd32(E1000_RXERRC);
-    temp = rd32(E1000_TNCRS);
-    temp = rd32(E1000_CEXTERR);
-    temp = rd32(E1000_TSCTC);
-    temp = rd32(E1000_TSCTFC);
+    rd32(E1000_ALGNERRC);
+    rd32(E1000_RXERRC);
+    rd32(E1000_TNCRS);
+    rd32(E1000_CEXTERR);
+    rd32(E1000_TSCTC);
+    rd32(E1000_TSCTFC);
 
-    temp = rd32(E1000_MGTPRC);
-    temp = rd32(E1000_MGTPDC);
-    temp = rd32(E1000_MGTPTC);
+    rd32(E1000_MGTPRC);
+    rd32(E1000_MGTPDC);
+    rd32(E1000_MGTPTC);
 
-    temp = rd32(E1000_IAC);
-    temp = rd32(E1000_ICRXOC);
+    rd32(E1000_IAC);
+    rd32(E1000_ICRXOC);
 
-    temp = rd32(E1000_ICRXPTC);
-    temp = rd32(E1000_ICRXATC);
-    temp = rd32(E1000_ICTXPTC);
-    temp = rd32(E1000_ICTXATC);
-    temp = rd32(E1000_ICTXQEC);
-    temp = rd32(E1000_ICTXQMTC);
-    temp = rd32(E1000_ICRXDMTC);
+    rd32(E1000_ICRXPTC);
+    rd32(E1000_ICRXATC);
+    rd32(E1000_ICTXPTC);
+    rd32(E1000_ICTXATC);
+    rd32(E1000_ICTXQEC);
+    rd32(E1000_ICTXQMTC);
+    rd32(E1000_ICRXDMTC);
 
-    temp = rd32(E1000_CBTMPC);
-    temp = rd32(E1000_HTDPMC);
-    temp = rd32(E1000_CBRMPC);
-    temp = rd32(E1000_RPTHC);
-    temp = rd32(E1000_HGPTC);
-    temp = rd32(E1000_HTCBDPC);
-    temp = rd32(E1000_HGORCL);
-    temp = rd32(E1000_HGORCH);
-    temp = rd32(E1000_HGOTCL);
-    temp = rd32(E1000_HGOTCH);
-    temp = rd32(E1000_LENERRS);
+    rd32(E1000_CBTMPC);
+    rd32(E1000_HTDPMC);
+    rd32(E1000_CBRMPC);
+    rd32(E1000_RPTHC);
+    rd32(E1000_HGPTC);
+    rd32(E1000_HTCBDPC);
+    rd32(E1000_HGORCL);
+    rd32(E1000_HGORCH);
+    rd32(E1000_HGOTCL);
+    rd32(E1000_HGOTCH);
+    rd32(E1000_LENERRS);
 
     /* This register should not be read in copper configurations */
     if (hw->phy.media_type == e1000_media_type_internal_serdes ||
         igb_sgmii_active_82575(hw))
-        temp = rd32(E1000_SCVPC);
+        rd32(E1000_SCVPC);
 }
 
 /**
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index ebd146fd4e1..b3808ca49ef 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -66,6 +66,8 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
                       E1000_EICR_RX_QUEUE3)
 
 /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */
 
 /* Receive Descriptor - Advanced */
 union e1000_adv_rx_desc {
@@ -98,6 +100,7 @@ union e1000_adv_rx_desc {
 
 #define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
 #define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */
 
 /* Transmit Descriptor - Advanced */
 union e1000_adv_tx_desc {
@@ -167,6 +170,18 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
 #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
 
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
+#define E1000_ETQF_1588            (1 << 30)
+
+/* FTQF register bit definitions */
+#define E1000_FTQF_VF_BP               0x00008000
+#define E1000_FTQF_1588_TIME_STAMP     0x08000000
+#define E1000_FTQF_MASK                0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP       0x10000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575           0x0400
 #define MAX_NUM_VFS                    8
 
 #define E1000_DTXSWC_VMDQ_LOOPBACK_EN  (1 << 31)  /* global VF LB enable */
@@ -202,8 +217,19 @@ struct e1000_adv_tx_context_desc {
 #define E1000_IOVCTL 0x05BBC
 #define E1000_IOVCTL_REUSE_VFQ 0x00000001
 
+#define E1000_RPLOLR_STRVLAN    0x40000000
+#define E1000_RPLOLR_STRCRC     0x80000000
+
+#define E1000_DTXCTL_8023LL     0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN     0x0020
+#define E1000_DTXCTL_SPOOF_INT  0x0040
+
 #define ALL_QUEUES 0xFFFF
 
+/* RX packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
 void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
 void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
 
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index cb916833f30..48fcab03b75 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -435,6 +435,39 @@
 /* Flow Control */
 #define E1000_FCRTL_XONE 0x80000000  /* Enable XON frame transmission */
 
+#define E1000_TSYNCTXCTL_VALID    0x00000001 /* tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable tx timestampping */
+
+#define E1000_TSYNCRXCTL_VALID           0x00000001 /* rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK       0x0000000E /* rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2      0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1      0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2   0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL        0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2   0x0A
+#define E1000_TSYNCRXCTL_ENABLED  0x00000010 /* enable rx timestampping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK                  0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE                0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE           0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE            0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE          0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE          0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK                  0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE           0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE      0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE     0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE            0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE          0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE            0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE          0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE          0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
 #define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 119869b1124..2dc929419df 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -42,6 +42,7 @@ struct e1000_hw;
 #define E1000_DEV_ID_82576_SERDES             0x10E7
 #define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
 #define E1000_DEV_ID_82576_NS                 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES          0x1518
 #define E1000_DEV_ID_82576_SERDES_QUAD        0x150D
 #define E1000_DEV_ID_82575EB_COPPER           0x10A7
 #define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
@@ -50,8 +51,11 @@ struct e1000_hw;
 #define E1000_REVISION_2 2
 #define E1000_REVISION_4 4
 
+#define E1000_FUNC_0     0
 #define E1000_FUNC_1     1
 
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
+
 enum e1000_mac_type {
     e1000_undefined = 0,
     e1000_82575,
@@ -70,7 +74,6 @@ enum e1000_nvm_type {
     e1000_nvm_unknown = 0,
     e1000_nvm_none,
     e1000_nvm_eeprom_spi,
-    e1000_nvm_eeprom_microwire,
     e1000_nvm_flash_hw,
     e1000_nvm_flash_sw
 };
@@ -79,8 +82,6 @@ enum e1000_nvm_override {
     e1000_nvm_override_none = 0,
     e1000_nvm_override_spi_small,
     e1000_nvm_override_spi_large,
-    e1000_nvm_override_microwire_small,
-    e1000_nvm_override_microwire_large
 };
 
 enum e1000_phy_type {
@@ -339,6 +340,7 @@ struct e1000_mac_info {
     u16 ifs_ratio;
     u16 ifs_step_size;
     u16 mta_reg_count;
+    u16 uta_reg_count;
 
     /* Maximum size of the MTA register table in all supported adapters */
     #define MAX_MTA_REG 128
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 7d76bb085e1..2ad358a240b 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -185,13 +185,12 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
     }
 
     if (nvm_alt_mac_addr_offset == 0xFFFF) {
-        ret_val = -(E1000_NOT_IMPLEMENTED);
+        /* There is no Alternate MAC Address */
         goto out;
     }
 
     if (hw->bus.func == E1000_FUNC_1)
-        nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16);
-
+        nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
     for (i = 0; i < ETH_ALEN; i += 2) {
         offset = nvm_alt_mac_addr_offset + (i >> 1);
         ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
@@ -206,14 +205,16 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
 
     /* if multicast bit is set, the alternate address will not be used */
     if (alt_mac_addr[0] & 0x01) {
-        ret_val = -(E1000_NOT_IMPLEMENTED);
+        hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
         goto out;
     }
 
-    for (i = 0; i < ETH_ALEN; i++)
-        hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
-
-    hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);
+    /*
+     * We have a valid alternate MAC address, and we want to treat it the
+     * same as the normal permanent MAC address stored by the HW into the
+     * RAR. Do this by mapping this address into RAR0.
+     */
+    hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
 
 out:
     return ret_val;
@@ -246,8 +247,15 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
     if (rar_low || rar_high)
         rar_high |= E1000_RAH_AV;
 
+    /*
+     * Some bridges will combine consecutive 32-bit writes into
+     * a single burst write, which will malfunction on some parts.
+     * The flushes avoid this.
+     */
     wr32(E1000_RAL(index), rar_low);
+    wrfl();
     wr32(E1000_RAH(index), rar_high);
+    wrfl();
 }
 
 /**
@@ -399,45 +407,43 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
  **/
 void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
 {
-    u32 temp;
-
-    temp = rd32(E1000_CRCERRS);
-    temp = rd32(E1000_SYMERRS);
-    temp = rd32(E1000_MPC);
-    temp = rd32(E1000_SCC);
-    temp = rd32(E1000_ECOL);
-    temp = rd32(E1000_MCC);
-    temp = rd32(E1000_LATECOL);
-    temp = rd32(E1000_COLC);
-    temp = rd32(E1000_DC);
-    temp = rd32(E1000_SEC);
-    temp = rd32(E1000_RLEC);
-    temp = rd32(E1000_XONRXC);
-    temp = rd32(E1000_XONTXC);
-    temp = rd32(E1000_XOFFRXC);
-    temp = rd32(E1000_XOFFTXC);
-    temp = rd32(E1000_FCRUC);
-    temp = rd32(E1000_GPRC);
-    temp = rd32(E1000_BPRC);
-    temp = rd32(E1000_MPRC);
-    temp = rd32(E1000_GPTC);
-    temp = rd32(E1000_GORCL);
-    temp = rd32(E1000_GORCH);
-    temp = rd32(E1000_GOTCL);
-    temp = rd32(E1000_GOTCH);
-    temp = rd32(E1000_RNBC);
-    temp = rd32(E1000_RUC);
-    temp = rd32(E1000_RFC);
-    temp = rd32(E1000_ROC);
-    temp = rd32(E1000_RJC);
-    temp = rd32(E1000_TORL);
-    temp = rd32(E1000_TORH);
-    temp = rd32(E1000_TOTL);
-    temp = rd32(E1000_TOTH);
-    temp = rd32(E1000_TPR);
-    temp = rd32(E1000_TPT);
-    temp = rd32(E1000_MPTC);
-    temp = rd32(E1000_BPTC);
+    rd32(E1000_CRCERRS);
+    rd32(E1000_SYMERRS);
+    rd32(E1000_MPC);
+    rd32(E1000_SCC);
+    rd32(E1000_ECOL);
+    rd32(E1000_MCC);
+    rd32(E1000_LATECOL);
+    rd32(E1000_COLC);
+    rd32(E1000_DC);
+    rd32(E1000_SEC);
+    rd32(E1000_RLEC);
+    rd32(E1000_XONRXC);
+    rd32(E1000_XONTXC);
+    rd32(E1000_XOFFRXC);
+    rd32(E1000_XOFFTXC);
+    rd32(E1000_FCRUC);
+    rd32(E1000_GPRC);
+    rd32(E1000_BPRC);
+    rd32(E1000_MPRC);
+    rd32(E1000_GPTC);
+    rd32(E1000_GORCL);
+    rd32(E1000_GORCH);
+    rd32(E1000_GOTCL);
+    rd32(E1000_GOTCH);
+    rd32(E1000_RNBC);
+    rd32(E1000_RUC);
+    rd32(E1000_RFC);
+    rd32(E1000_ROC);
+    rd32(E1000_RJC);
+    rd32(E1000_TORL);
+    rd32(E1000_TORH);
+    rd32(E1000_TOTL);
+    rd32(E1000_TOTH);
+    rd32(E1000_TPR);
+    rd32(E1000_TPT);
+    rd32(E1000_MPTC);
+    rd32(E1000_BPTC);
 }
 
 /**
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index ed9058eca45..c474cdb7004 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -143,12 +143,16 @@ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
     if (!countdown || !mbx->ops.check_for_msg)
         goto out;
 
-    while (mbx->ops.check_for_msg(hw, mbx_id)) {
+    while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
         countdown--;
         if (!countdown)
             break;
         udelay(mbx->usec_delay);
     }
+
+    /* if we failed, all future posted messages fail until reset */
+    if (!countdown)
+        mbx->timeout = 0;
 out:
     return countdown ? 0 : -E1000_ERR_MBX;
 }
@@ -168,12 +172,16 @@ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
     if (!countdown || !mbx->ops.check_for_ack)
         goto out;
 
-    while (mbx->ops.check_for_ack(hw, mbx_id)) {
+    while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
         countdown--;
         if (!countdown)
             break;
         udelay(mbx->usec_delay);
     }
+
+    /* if we failed, all future posted messages fail until reset */
+    if (!countdown)
+        mbx->timeout = 0;
 out:
     return countdown ? 0 : -E1000_ERR_MBX;
 }
@@ -217,12 +225,13 @@ out:
 static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
     struct e1000_mbx_info *mbx = &hw->mbx;
-    s32 ret_val = 0;
+    s32 ret_val = -E1000_ERR_MBX;
 
-    if (!mbx->ops.write)
+    /* exit if either we can't write or there isn't a defined timeout */
+    if (!mbx->ops.write || !mbx->timeout)
         goto out;
 
-    /* send msg*/
+    /* send msg */
     ret_val = mbx->ops.write(hw, msg, size, mbx_id);
 
     /* if msg sent wait until we receive an ack */
@@ -305,6 +314,30 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
 }
 
 /**
+ * igb_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+    s32 ret_val = -E1000_ERR_MBX;
+    u32 p2v_mailbox;
+
+
+    /* Take ownership of the buffer */
+    wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+    /* reserve mailbox for vf use */
+    p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
+    if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+        ret_val = 0;
+
+    return ret_val;
+}
+
+/**
  * igb_write_mbx_pf - Places a message in the mailbox
  * @hw: pointer to the HW structure
  * @msg: The message buffer
@@ -316,27 +349,17 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
 static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
                             u16 vf_number)
 {
-    u32 p2v_mailbox;
-    s32 ret_val = 0;
+    s32 ret_val;
     u16 i;
 
-    /* Take ownership of the buffer */
-    wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
-
-    /* Make sure we have ownership now... */
-    p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
-    if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
-        /* failed to grab ownership */
-        ret_val = -E1000_ERR_MBX;
+    /* lock the mailbox to prevent pf/vf race condition */
+    ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+    if (ret_val)
         goto out_no_write;
-    }
 
-    /*
-     * flush any ack or msg which may already be in the queue
-     * as they are likely the result of an error
-     */
-    igb_check_for_ack_pf(hw, vf_number);
+    /* flush msg and acks as we are overwriting the message buffer */
     igb_check_for_msg_pf(hw, vf_number);
+    igb_check_for_ack_pf(hw, vf_number);
 
     /* copy the caller specified message to the mailbox memory buffer */
     for (i = 0; i < size; i++)
@@ -367,20 +390,13 @@ out_no_write:
 static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
                            u16 vf_number)
 {
-    u32 p2v_mailbox;
-    s32 ret_val = 0;
+    s32 ret_val;
     u16 i;
 
-    /* Take ownership of the buffer */
-    wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
-
-    /* Make sure we have ownership now... */
-    p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
-    if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
-        /* failed to grab ownership */
-        ret_val = -E1000_ERR_MBX;
+    /* lock the mailbox to prevent pf/vf race condition */
+    ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+    if (ret_val)
         goto out_no_read;
-    }
 
     /* copy the message to the mailbox memory buffer */
     for (i = 0; i < size; i++)
@@ -392,8 +408,6 @@ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
     /* update stats */
     hw->mbx.stats.msgs_rx++;
 
-    ret_val = 0;
-
 out_no_read:
     return ret_val;
 }
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index ebc02ea3f19..bb112fb6c3a 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -58,10 +58,12 @@
 #define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
 
 #define E1000_VF_RESET            0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
-#define E1000_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
-#define E1000_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define E1000_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
+#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST    0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN         0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE          0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC      0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_MULTICAST    (0x02 << E1000_VT_MSGINFO_SHIFT)
 
 #define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
 
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index a88bfe2f1e8..d83b77fa403 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -78,9 +78,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
     u32 mask;
 
     mask = 0x01 << (count - 1);
-    if (nvm->type == e1000_nvm_eeprom_microwire)
-        eecd &= ~E1000_EECD_DO;
-    else if (nvm->type == e1000_nvm_eeprom_spi)
+    if (nvm->type == e1000_nvm_eeprom_spi)
         eecd |= E1000_EECD_DO;
 
     do {
@@ -220,22 +218,7 @@ static void igb_standby_nvm(struct e1000_hw *hw)
     struct e1000_nvm_info *nvm = &hw->nvm;
     u32 eecd = rd32(E1000_EECD);
 
-    if (nvm->type == e1000_nvm_eeprom_microwire) {
-        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
-        wr32(E1000_EECD, eecd);
-        wrfl();
-        udelay(nvm->delay_usec);
-
-        igb_raise_eec_clk(hw, &eecd);
-
-        /* Select EEPROM */
-        eecd |= E1000_EECD_CS;
-        wr32(E1000_EECD, eecd);
-        wrfl();
-        udelay(nvm->delay_usec);
-
-        igb_lower_eec_clk(hw, &eecd);
-    } else if (nvm->type == e1000_nvm_eeprom_spi) {
+    if (nvm->type == e1000_nvm_eeprom_spi) {
         /* Toggle CS to flush commands */
         eecd |= E1000_EECD_CS;
         wr32(E1000_EECD, eecd);
@@ -263,12 +246,6 @@ static void e1000_stop_nvm(struct e1000_hw *hw)
         /* Pull CS high */
         eecd |= E1000_EECD_CS;
         igb_lower_eec_clk(hw, &eecd);
-    } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
-        /* CS on Microcwire is active-high */
-        eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
-        wr32(E1000_EECD, eecd);
-        igb_raise_eec_clk(hw, &eecd);
-        igb_lower_eec_clk(hw, &eecd);
     }
 }
 
@@ -304,14 +281,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
     u8 spi_stat_reg;
 
 
-    if (nvm->type == e1000_nvm_eeprom_microwire) {
-        /* Clear SK and DI */
-        eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
-        wr32(E1000_EECD, eecd);
-        /* Set CS */
-        eecd |= E1000_EECD_CS;
-        wr32(E1000_EECD, eecd);
-    } else if (nvm->type == e1000_nvm_eeprom_spi) {
+    if (nvm->type == e1000_nvm_eeprom_spi) {
         /* Clear SK and CS */
         eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
         wr32(E1000_EECD, eecd);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index ee460600e74..83b706c460b 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -39,6 +39,9 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw);
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] =
     { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+    (sizeof(e1000_m88_cable_length_table) / \
+     sizeof(e1000_m88_cable_length_table[0]))
 
 static const u16 e1000_igp_2_cable_length_table[] =
     { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -109,7 +112,10 @@ out:
  **/
 static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
 {
-    s32 ret_val;
+    s32 ret_val = 0;
+
+    if (!(hw->phy.ops.write_reg))
+        goto out;
 
     ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
     if (ret_val)
@@ -239,6 +245,103 @@ out:
 }
 
 /**
+ * igb_read_phy_reg_i2c - Read PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the i2c interface and stores the
+ * retrieved information in data.
+ **/
+s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+    struct e1000_phy_info *phy = &hw->phy;
+    u32 i, i2ccmd = 0;
+
+
+    /*
+     * Set up Op-code, Phy Address, and register address in the I2CCMD
+     * register.  The MAC will take care of interfacing with the
+     * PHY to retrieve the desired data.
+     */
+    i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+              (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+              (E1000_I2CCMD_OPCODE_READ));
+
+    wr32(E1000_I2CCMD, i2ccmd);
+
+    /* Poll the ready bit to see if the I2C read completed */
+    for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+        udelay(50);
+        i2ccmd = rd32(E1000_I2CCMD);
+        if (i2ccmd & E1000_I2CCMD_READY)
+            break;
+    }
+    if (!(i2ccmd & E1000_I2CCMD_READY)) {
+        hw_dbg("I2CCMD Read did not complete\n");
+        return -E1000_ERR_PHY;
+    }
+    if (i2ccmd & E1000_I2CCMD_ERROR) {
+        hw_dbg("I2CCMD Error bit set\n");
+        return -E1000_ERR_PHY;
+    }
+
+    /* Need to byte-swap the 16-bit value. */
+    *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+    return 0;
+}
+
+/**
+ * igb_write_phy_reg_i2c - Write PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+    struct e1000_phy_info *phy = &hw->phy;
+    u32 i, i2ccmd = 0;
+    u16 phy_data_swapped;
+
+
+    /* Swap the data bytes for the I2C interface */
+    phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+    /*
+     * Set up Op-code, Phy Address, and register address in the I2CCMD
+     * register.  The MAC will take care of interfacing with the
+     * PHY to retrieve the desired data.
+     */
+    i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+              (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+              E1000_I2CCMD_OPCODE_WRITE |
+              phy_data_swapped);
+
+    wr32(E1000_I2CCMD, i2ccmd);
+
+    /* Poll the ready bit to see if the I2C read completed */
+    for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+        udelay(50);
+        i2ccmd = rd32(E1000_I2CCMD);
+        if (i2ccmd & E1000_I2CCMD_READY)
+            break;
+    }
+    if (!(i2ccmd & E1000_I2CCMD_READY)) {
+        hw_dbg("I2CCMD Write did not complete\n");
+        return -E1000_ERR_PHY;
+    }
+    if (i2ccmd & E1000_I2CCMD_ERROR) {
+        hw_dbg("I2CCMD Error bit set\n");
+        return -E1000_ERR_PHY;
+    }
+
+    return 0;
+}
+
+/**
  * igb_read_phy_reg_igp - Read igp PHY register
  * @hw: pointer to the HW structure
  * @offset: register offset to be read
@@ -572,7 +675,7 @@ out:
  * and restart the negotiation process between the link partner. If
  * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
  **/
-s32 igb_copper_link_autoneg(struct e1000_hw *hw)
+static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
 {
     struct e1000_phy_info *phy = &hw->phy;
     s32 ret_val;
@@ -796,6 +899,65 @@ out:
 }
 
 /**
+ * igb_setup_copper_link - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex.  Then we check for link, once link is established calls
+ * to configure collision distance and flow control are called.  If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 igb_setup_copper_link(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    bool link;
+
+
+    if (hw->mac.autoneg) {
+        /*
+         * Setup autoneg and flow control advertisement and perform
+         * autonegotiation.
+         */
+        ret_val = igb_copper_link_autoneg(hw);
+        if (ret_val)
+            goto out;
+    } else {
+        /*
+         * PHY will be set to 10H, 10F, 100H or 100F
+         * depending on user settings.
+         */
+        hw_dbg("Forcing Speed and Duplex\n");
+        ret_val = hw->phy.ops.force_speed_duplex(hw);
+        if (ret_val) {
+            hw_dbg("Error Forcing Speed and Duplex\n");
+            goto out;
+        }
+    }
+
+    /*
+     * Check link status. Wait up to 100 microseconds for link to become
+     * valid.
+     */
+    ret_val = igb_phy_has_link(hw,
+                               COPPER_LINK_UP_LIMIT,
+                               10,
+                               &link);
+    if (ret_val)
+        goto out;
+
+    if (link) {
+        hw_dbg("Valid link established!!!\n");
+        igb_config_collision_dist(hw);
+        ret_val = igb_config_fc_after_link_up(hw);
+    } else {
+        hw_dbg("Unable to establish link!!!\n");
+    }
+
+out:
+    return ret_val;
+}
+
+/**
  * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
  * @hw: pointer to the HW structure
  *
@@ -903,22 +1065,19 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 
     igb_phy_force_speed_duplex_setup(hw, &phy_data);
 
-    /* Reset the phy to commit changes. */
-    phy_data |= MII_CR_RESET;
-
     ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
     if (ret_val)
         goto out;
 
-    udelay(1);
+    /* Reset the phy to commit changes. */
+    ret_val = igb_phy_sw_reset(hw);
+    if (ret_val)
+        goto out;
 
     if (phy->autoneg_wait_to_complete) {
         hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
 
-        ret_val = igb_phy_has_link(hw,
-                                   PHY_FORCE_LIMIT,
-                                   100000,
-                                   &link);
+        ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
         if (ret_val)
             goto out;
 
@@ -928,8 +1087,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
          * Reset the DSP and cross our fingers.
          */
         ret_val = phy->ops.write_reg(hw,
-                M88E1000_PHY_PAGE_SELECT,
-                0x001d);
+                                     M88E1000_PHY_PAGE_SELECT,
+                                     0x001d);
         if (ret_val)
             goto out;
         ret_val = igb_phy_reset_dsp(hw);
@@ -939,7 +1098,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 
         /* Try once more */
-        ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
-                100000, &link);
+        ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
+                                   100000, &link);
         if (ret_val)
             goto out;
     }
@@ -1051,9 +1210,12 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
 s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
 {
     struct e1000_phy_info *phy = &hw->phy;
-    s32 ret_val;
+    s32 ret_val = 0;
     u16 data;
 
+    if (!(hw->phy.ops.read_reg))
+        goto out;
+
     ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
     if (ret_val)
         goto out;
@@ -1288,8 +1450,14 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
          * it across the board.
          */
         ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
-        if (ret_val)
-            break;
+        if (ret_val) {
+            /*
+             * If the first read fails, another entity may have
+             * ownership of the resources, wait and try again to
+             * see if they have relinquished the resources yet.
+             */
+            udelay(usec_interval);
+        }
         ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
         if (ret_val)
             break;
@@ -1333,8 +1501,13 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
 
     index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
             M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+    if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+        ret_val = -E1000_ERR_PHY;
+        goto out;
+    }
+
     phy->min_cable_length = e1000_m88_cable_length_table[index];
-    phy->max_cable_length = e1000_m88_cable_length_table[index+1];
+    phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
 
     phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
 
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index ebe4b616db8..adb9436b733 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -43,7 +43,6 @@ enum e1000_smart_speed {
 
 s32 igb_check_downshift(struct e1000_hw *hw);
 s32 igb_check_reset_block(struct e1000_hw *hw);
-s32 igb_copper_link_autoneg(struct e1000_hw *hw);
 s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
 s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
 s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
@@ -57,10 +56,13 @@ s32 igb_phy_sw_reset(struct e1000_hw *hw);
 s32 igb_phy_hw_reset(struct e1000_hw *hw);
 s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
 s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 igb_setup_copper_link(struct e1000_hw *hw);
 s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
 s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
                      u32 usec_interval, bool *success);
 s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
 
 /* IGP01E1000 Specific Registers */
 #define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 345d1442d6d..934e03b053a 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -76,59 +76,18 @@
 #define E1000_FCRTV    0x02460 /* Flow Control Refresh Timer Value - RW */
 
 /* IEEE 1588 TIMESYNCH */
-#define E1000_TSYNCTXCTL 0x0B614
-#define E1000_TSYNCTXCTL_VALID (1<<0)
-#define E1000_TSYNCTXCTL_ENABLED (1<<4)
-#define E1000_TSYNCRXCTL 0x0B620
-#define E1000_TSYNCRXCTL_VALID (1<<0)
-#define E1000_TSYNCRXCTL_ENABLED (1<<4)
-enum {
-    E1000_TSYNCRXCTL_TYPE_L2_V2 = 0,
-    E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1),
-    E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2),
-    E1000_TSYNCRXCTL_TYPE_ALL = (1<<3),
-    E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1),
-};
-#define E1000_TSYNCRXCFG 0x05F50
-enum {
-    E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0,
-    E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0,
-    E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0,
-    E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0,
-    E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0,
-
-    E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8,
-    E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8,
-    E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8,
-    E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8,
-    E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8,
-    E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8,
-    E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8,
-    E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8,
-    E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8,
-    E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8,
-};
-#define E1000_SYSTIML 0x0B600
-#define E1000_SYSTIMH 0x0B604
-#define E1000_TIMINCA 0x0B608
-
-#define E1000_RXMTRL  0x0B634
-#define E1000_RXSTMPL 0x0B624
-#define E1000_RXSTMPH 0x0B628
-#define E1000_RXSATRL 0x0B62C
-#define E1000_RXSATRH 0x0B630
-
-#define E1000_TXSTMPL 0x0B618
-#define E1000_TXSTMPH 0x0B61C
-
-#define E1000_ETQF0   0x05CB0
-#define E1000_ETQF1   0x05CB4
-#define E1000_ETQF2   0x05CB8
-#define E1000_ETQF3   0x05CBC
-#define E1000_ETQF4   0x05CC0
-#define E1000_ETQF5   0x05CC4
-#define E1000_ETQF6   0x05CC8
-#define E1000_ETQF7   0x05CCC
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL    0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH    0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL    0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH    0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL    0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH    0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML    0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
 
 /* Filtering Registers */
 #define E1000_SAQF(_n) (0x5980 + 4 * (_n))
@@ -143,7 +102,9 @@ enum {
 #define E1000_ETQF(_n)  (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
 
 #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+
 /* Split and Replication RX Control - RW */
+#define E1000_RXPBS     0x02404  /* Rx Packet Buffer Size - RW */
 /*
  * Convenience macros
  *
@@ -288,10 +249,17 @@ enum {
 #define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
 #define E1000_RA       0x05400  /* Receive Address - RW Array */
 #define E1000_RA2      0x054E0  /* 2nd half of receive address array - RW Array */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
 #define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
                                        (0x054E0 + ((_i - 16) * 8)))
 #define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
                                        (0x054E4 + ((_i - 16) * 8)))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
 #define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
 #define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
 #define E1000_WUC      0x05800  /* Wakeup Control - RW */
@@ -331,6 +299,7 @@ enum {
331#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ 299#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
332#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ 300#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
333#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ 301#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
302#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
334#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ 303#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
335/* These act per VF so an array friendly macro is used */ 304/* These act per VF so an array friendly macro is used */
336#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) 305#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
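The indexed receive-address macros above place the first 16 RAL/RAH pairs at 0x05400 and later entries in the 0x054E0 bank, eight bytes per entry. A minimal sketch of how a caller might walk that table with the driver's existing rd32() and hw_dbg() helpers follows; the function name, the rar_count parameter, and the dump loop itself are illustrative only and are not part of this patch.

	/*
	 * Illustrative sketch only -- not part of the patch.  rd32() and
	 * hw_dbg() are the driver's existing helpers; the function name
	 * and the rar_count parameter are hypothetical.
	 */
	static void igb_dump_rar_table(struct e1000_hw *hw, int rar_count)
	{
		int i;

		for (i = 0; i < rar_count; i++) {
			u32 ral = rd32(E1000_RAL(i));	/* low 32 bits of the MAC address */
			u32 rah = rd32(E1000_RAH(i));	/* high 16 bits plus the valid bit */

			hw_dbg("RAR[%d] = %08x:%08x\n", i, rah, ral);
		}
	}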
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 7126fea26fe..63abd1c0d75 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -55,12 +55,14 @@ struct igb_adapter;
55#define IGB_DEFAULT_ITR 3 /* dynamic */ 55#define IGB_DEFAULT_ITR 3 /* dynamic */
56#define IGB_MAX_ITR_USECS 10000 56#define IGB_MAX_ITR_USECS 10000
57#define IGB_MIN_ITR_USECS 10 57#define IGB_MIN_ITR_USECS 10
58#define NON_Q_VECTORS 1
59#define MAX_Q_VECTORS 8
58 60
59/* Transmit and receive queues */ 61/* Transmit and receive queues */
60#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \ 62#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
61 (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4) 63 (hw->mac.type > e1000_82575 ? 8 : 4))
62#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES 64#define IGB_ABS_MAX_TX_QUEUES 8
63#define IGB_ABS_MAX_TX_QUEUES 4 65#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
64 66
65#define IGB_MAX_VF_MC_ENTRIES 30 67#define IGB_MAX_VF_MC_ENTRIES 30
66#define IGB_MAX_VF_FUNCTIONS 8 68#define IGB_MAX_VF_FUNCTIONS 8
@@ -71,9 +73,14 @@ struct vf_data_storage {
71 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; 73 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
72 u16 num_vf_mc_hashes; 74 u16 num_vf_mc_hashes;
73 u16 vlans_enabled; 75 u16 vlans_enabled;
74 bool clear_to_send; 76 u32 flags;
77 unsigned long last_nack;
75}; 78};
76 79
80#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
81#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
82#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
83
77/* RX descriptor control thresholds. 84/* RX descriptor control thresholds.
78 * PTHRESH - MAC will consider prefetch if it has fewer than this number of 85 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
79 * descriptors available in its onboard memory. 86 * descriptors available in its onboard memory.
@@ -85,17 +92,19 @@ struct vf_data_storage {
85 * descriptors until either it has this many to write back, or the 92 * descriptors until either it has this many to write back, or the
86 * ITR timer expires. 93 * ITR timer expires.
87 */ 94 */
88#define IGB_RX_PTHRESH 16 95#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
89#define IGB_RX_HTHRESH 8 96#define IGB_RX_HTHRESH 8
90#define IGB_RX_WTHRESH 1 97#define IGB_RX_WTHRESH 1
98#define IGB_TX_PTHRESH 8
99#define IGB_TX_HTHRESH 1
100#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
101 adapter->msix_entries) ? 0 : 16)
91 102
92/* this is the size past which hardware will drop packets when setting LPE=0 */ 103/* this is the size past which hardware will drop packets when setting LPE=0 */
93#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 104#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
94 105
95/* Supported Rx Buffer Sizes */ 106/* Supported Rx Buffer Sizes */
96#define IGB_RXBUFFER_128 128 /* Used for packet split */ 107#define IGB_RXBUFFER_128 128 /* Used for packet split */
97#define IGB_RXBUFFER_256 256 /* Used for packet split */
98#define IGB_RXBUFFER_512 512
99#define IGB_RXBUFFER_1024 1024 108#define IGB_RXBUFFER_1024 1024
100#define IGB_RXBUFFER_2048 2048 109#define IGB_RXBUFFER_2048 2048
101#define IGB_RXBUFFER_16384 16384 110#define IGB_RXBUFFER_16384 16384
@@ -141,36 +150,55 @@ struct igb_buffer {
141struct igb_tx_queue_stats { 150struct igb_tx_queue_stats {
142 u64 packets; 151 u64 packets;
143 u64 bytes; 152 u64 bytes;
153 u64 restart_queue;
144}; 154};
145 155
146struct igb_rx_queue_stats { 156struct igb_rx_queue_stats {
147 u64 packets; 157 u64 packets;
148 u64 bytes; 158 u64 bytes;
149 u64 drops; 159 u64 drops;
160 u64 csum_err;
161 u64 alloc_failed;
150}; 162};
151 163
152struct igb_ring { 164struct igb_q_vector {
153 struct igb_adapter *adapter; /* backlink */ 165 struct igb_adapter *adapter; /* backlink */
154 void *desc; /* descriptor ring memory */ 166 struct igb_ring *rx_ring;
155 dma_addr_t dma; /* phys address of the ring */ 167 struct igb_ring *tx_ring;
156 unsigned int size; /* length of desc. ring in bytes */ 168 struct napi_struct napi;
157 unsigned int count; /* number of desc. in the ring */
158 u16 next_to_use;
159 u16 next_to_clean;
160 u16 head;
161 u16 tail;
162 struct igb_buffer *buffer_info; /* array of buffer info structs */
163 169
164 u32 eims_value; 170 u32 eims_value;
165 u32 itr_val;
166 u16 itr_register;
167 u16 cpu; 171 u16 cpu;
168 172
169 u16 queue_index; 173 u16 itr_val;
170 u16 reg_idx; 174 u8 set_itr;
175 u8 itr_shift;
176 void __iomem *itr_register;
177
178 char name[IFNAMSIZ + 9];
179};
180
181struct igb_ring {
182 struct igb_q_vector *q_vector; /* backlink to q_vector */
183 struct net_device *netdev; /* back pointer to net_device */
184 struct pci_dev *pdev; /* pci device for dma mapping */
185 dma_addr_t dma; /* phys address of the ring */
186 void *desc; /* descriptor ring memory */
187 unsigned int size; /* length of desc. ring in bytes */
188 u16 count; /* number of desc. in the ring */
189 u16 next_to_use;
190 u16 next_to_clean;
191 u8 queue_index;
192 u8 reg_idx;
193 void __iomem *head;
194 void __iomem *tail;
195 struct igb_buffer *buffer_info; /* array of buffer info structs */
196
171 unsigned int total_bytes; 197 unsigned int total_bytes;
172 unsigned int total_packets; 198 unsigned int total_packets;
173 199
200 u32 flags;
201
174 union { 202 union {
175 /* TX */ 203 /* TX */
176 struct { 204 struct {
@@ -180,16 +208,18 @@ struct igb_ring {
180 /* RX */ 208 /* RX */
181 struct { 209 struct {
182 struct igb_rx_queue_stats rx_stats; 210 struct igb_rx_queue_stats rx_stats;
183 u64 rx_queue_drops; 211 u32 rx_buffer_len;
184 struct napi_struct napi;
185 int set_itr;
186 struct igb_ring *buddy;
187 }; 212 };
188 }; 213 };
189
190 char name[IFNAMSIZ + 5];
191}; 214};
192 215
216#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
217#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
218
219#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */
220
221#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
222
193#define E1000_RX_DESC_ADV(R, i) \ 223#define E1000_RX_DESC_ADV(R, i) \
194 (&(((union e1000_adv_rx_desc *)((R).desc))[i])) 224 (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
195#define E1000_TX_DESC_ADV(R, i) \ 225#define E1000_TX_DESC_ADV(R, i) \
@@ -197,6 +227,15 @@ struct igb_ring {
197#define E1000_TX_CTXTDESC_ADV(R, i) \ 227#define E1000_TX_CTXTDESC_ADV(R, i) \
198 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) 228 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
199 229
230/* igb_desc_unused - calculate how many descriptors are unused */
231static inline int igb_desc_unused(struct igb_ring *ring)
232{
233 if (ring->next_to_clean > ring->next_to_use)
234 return ring->next_to_clean - ring->next_to_use - 1;
235
236 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
237}
238
200/* board specific private data structure */ 239/* board specific private data structure */
201 240
202struct igb_adapter { 241struct igb_adapter {
@@ -205,18 +244,14 @@ struct igb_adapter {
205 struct vlan_group *vlgrp; 244 struct vlan_group *vlgrp;
206 u16 mng_vlan_id; 245 u16 mng_vlan_id;
207 u32 bd_number; 246 u32 bd_number;
208 u32 rx_buffer_len;
209 u32 wol; 247 u32 wol;
210 u32 en_mng_pt; 248 u32 en_mng_pt;
211 u16 link_speed; 249 u16 link_speed;
212 u16 link_duplex; 250 u16 link_duplex;
213 unsigned int total_tx_bytes; 251
214 unsigned int total_tx_packets;
215 unsigned int total_rx_bytes;
216 unsigned int total_rx_packets;
217 /* Interrupt Throttle Rate */ 252 /* Interrupt Throttle Rate */
218 u32 itr; 253 u32 rx_itr_setting;
219 u32 itr_setting; 254 u32 tx_itr_setting;
220 u16 tx_itr; 255 u16 tx_itr;
221 u16 rx_itr; 256 u16 rx_itr;
222 257
@@ -229,13 +264,7 @@ struct igb_adapter {
229 264
230 /* TX */ 265 /* TX */
231 struct igb_ring *tx_ring; /* One per active queue */ 266 struct igb_ring *tx_ring; /* One per active queue */
232 unsigned int restart_queue;
233 unsigned long tx_queue_len; 267 unsigned long tx_queue_len;
234 u32 txd_cmd;
235 u32 gotc;
236 u64 gotc_old;
237 u64 tpt_old;
238 u64 colc_old;
239 u32 tx_timeout_count; 268 u32 tx_timeout_count;
240 269
241 /* RX */ 270 /* RX */
@@ -243,20 +272,12 @@ struct igb_adapter {
243 int num_tx_queues; 272 int num_tx_queues;
244 int num_rx_queues; 273 int num_rx_queues;
245 274
246 u64 hw_csum_err;
247 u64 hw_csum_good;
248 u32 alloc_rx_buff_failed;
249 u32 gorc;
250 u64 gorc_old;
251 u16 rx_ps_hdr_size;
252 u32 max_frame_size; 275 u32 max_frame_size;
253 u32 min_frame_size; 276 u32 min_frame_size;
254 277
255 /* OS defined structs */ 278 /* OS defined structs */
256 struct net_device *netdev; 279 struct net_device *netdev;
257 struct napi_struct napi;
258 struct pci_dev *pdev; 280 struct pci_dev *pdev;
259 struct net_device_stats net_stats;
260 struct cyclecounter cycles; 281 struct cyclecounter cycles;
261 struct timecounter clock; 282 struct timecounter clock;
262 struct timecompare compare; 283 struct timecompare compare;
@@ -273,6 +294,9 @@ struct igb_adapter {
273 struct igb_ring test_rx_ring; 294 struct igb_ring test_rx_ring;
274 295
275 int msg_enable; 296 int msg_enable;
297
298 unsigned int num_q_vectors;
299 struct igb_q_vector *q_vector[MAX_Q_VECTORS];
276 struct msix_entry *msix_entries; 300 struct msix_entry *msix_entries;
277 u32 eims_enable_mask; 301 u32 eims_enable_mask;
278 u32 eims_other; 302 u32 eims_other;
@@ -283,18 +307,19 @@ struct igb_adapter {
283 u32 eeprom_wol; 307 u32 eeprom_wol;
284 308
285 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES]; 309 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
286 unsigned int tx_ring_count; 310 u16 tx_ring_count;
287 unsigned int rx_ring_count; 311 u16 rx_ring_count;
288 unsigned int vfs_allocated_count; 312 unsigned int vfs_allocated_count;
289 struct vf_data_storage *vf_data; 313 struct vf_data_storage *vf_data;
314 u32 rss_queues;
290}; 315};
291 316
292#define IGB_FLAG_HAS_MSI (1 << 0) 317#define IGB_FLAG_HAS_MSI (1 << 0)
293#define IGB_FLAG_DCA_ENABLED (1 << 1) 318#define IGB_FLAG_DCA_ENABLED (1 << 1)
294#define IGB_FLAG_QUAD_PORT_A (1 << 2) 319#define IGB_FLAG_QUAD_PORT_A (1 << 2)
295#define IGB_FLAG_NEED_CTX_IDX (1 << 3) 320#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
296#define IGB_FLAG_RX_CSUM_DISABLED (1 << 4)
297 321
322#define IGB_82576_TSYNC_SHIFT 19
298enum e1000_state_t { 323enum e1000_state_t {
299 __IGB_TESTING, 324 __IGB_TESTING,
300 __IGB_RESETTING, 325 __IGB_RESETTING,
@@ -314,10 +339,18 @@ extern void igb_down(struct igb_adapter *);
314extern void igb_reinit_locked(struct igb_adapter *); 339extern void igb_reinit_locked(struct igb_adapter *);
315extern void igb_reset(struct igb_adapter *); 340extern void igb_reset(struct igb_adapter *);
316extern int igb_set_spd_dplx(struct igb_adapter *, u16); 341extern int igb_set_spd_dplx(struct igb_adapter *, u16);
317extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *); 342extern int igb_setup_tx_resources(struct igb_ring *);
318extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *); 343extern int igb_setup_rx_resources(struct igb_ring *);
319extern void igb_free_tx_resources(struct igb_ring *); 344extern void igb_free_tx_resources(struct igb_ring *);
320extern void igb_free_rx_resources(struct igb_ring *); 345extern void igb_free_rx_resources(struct igb_ring *);
346extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
347extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
348extern void igb_setup_tctl(struct igb_adapter *);
349extern void igb_setup_rctl(struct igb_adapter *);
350extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
351extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
352 struct igb_buffer *);
353extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
321extern void igb_update_stats(struct igb_adapter *); 354extern void igb_update_stats(struct igb_adapter *);
322extern void igb_set_ethtool_ops(struct net_device *); 355extern void igb_set_ethtool_ops(struct net_device *);
323 356
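The igb_desc_unused() helper added to igb.h above centralizes the ring-occupancy arithmetic callers previously open-coded: with count = 8, next_to_use = 6 and next_to_clean = 2 it returns 8 + 2 - 6 - 1 = 3 free slots. A minimal usage sketch, mirroring the call the reworked loopback test below makes when refilling its receive ring (the local ring variable name is assumed):

	/*
	 * Example: count = 8, next_to_use = 6, next_to_clean = 2
	 * => igb_desc_unused() = 8 + 2 - 6 - 1 = 3 free descriptors.
	 * Refill the receive ring with exactly that many buffers.
	 */
	igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));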
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index b243ed3b0c3..88e13f7e566 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -44,78 +44,94 @@ struct igb_stats {
44 int stat_offset; 44 int stat_offset;
45}; 45};
46 46
47#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \ 47#define IGB_STAT(_name, _stat) { \
48 offsetof(struct igb_adapter, m) 48 .stat_string = _name, \
49 .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
50 .stat_offset = offsetof(struct igb_adapter, _stat) \
51}
49static const struct igb_stats igb_gstrings_stats[] = { 52static const struct igb_stats igb_gstrings_stats[] = {
50 { "rx_packets", IGB_STAT(stats.gprc) }, 53 IGB_STAT("rx_packets", stats.gprc),
51 { "tx_packets", IGB_STAT(stats.gptc) }, 54 IGB_STAT("tx_packets", stats.gptc),
52 { "rx_bytes", IGB_STAT(stats.gorc) }, 55 IGB_STAT("rx_bytes", stats.gorc),
53 { "tx_bytes", IGB_STAT(stats.gotc) }, 56 IGB_STAT("tx_bytes", stats.gotc),
54 { "rx_broadcast", IGB_STAT(stats.bprc) }, 57 IGB_STAT("rx_broadcast", stats.bprc),
55 { "tx_broadcast", IGB_STAT(stats.bptc) }, 58 IGB_STAT("tx_broadcast", stats.bptc),
56 { "rx_multicast", IGB_STAT(stats.mprc) }, 59 IGB_STAT("rx_multicast", stats.mprc),
57 { "tx_multicast", IGB_STAT(stats.mptc) }, 60 IGB_STAT("tx_multicast", stats.mptc),
58 { "rx_errors", IGB_STAT(net_stats.rx_errors) }, 61 IGB_STAT("multicast", stats.mprc),
59 { "tx_errors", IGB_STAT(net_stats.tx_errors) }, 62 IGB_STAT("collisions", stats.colc),
60 { "tx_dropped", IGB_STAT(net_stats.tx_dropped) }, 63 IGB_STAT("rx_crc_errors", stats.crcerrs),
61 { "multicast", IGB_STAT(stats.mprc) }, 64 IGB_STAT("rx_no_buffer_count", stats.rnbc),
62 { "collisions", IGB_STAT(stats.colc) }, 65 IGB_STAT("rx_missed_errors", stats.mpc),
63 { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) }, 66 IGB_STAT("tx_aborted_errors", stats.ecol),
64 { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) }, 67 IGB_STAT("tx_carrier_errors", stats.tncrs),
65 { "rx_crc_errors", IGB_STAT(stats.crcerrs) }, 68 IGB_STAT("tx_window_errors", stats.latecol),
66 { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) }, 69 IGB_STAT("tx_abort_late_coll", stats.latecol),
67 { "rx_no_buffer_count", IGB_STAT(stats.rnbc) }, 70 IGB_STAT("tx_deferred_ok", stats.dc),
68 { "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) }, 71 IGB_STAT("tx_single_coll_ok", stats.scc),
69 { "rx_missed_errors", IGB_STAT(stats.mpc) }, 72 IGB_STAT("tx_multi_coll_ok", stats.mcc),
70 { "tx_aborted_errors", IGB_STAT(stats.ecol) }, 73 IGB_STAT("tx_timeout_count", tx_timeout_count),
71 { "tx_carrier_errors", IGB_STAT(stats.tncrs) }, 74 IGB_STAT("rx_long_length_errors", stats.roc),
72 { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) }, 75 IGB_STAT("rx_short_length_errors", stats.ruc),
73 { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) }, 76 IGB_STAT("rx_align_errors", stats.algnerrc),
74 { "tx_window_errors", IGB_STAT(stats.latecol) }, 77 IGB_STAT("tx_tcp_seg_good", stats.tsctc),
75 { "tx_abort_late_coll", IGB_STAT(stats.latecol) }, 78 IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
76 { "tx_deferred_ok", IGB_STAT(stats.dc) }, 79 IGB_STAT("rx_flow_control_xon", stats.xonrxc),
77 { "tx_single_coll_ok", IGB_STAT(stats.scc) }, 80 IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
78 { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, 81 IGB_STAT("tx_flow_control_xon", stats.xontxc),
79 { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, 82 IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
80 { "tx_restart_queue", IGB_STAT(restart_queue) }, 83 IGB_STAT("rx_long_byte_count", stats.gorc),
81 { "rx_long_length_errors", IGB_STAT(stats.roc) }, 84 IGB_STAT("tx_dma_out_of_sync", stats.doosync),
82 { "rx_short_length_errors", IGB_STAT(stats.ruc) }, 85 IGB_STAT("tx_smbus", stats.mgptc),
83 { "rx_align_errors", IGB_STAT(stats.algnerrc) }, 86 IGB_STAT("rx_smbus", stats.mgprc),
84 { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) }, 87 IGB_STAT("dropped_smbus", stats.mgpdc),
85 { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) }, 88};
86 { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) }, 89
87 { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, 90#define IGB_NETDEV_STAT(_net_stat) { \
88 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, 91 .stat_string = __stringify(_net_stat), \
89 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, 92 .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
90 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 93 .stat_offset = offsetof(struct net_device_stats, _net_stat) \
91 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) }, 94}
92 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) }, 95static const struct igb_stats igb_gstrings_net_stats[] = {
93 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, 96 IGB_NETDEV_STAT(rx_errors),
94 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) }, 97 IGB_NETDEV_STAT(tx_errors),
95 { "tx_smbus", IGB_STAT(stats.mgptc) }, 98 IGB_NETDEV_STAT(tx_dropped),
96 { "rx_smbus", IGB_STAT(stats.mgprc) }, 99 IGB_NETDEV_STAT(rx_length_errors),
97 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 100 IGB_NETDEV_STAT(rx_over_errors),
101 IGB_NETDEV_STAT(rx_frame_errors),
102 IGB_NETDEV_STAT(rx_fifo_errors),
103 IGB_NETDEV_STAT(tx_fifo_errors),
104 IGB_NETDEV_STAT(tx_heartbeat_errors)
98}; 105};
99 106
100#define IGB_QUEUE_STATS_LEN \
101 (((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \
102 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
103 ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
104 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
105#define IGB_GLOBAL_STATS_LEN \ 107#define IGB_GLOBAL_STATS_LEN \
106 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) 108 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
107#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) 109#define IGB_NETDEV_STATS_LEN \
110 (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
111#define IGB_RX_QUEUE_STATS_LEN \
112 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
113#define IGB_TX_QUEUE_STATS_LEN \
114 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
115#define IGB_QUEUE_STATS_LEN \
116 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
117 IGB_RX_QUEUE_STATS_LEN) + \
118 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
119 IGB_TX_QUEUE_STATS_LEN))
120#define IGB_STATS_LEN \
121 (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
122
108static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { 123static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
109 "Register test (offline)", "Eeprom test (offline)", 124 "Register test (offline)", "Eeprom test (offline)",
110 "Interrupt test (offline)", "Loopback test (offline)", 125 "Interrupt test (offline)", "Loopback test (offline)",
111 "Link test (on/offline)" 126 "Link test (on/offline)"
112}; 127};
113#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN 128#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
114 129
115static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 130static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
116{ 131{
117 struct igb_adapter *adapter = netdev_priv(netdev); 132 struct igb_adapter *adapter = netdev_priv(netdev);
118 struct e1000_hw *hw = &adapter->hw; 133 struct e1000_hw *hw = &adapter->hw;
134 u32 status;
119 135
120 if (hw->phy.media_type == e1000_media_type_copper) { 136 if (hw->phy.media_type == e1000_media_type_copper) {
121 137
@@ -150,17 +166,20 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
150 166
151 ecmd->transceiver = XCVR_INTERNAL; 167 ecmd->transceiver = XCVR_INTERNAL;
152 168
153 if (rd32(E1000_STATUS) & E1000_STATUS_LU) { 169 status = rd32(E1000_STATUS);
154 170
155 adapter->hw.mac.ops.get_speed_and_duplex(hw, 171 if (status & E1000_STATUS_LU) {
156 &adapter->link_speed,
157 &adapter->link_duplex);
158 ecmd->speed = adapter->link_speed;
159 172
160 /* unfortunately FULL_DUPLEX != DUPLEX_FULL 173 if ((status & E1000_STATUS_SPEED_1000) ||
161 * and HALF_DUPLEX != DUPLEX_HALF */ 174 hw->phy.media_type != e1000_media_type_copper)
175 ecmd->speed = SPEED_1000;
176 else if (status & E1000_STATUS_SPEED_100)
177 ecmd->speed = SPEED_100;
178 else
179 ecmd->speed = SPEED_10;
162 180
163 if (adapter->link_duplex == FULL_DUPLEX) 181 if ((status & E1000_STATUS_FD) ||
182 hw->phy.media_type != e1000_media_type_copper)
164 ecmd->duplex = DUPLEX_FULL; 183 ecmd->duplex = DUPLEX_FULL;
165 else 184 else
166 ecmd->duplex = DUPLEX_HALF; 185 ecmd->duplex = DUPLEX_HALF;
@@ -251,8 +270,9 @@ static int igb_set_pauseparam(struct net_device *netdev,
251 if (netif_running(adapter->netdev)) { 270 if (netif_running(adapter->netdev)) {
252 igb_down(adapter); 271 igb_down(adapter);
253 igb_up(adapter); 272 igb_up(adapter);
254 } else 273 } else {
255 igb_reset(adapter); 274 igb_reset(adapter);
275 }
256 } else { 276 } else {
257 if (pause->rx_pause && pause->tx_pause) 277 if (pause->rx_pause && pause->tx_pause)
258 hw->fc.requested_mode = e1000_fc_full; 278 hw->fc.requested_mode = e1000_fc_full;
@@ -276,17 +296,20 @@ static int igb_set_pauseparam(struct net_device *netdev,
276static u32 igb_get_rx_csum(struct net_device *netdev) 296static u32 igb_get_rx_csum(struct net_device *netdev)
277{ 297{
278 struct igb_adapter *adapter = netdev_priv(netdev); 298 struct igb_adapter *adapter = netdev_priv(netdev);
279 return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED); 299 return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
280} 300}
281 301
282static int igb_set_rx_csum(struct net_device *netdev, u32 data) 302static int igb_set_rx_csum(struct net_device *netdev, u32 data)
283{ 303{
284 struct igb_adapter *adapter = netdev_priv(netdev); 304 struct igb_adapter *adapter = netdev_priv(netdev);
305 int i;
285 306
286 if (data) 307 for (i = 0; i < adapter->num_rx_queues; i++) {
287 adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED; 308 if (data)
288 else 309 adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
289 adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED; 310 else
311 adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
312 }
290 313
291 return 0; 314 return 0;
292} 315}
@@ -302,7 +325,7 @@ static int igb_set_tx_csum(struct net_device *netdev, u32 data)
302 325
303 if (data) { 326 if (data) {
304 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 327 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
305 if (adapter->hw.mac.type == e1000_82576) 328 if (adapter->hw.mac.type >= e1000_82576)
306 netdev->features |= NETIF_F_SCTP_CSUM; 329 netdev->features |= NETIF_F_SCTP_CSUM;
307 } else { 330 } else {
308 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 331 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -496,19 +519,10 @@ static void igb_get_regs(struct net_device *netdev,
496 regs_buff[119] = adapter->stats.scvpc; 519 regs_buff[119] = adapter->stats.scvpc;
497 regs_buff[120] = adapter->stats.hrmpc; 520 regs_buff[120] = adapter->stats.hrmpc;
498 521
499 /* These should probably be added to e1000_regs.h instead */
500 #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
501 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
502 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
503 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
504 #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
505 #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
506 #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
507
508 for (i = 0; i < 4; i++) 522 for (i = 0; i < 4; i++)
509 regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); 523 regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
510 for (i = 0; i < 4; i++) 524 for (i = 0; i < 4; i++)
511 regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i)); 525 regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
512 for (i = 0; i < 4; i++) 526 for (i = 0; i < 4; i++)
513 regs_buff[129 + i] = rd32(E1000_RDBAL(i)); 527 regs_buff[129 + i] = rd32(E1000_RDBAL(i));
514 for (i = 0; i < 4; i++) 528 for (i = 0; i < 4; i++)
@@ -733,17 +747,17 @@ static int igb_set_ringparam(struct net_device *netdev,
733 struct igb_adapter *adapter = netdev_priv(netdev); 747 struct igb_adapter *adapter = netdev_priv(netdev);
734 struct igb_ring *temp_ring; 748 struct igb_ring *temp_ring;
735 int i, err = 0; 749 int i, err = 0;
736 u32 new_rx_count, new_tx_count; 750 u16 new_rx_count, new_tx_count;
737 751
738 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 752 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
739 return -EINVAL; 753 return -EINVAL;
740 754
741 new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD); 755 new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
742 new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD); 756 new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
743 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 757 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
744 758
745 new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD); 759 new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
746 new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD); 760 new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
747 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 761 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
748 762
749 if ((new_tx_count == adapter->tx_ring_count) && 763 if ((new_tx_count == adapter->tx_ring_count) &&
@@ -788,7 +802,7 @@ static int igb_set_ringparam(struct net_device *netdev,
788 802
789 for (i = 0; i < adapter->num_tx_queues; i++) { 803 for (i = 0; i < adapter->num_tx_queues; i++) {
790 temp_ring[i].count = new_tx_count; 804 temp_ring[i].count = new_tx_count;
791 err = igb_setup_tx_resources(adapter, &temp_ring[i]); 805 err = igb_setup_tx_resources(&temp_ring[i]);
792 if (err) { 806 if (err) {
793 while (i) { 807 while (i) {
794 i--; 808 i--;
@@ -813,7 +827,7 @@ static int igb_set_ringparam(struct net_device *netdev,
813 827
814 for (i = 0; i < adapter->num_rx_queues; i++) { 828 for (i = 0; i < adapter->num_rx_queues; i++) {
815 temp_ring[i].count = new_rx_count; 829 temp_ring[i].count = new_rx_count;
816 err = igb_setup_rx_resources(adapter, &temp_ring[i]); 830 err = igb_setup_rx_resources(&temp_ring[i]);
817 if (err) { 831 if (err) {
818 while (i) { 832 while (i) {
819 i--; 833 i--;
@@ -944,7 +958,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
944{ 958{
945 struct e1000_hw *hw = &adapter->hw; 959 struct e1000_hw *hw = &adapter->hw;
946 u32 pat, val; 960 u32 pat, val;
947 u32 _test[] = 961 static const u32 _test[] =
948 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 962 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
949 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 963 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
950 wr32(reg, (_test[pat] & write)); 964 wr32(reg, (_test[pat] & write));
@@ -957,6 +971,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
957 return 1; 971 return 1;
958 } 972 }
959 } 973 }
974
960 return 0; 975 return 0;
961} 976}
962 977
@@ -974,6 +989,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
974 *data = reg; 989 *data = reg;
975 return 1; 990 return 1;
976 } 991 }
992
977 return 0; 993 return 0;
978} 994}
979 995
@@ -996,14 +1012,14 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
996 u32 value, before, after; 1012 u32 value, before, after;
997 u32 i, toggle; 1013 u32 i, toggle;
998 1014
999 toggle = 0x7FFFF3FF;
1000
1001 switch (adapter->hw.mac.type) { 1015 switch (adapter->hw.mac.type) {
1002 case e1000_82576: 1016 case e1000_82576:
1003 test = reg_test_82576; 1017 test = reg_test_82576;
1018 toggle = 0x7FFFF3FF;
1004 break; 1019 break;
1005 default: 1020 default:
1006 test = reg_test_82575; 1021 test = reg_test_82575;
1022 toggle = 0x7FFFF3FF;
1007 break; 1023 break;
1008 } 1024 }
1009 1025
@@ -1081,8 +1097,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1081 *data = 0; 1097 *data = 0;
1082 /* Read and add up the contents of the EEPROM */ 1098 /* Read and add up the contents of the EEPROM */
1083 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 1099 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1084 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) 1100 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
1085 < 0) {
1086 *data = 1; 1101 *data = 1;
1087 break; 1102 break;
1088 } 1103 }
@@ -1098,8 +1113,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1098 1113
1099static irqreturn_t igb_test_intr(int irq, void *data) 1114static irqreturn_t igb_test_intr(int irq, void *data)
1100{ 1115{
1101 struct net_device *netdev = (struct net_device *) data; 1116 struct igb_adapter *adapter = (struct igb_adapter *) data;
1102 struct igb_adapter *adapter = netdev_priv(netdev);
1103 struct e1000_hw *hw = &adapter->hw; 1117 struct e1000_hw *hw = &adapter->hw;
1104 1118
1105 adapter->test_icr |= rd32(E1000_ICR); 1119 adapter->test_icr |= rd32(E1000_ICR);
@@ -1117,32 +1131,36 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1117 *data = 0; 1131 *data = 0;
1118 1132
1119 /* Hook up test interrupt handler just for this test */ 1133 /* Hook up test interrupt handler just for this test */
1120 if (adapter->msix_entries) 1134 if (adapter->msix_entries) {
1121 /* NOTE: we don't test MSI-X interrupts here, yet */ 1135 if (request_irq(adapter->msix_entries[0].vector,
1122 return 0; 1136 &igb_test_intr, 0, netdev->name, adapter)) {
1123 1137 *data = 1;
1124 if (adapter->flags & IGB_FLAG_HAS_MSI) { 1138 return -1;
1139 }
1140 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
1125 shared_int = false; 1141 shared_int = false;
1126 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { 1142 if (request_irq(irq,
1143 &igb_test_intr, 0, netdev->name, adapter)) {
1127 *data = 1; 1144 *data = 1;
1128 return -1; 1145 return -1;
1129 } 1146 }
1130 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED, 1147 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
1131 netdev->name, netdev)) { 1148 netdev->name, adapter)) {
1132 shared_int = false; 1149 shared_int = false;
1133 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, 1150 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
1134 netdev->name, netdev)) { 1151 netdev->name, adapter)) {
1135 *data = 1; 1152 *data = 1;
1136 return -1; 1153 return -1;
1137 } 1154 }
1138 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", 1155 dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
1139 (shared_int ? "shared" : "unshared")); 1156 (shared_int ? "shared" : "unshared"));
1157
1140 /* Disable all the interrupts */ 1158 /* Disable all the interrupts */
1141 wr32(E1000_IMC, 0xFFFFFFFF); 1159 wr32(E1000_IMC, ~0);
1142 msleep(10); 1160 msleep(10);
1143 1161
1144 /* Define all writable bits for ICS */ 1162 /* Define all writable bits for ICS */
1145 switch(hw->mac.type) { 1163 switch (hw->mac.type) {
1146 case e1000_82575: 1164 case e1000_82575:
1147 ics_mask = 0x37F47EDD; 1165 ics_mask = 0x37F47EDD;
1148 break; 1166 break;
@@ -1232,190 +1250,61 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1232 msleep(10); 1250 msleep(10);
1233 1251
1234 /* Unhook test interrupt handler */ 1252 /* Unhook test interrupt handler */
1235 free_irq(irq, netdev); 1253 if (adapter->msix_entries)
1254 free_irq(adapter->msix_entries[0].vector, adapter);
1255 else
1256 free_irq(irq, adapter);
1236 1257
1237 return *data; 1258 return *data;
1238} 1259}
1239 1260
1240static void igb_free_desc_rings(struct igb_adapter *adapter) 1261static void igb_free_desc_rings(struct igb_adapter *adapter)
1241{ 1262{
1242 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1263 igb_free_tx_resources(&adapter->test_tx_ring);
1243 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1264 igb_free_rx_resources(&adapter->test_rx_ring);
1244 struct pci_dev *pdev = adapter->pdev;
1245 int i;
1246
1247 if (tx_ring->desc && tx_ring->buffer_info) {
1248 for (i = 0; i < tx_ring->count; i++) {
1249 struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
1250 if (buf->dma)
1251 pci_unmap_single(pdev, buf->dma, buf->length,
1252 PCI_DMA_TODEVICE);
1253 if (buf->skb)
1254 dev_kfree_skb(buf->skb);
1255 }
1256 }
1257
1258 if (rx_ring->desc && rx_ring->buffer_info) {
1259 for (i = 0; i < rx_ring->count; i++) {
1260 struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
1261 if (buf->dma)
1262 pci_unmap_single(pdev, buf->dma,
1263 IGB_RXBUFFER_2048,
1264 PCI_DMA_FROMDEVICE);
1265 if (buf->skb)
1266 dev_kfree_skb(buf->skb);
1267 }
1268 }
1269
1270 if (tx_ring->desc) {
1271 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
1272 tx_ring->dma);
1273 tx_ring->desc = NULL;
1274 }
1275 if (rx_ring->desc) {
1276 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
1277 rx_ring->dma);
1278 rx_ring->desc = NULL;
1279 }
1280
1281 kfree(tx_ring->buffer_info);
1282 tx_ring->buffer_info = NULL;
1283 kfree(rx_ring->buffer_info);
1284 rx_ring->buffer_info = NULL;
1285
1286 return;
1287} 1265}
1288 1266
1289static int igb_setup_desc_rings(struct igb_adapter *adapter) 1267static int igb_setup_desc_rings(struct igb_adapter *adapter)
1290{ 1268{
1291 struct e1000_hw *hw = &adapter->hw;
1292 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1269 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1293 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1270 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1294 struct pci_dev *pdev = adapter->pdev; 1271 struct e1000_hw *hw = &adapter->hw;
1295 struct igb_buffer *buffer_info; 1272 int ret_val;
1296 u32 rctl;
1297 int i, ret_val;
1298 1273
1299 /* Setup Tx descriptor ring and Tx buffers */ 1274 /* Setup Tx descriptor ring and Tx buffers */
1275 tx_ring->count = IGB_DEFAULT_TXD;
1276 tx_ring->pdev = adapter->pdev;
1277 tx_ring->netdev = adapter->netdev;
1278 tx_ring->reg_idx = adapter->vfs_allocated_count;
1300 1279
1301 if (!tx_ring->count) 1280 if (igb_setup_tx_resources(tx_ring)) {
1302 tx_ring->count = IGB_DEFAULT_TXD;
1303
1304 tx_ring->buffer_info = kcalloc(tx_ring->count,
1305 sizeof(struct igb_buffer),
1306 GFP_KERNEL);
1307 if (!tx_ring->buffer_info) {
1308 ret_val = 1; 1281 ret_val = 1;
1309 goto err_nomem; 1282 goto err_nomem;
1310 } 1283 }
1311 1284
1312 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1285 igb_setup_tctl(adapter);
1313 tx_ring->size = ALIGN(tx_ring->size, 4096); 1286 igb_configure_tx_ring(adapter, tx_ring);
1314 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1315 &tx_ring->dma);
1316 if (!tx_ring->desc) {
1317 ret_val = 2;
1318 goto err_nomem;
1319 }
1320 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1321
1322 wr32(E1000_TDBAL(0),
1323 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1324 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
1325 wr32(E1000_TDLEN(0),
1326 tx_ring->count * sizeof(union e1000_adv_tx_desc));
1327 wr32(E1000_TDH(0), 0);
1328 wr32(E1000_TDT(0), 0);
1329 wr32(E1000_TCTL,
1330 E1000_TCTL_PSP | E1000_TCTL_EN |
1331 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1332 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1333
1334 for (i = 0; i < tx_ring->count; i++) {
1335 union e1000_adv_tx_desc *tx_desc;
1336 struct sk_buff *skb;
1337 unsigned int size = 1024;
1338
1339 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
1340 skb = alloc_skb(size, GFP_KERNEL);
1341 if (!skb) {
1342 ret_val = 3;
1343 goto err_nomem;
1344 }
1345 skb_put(skb, size);
1346 buffer_info = &tx_ring->buffer_info[i];
1347 buffer_info->skb = skb;
1348 buffer_info->length = skb->len;
1349 buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
1350 PCI_DMA_TODEVICE);
1351 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
1352 tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
1353 E1000_ADVTXD_PAYLEN_SHIFT;
1354 tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
1355 tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
1356 E1000_TXD_CMD_IFCS |
1357 E1000_TXD_CMD_RS |
1358 E1000_ADVTXD_DTYP_DATA |
1359 E1000_ADVTXD_DCMD_DEXT);
1360 }
1361 1287
1362 /* Setup Rx descriptor ring and Rx buffers */ 1288 /* Setup Rx descriptor ring and Rx buffers */
1363 1289 rx_ring->count = IGB_DEFAULT_RXD;
1364 if (!rx_ring->count) 1290 rx_ring->pdev = adapter->pdev;
1365 rx_ring->count = IGB_DEFAULT_RXD; 1291 rx_ring->netdev = adapter->netdev;
1366 1292 rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
1367 rx_ring->buffer_info = kcalloc(rx_ring->count, 1293 rx_ring->reg_idx = adapter->vfs_allocated_count;
1368 sizeof(struct igb_buffer), 1294
1369 GFP_KERNEL); 1295 if (igb_setup_rx_resources(rx_ring)) {
1370 if (!rx_ring->buffer_info) { 1296 ret_val = 3;
1371 ret_val = 4;
1372 goto err_nomem; 1297 goto err_nomem;
1373 } 1298 }
1374 1299
1375 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1300 /* set the default queue to queue 0 of PF */
1376 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1301 wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
1377 &rx_ring->dma);
1378 if (!rx_ring->desc) {
1379 ret_val = 5;
1380 goto err_nomem;
1381 }
1382 rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1383 1302
1384 rctl = rd32(E1000_RCTL); 1303 /* enable receive ring */
1385 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); 1304 igb_setup_rctl(adapter);
1386 wr32(E1000_RDBAL(0), 1305 igb_configure_rx_ring(adapter, rx_ring);
1387 ((u64) rx_ring->dma & 0xFFFFFFFF)); 1306
1388 wr32(E1000_RDBAH(0), 1307 igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
1389 ((u64) rx_ring->dma >> 32));
1390 wr32(E1000_RDLEN(0), rx_ring->size);
1391 wr32(E1000_RDH(0), 0);
1392 wr32(E1000_RDT(0), 0);
1393 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1394 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
1395 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1396 wr32(E1000_RCTL, rctl);
1397 wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
1398
1399 for (i = 0; i < rx_ring->count; i++) {
1400 union e1000_adv_rx_desc *rx_desc;
1401 struct sk_buff *skb;
1402
1403 buffer_info = &rx_ring->buffer_info[i];
1404 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
1405 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
1406 GFP_KERNEL);
1407 if (!skb) {
1408 ret_val = 6;
1409 goto err_nomem;
1410 }
1411 skb_reserve(skb, NET_IP_ALIGN);
1412 buffer_info->skb = skb;
1413 buffer_info->dma = pci_map_single(pdev, skb->data,
1414 IGB_RXBUFFER_2048,
1415 PCI_DMA_FROMDEVICE);
1416 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
1417 memset(skb->data, 0x00, skb->len);
1418 }
1419 1308
1420 return 0; 1309 return 0;
1421 1310
@@ -1491,7 +1380,10 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1491 struct e1000_hw *hw = &adapter->hw; 1380 struct e1000_hw *hw = &adapter->hw;
1492 u32 reg; 1381 u32 reg;
1493 1382
1494 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1383 reg = rd32(E1000_CTRL_EXT);
1384
1385 /* use CTRL_EXT to identify link type as SGMII can appear as copper */
1386 if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
1495 reg = rd32(E1000_RCTL); 1387 reg = rd32(E1000_RCTL);
1496 reg |= E1000_RCTL_LBM_TCVR; 1388 reg |= E1000_RCTL_LBM_TCVR;
1497 wr32(E1000_RCTL, reg); 1389 wr32(E1000_RCTL, reg);
@@ -1522,11 +1414,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1522 wr32(E1000_PCS_LCTL, reg); 1414 wr32(E1000_PCS_LCTL, reg);
1523 1415
1524 return 0; 1416 return 0;
1525 } else if (hw->phy.media_type == e1000_media_type_copper) {
1526 return igb_set_phy_loopback(adapter);
1527 } 1417 }
1528 1418
1529 return 7; 1419 return igb_set_phy_loopback(adapter);
1530} 1420}
1531 1421
1532static void igb_loopback_cleanup(struct igb_adapter *adapter) 1422static void igb_loopback_cleanup(struct igb_adapter *adapter)
@@ -1552,35 +1442,99 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
1552 unsigned int frame_size) 1442 unsigned int frame_size)
1553{ 1443{
1554 memset(skb->data, 0xFF, frame_size); 1444 memset(skb->data, 0xFF, frame_size);
1555 frame_size &= ~1; 1445 frame_size /= 2;
1556 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1446 memset(&skb->data[frame_size], 0xAA, frame_size - 1);
1557 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1447 memset(&skb->data[frame_size + 10], 0xBE, 1);
1558 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1448 memset(&skb->data[frame_size + 12], 0xAF, 1);
1559} 1449}
1560 1450
1561static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1451static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1562{ 1452{
1563 frame_size &= ~1; 1453 frame_size /= 2;
1564 if (*(skb->data + 3) == 0xFF) 1454 if (*(skb->data + 3) == 0xFF) {
1565 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1455 if ((*(skb->data + frame_size + 10) == 0xBE) &&
1566 (*(skb->data + frame_size / 2 + 12) == 0xAF)) 1456 (*(skb->data + frame_size + 12) == 0xAF)) {
1567 return 0; 1457 return 0;
1458 }
1459 }
1568 return 13; 1460 return 13;
1569} 1461}
1570 1462
1463static int igb_clean_test_rings(struct igb_ring *rx_ring,
1464 struct igb_ring *tx_ring,
1465 unsigned int size)
1466{
1467 union e1000_adv_rx_desc *rx_desc;
1468 struct igb_buffer *buffer_info;
1469 int rx_ntc, tx_ntc, count = 0;
1470 u32 staterr;
1471
1472 /* initialize next to clean and descriptor values */
1473 rx_ntc = rx_ring->next_to_clean;
1474 tx_ntc = tx_ring->next_to_clean;
1475 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1476 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1477
1478 while (staterr & E1000_RXD_STAT_DD) {
1479 /* check rx buffer */
1480 buffer_info = &rx_ring->buffer_info[rx_ntc];
1481
1482 /* unmap rx buffer, will be remapped by alloc_rx_buffers */
1483 pci_unmap_single(rx_ring->pdev,
1484 buffer_info->dma,
1485 rx_ring->rx_buffer_len,
1486 PCI_DMA_FROMDEVICE);
1487 buffer_info->dma = 0;
1488
1489 /* verify contents of skb */
1490 if (!igb_check_lbtest_frame(buffer_info->skb, size))
1491 count++;
1492
1493 /* unmap buffer on tx side */
1494 buffer_info = &tx_ring->buffer_info[tx_ntc];
1495 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
1496
1497 /* increment rx/tx next to clean counters */
1498 rx_ntc++;
1499 if (rx_ntc == rx_ring->count)
1500 rx_ntc = 0;
1501 tx_ntc++;
1502 if (tx_ntc == tx_ring->count)
1503 tx_ntc = 0;
1504
1505 /* fetch next descriptor */
1506 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1507 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1508 }
1509
1510 /* re-map buffers to ring, store next to clean values */
1511 igb_alloc_rx_buffers_adv(rx_ring, count);
1512 rx_ring->next_to_clean = rx_ntc;
1513 tx_ring->next_to_clean = tx_ntc;
1514
1515 return count;
1516}
1517
1571static int igb_run_loopback_test(struct igb_adapter *adapter) 1518static int igb_run_loopback_test(struct igb_adapter *adapter)
1572{ 1519{
1573 struct e1000_hw *hw = &adapter->hw;
1574 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1520 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1575 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1521 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1576 struct pci_dev *pdev = adapter->pdev; 1522 int i, j, lc, good_cnt, ret_val = 0;
1577 int i, j, k, l, lc, good_cnt; 1523 unsigned int size = 1024;
1578 int ret_val = 0; 1524 netdev_tx_t tx_ret_val;
1579 unsigned long time; 1525 struct sk_buff *skb;
1580 1526
1581 wr32(E1000_RDT(0), rx_ring->count - 1); 1527 /* allocate test skb */
1528 skb = alloc_skb(size, GFP_KERNEL);
1529 if (!skb)
1530 return 11;
1582 1531
1583 /* Calculate the loop count based on the largest descriptor ring 1532 /* place data into test skb */
1533 igb_create_lbtest_frame(skb, size);
1534 skb_put(skb, size);
1535
1536 /*
1537 * Calculate the loop count based on the largest descriptor ring
1584 * The idea is to wrap the largest ring a number of times using 64 1538 * The idea is to wrap the largest ring a number of times using 64
1585 * send/receive pairs during each loop 1539 * send/receive pairs during each loop
1586 */ 1540 */
@@ -1590,50 +1544,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1590 else 1544 else
1591 lc = ((rx_ring->count / 64) * 2) + 1; 1545 lc = ((rx_ring->count / 64) * 2) + 1;
1592 1546
1593 k = l = 0;
1594 for (j = 0; j <= lc; j++) { /* loop count loop */ 1547 for (j = 0; j <= lc; j++) { /* loop count loop */
1595 for (i = 0; i < 64; i++) { /* send the packets */ 1548 /* reset count of good packets */
1596 igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1597 1024);
1598 pci_dma_sync_single_for_device(pdev,
1599 tx_ring->buffer_info[k].dma,
1600 tx_ring->buffer_info[k].length,
1601 PCI_DMA_TODEVICE);
1602 k++;
1603 if (k == tx_ring->count)
1604 k = 0;
1605 }
1606 wr32(E1000_TDT(0), k);
1607 msleep(200);
1608 time = jiffies; /* set the start time for the receive */
1609 good_cnt = 0; 1549 good_cnt = 0;
1610 do { /* receive the sent packets */ 1550
1611 pci_dma_sync_single_for_cpu(pdev, 1551 /* place 64 packets on the transmit queue*/
1612 rx_ring->buffer_info[l].dma, 1552 for (i = 0; i < 64; i++) {
1613 IGB_RXBUFFER_2048, 1553 skb_get(skb);
1614 PCI_DMA_FROMDEVICE); 1554 tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
1615 1555 if (tx_ret_val == NETDEV_TX_OK)
1616 ret_val = igb_check_lbtest_frame(
1617 rx_ring->buffer_info[l].skb, 1024);
1618 if (!ret_val)
1619 good_cnt++; 1556 good_cnt++;
1620 l++; 1557 }
1621 if (l == rx_ring->count) 1558
1622 l = 0;
1623 /* time + 20 msecs (200 msecs on 2.4) is more than
1624 * enough time to complete the receives, if it's
1625 * exceeded, break and error off
1626 */
1627 } while (good_cnt < 64 && jiffies < (time + 20));
1628 if (good_cnt != 64) { 1559 if (good_cnt != 64) {
1629 ret_val = 13; /* ret_val is the same as mis-compare */ 1560 ret_val = 12;
1630 break; 1561 break;
1631 } 1562 }
1632 if (jiffies >= (time + 20)) { 1563
1633 ret_val = 14; /* error code for time out error */ 1564 /* allow 200 milliseconds for packets to go from tx to rx */
1565 msleep(200);
1566
1567 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
1568 if (good_cnt != 64) {
1569 ret_val = 13;
1634 break; 1570 break;
1635 } 1571 }
1636 } /* end loop count loop */ 1572 } /* end loop count loop */
1573
1574 /* free the original skb */
1575 kfree_skb(skb);
1576
1637 return ret_val; 1577 return ret_val;
1638} 1578}
1639 1579
@@ -1686,8 +1626,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1686 if (hw->mac.autoneg) 1626 if (hw->mac.autoneg)
1687 msleep(4000); 1627 msleep(4000);
1688 1628
1689 if (!(rd32(E1000_STATUS) & 1629 if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
1690 E1000_STATUS_LU))
1691 *data = 1; 1630 *data = 1;
1692 } 1631 }
1693 return *data; 1632 return *data;
@@ -1869,7 +1808,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1869 adapter->wol |= E1000_WUFC_BC; 1808 adapter->wol |= E1000_WUFC_BC;
1870 if (wol->wolopts & WAKE_MAGIC) 1809 if (wol->wolopts & WAKE_MAGIC)
1871 adapter->wol |= E1000_WUFC_MAG; 1810 adapter->wol |= E1000_WUFC_MAG;
1872
1873 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1811 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1874 1812
1875 return 0; 1813 return 0;
@@ -1882,12 +1820,19 @@ static int igb_phys_id(struct net_device *netdev, u32 data)
1882{ 1820{
1883 struct igb_adapter *adapter = netdev_priv(netdev); 1821 struct igb_adapter *adapter = netdev_priv(netdev);
1884 struct e1000_hw *hw = &adapter->hw; 1822 struct e1000_hw *hw = &adapter->hw;
1823 unsigned long timeout;
1885 1824
1886 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 1825 timeout = data * 1000;
1887 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); 1826
1827 /*
1828 * msleep_interruptible only accepts an unsigned int, so we are limited
1829 * in how long we can wait
1830 */
1831 if (!timeout || timeout > UINT_MAX)
1832 timeout = UINT_MAX;
1888 1833
1889 igb_blink_led(hw); 1834 igb_blink_led(hw);
1890 msleep_interruptible(data * 1000); 1835 msleep_interruptible(timeout);
1891 1836
1892 igb_led_off(hw); 1837 igb_led_off(hw);
1893 clear_bit(IGB_LED_ON, &adapter->led_status); 1838 clear_bit(IGB_LED_ON, &adapter->led_status);
@@ -1900,7 +1845,6 @@ static int igb_set_coalesce(struct net_device *netdev,
1900 struct ethtool_coalesce *ec) 1845 struct ethtool_coalesce *ec)
1901{ 1846{
1902 struct igb_adapter *adapter = netdev_priv(netdev); 1847 struct igb_adapter *adapter = netdev_priv(netdev);
1903 struct e1000_hw *hw = &adapter->hw;
1904 int i; 1848 int i;
1905 1849
1906 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || 1850 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1909,17 +1853,39 @@ static int igb_set_coalesce(struct net_device *netdev,
1909 (ec->rx_coalesce_usecs == 2)) 1853 (ec->rx_coalesce_usecs == 2))
1910 return -EINVAL; 1854 return -EINVAL;
1911 1855
1856 if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1857 ((ec->tx_coalesce_usecs > 3) &&
1858 (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1859 (ec->tx_coalesce_usecs == 2))
1860 return -EINVAL;
1861
1862 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
1863 return -EINVAL;
1864
1912 /* convert to rate of irq's per second */ 1865 /* convert to rate of irq's per second */
1913 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { 1866 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
1914 adapter->itr_setting = ec->rx_coalesce_usecs; 1867 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
1915 adapter->itr = IGB_START_ITR; 1868 else
1916 } else { 1869 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
1917 adapter->itr_setting = ec->rx_coalesce_usecs << 2;
1918 adapter->itr = adapter->itr_setting;
1919 }
1920 1870
1921 for (i = 0; i < adapter->num_rx_queues; i++) 1871 /* convert to rate of irq's per second */
1922 wr32(adapter->rx_ring[i].itr_register, adapter->itr); 1872 if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
1873 adapter->tx_itr_setting = adapter->rx_itr_setting;
1874 else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
1875 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
1876 else
1877 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
1878
1879 for (i = 0; i < adapter->num_q_vectors; i++) {
1880 struct igb_q_vector *q_vector = adapter->q_vector[i];
1881 if (q_vector->rx_ring)
1882 q_vector->itr_val = adapter->rx_itr_setting;
1883 else
1884 q_vector->itr_val = adapter->tx_itr_setting;
1885 if (q_vector->itr_val && q_vector->itr_val <= 3)
1886 q_vector->itr_val = IGB_START_ITR;
1887 q_vector->set_itr = 1;
1888 }
1923 1889
1924 return 0; 1890 return 0;
1925} 1891}
@@ -1929,15 +1895,21 @@ static int igb_get_coalesce(struct net_device *netdev,
1929{ 1895{
1930 struct igb_adapter *adapter = netdev_priv(netdev); 1896 struct igb_adapter *adapter = netdev_priv(netdev);
1931 1897
1932 if (adapter->itr_setting <= 3) 1898 if (adapter->rx_itr_setting <= 3)
1933 ec->rx_coalesce_usecs = adapter->itr_setting; 1899 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
1934 else 1900 else
1935 ec->rx_coalesce_usecs = adapter->itr_setting >> 2; 1901 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
1902
1903 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
1904 if (adapter->tx_itr_setting <= 3)
1905 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
1906 else
1907 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
1908 }
1936 1909
1937 return 0; 1910 return 0;
1938} 1911}
1939 1912
1940
1941static int igb_nway_reset(struct net_device *netdev) 1913static int igb_nway_reset(struct net_device *netdev)
1942{ 1914{
1943 struct igb_adapter *adapter = netdev_priv(netdev); 1915 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1962,31 +1934,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1962 struct ethtool_stats *stats, u64 *data) 1934 struct ethtool_stats *stats, u64 *data)
1963{ 1935{
1964 struct igb_adapter *adapter = netdev_priv(netdev); 1936 struct igb_adapter *adapter = netdev_priv(netdev);
1937 struct net_device_stats *net_stats = &netdev->stats;
1965 u64 *queue_stat; 1938 u64 *queue_stat;
1966 int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64); 1939 int i, j, k;
1967 int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64); 1940 char *p;
1968 int j;
1969 int i;
1970 1941
1971 igb_update_stats(adapter); 1942 igb_update_stats(adapter);
1943
1972 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1944 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1973 char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset; 1945 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
1974 data[i] = (igb_gstrings_stats[i].sizeof_stat == 1946 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
1975 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1947 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1976 } 1948 }
1949 for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
1950 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
1951 data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
1952 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1953 }
1977 for (j = 0; j < adapter->num_tx_queues; j++) { 1954 for (j = 0; j < adapter->num_tx_queues; j++) {
1978 int k;
1979 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; 1955 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
1980 for (k = 0; k < stat_count_tx; k++) 1956 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
1981 data[i + k] = queue_stat[k]; 1957 data[i] = queue_stat[k];
1982 i += k;
1983 } 1958 }
1984 for (j = 0; j < adapter->num_rx_queues; j++) { 1959 for (j = 0; j < adapter->num_rx_queues; j++) {
1985 int k;
1986 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; 1960 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
1987 for (k = 0; k < stat_count_rx; k++) 1961 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
1988 data[i + k] = queue_stat[k]; 1962 data[i] = queue_stat[k];
1989 i += k;
1990 } 1963 }
1991} 1964}
1992 1965
@@ -2007,11 +1980,18 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2007 ETH_GSTRING_LEN); 1980 ETH_GSTRING_LEN);
2008 p += ETH_GSTRING_LEN; 1981 p += ETH_GSTRING_LEN;
2009 } 1982 }
1983 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
1984 memcpy(p, igb_gstrings_net_stats[i].stat_string,
1985 ETH_GSTRING_LEN);
1986 p += ETH_GSTRING_LEN;
1987 }
2010 for (i = 0; i < adapter->num_tx_queues; i++) { 1988 for (i = 0; i < adapter->num_tx_queues; i++) {
2011 sprintf(p, "tx_queue_%u_packets", i); 1989 sprintf(p, "tx_queue_%u_packets", i);
2012 p += ETH_GSTRING_LEN; 1990 p += ETH_GSTRING_LEN;
2013 sprintf(p, "tx_queue_%u_bytes", i); 1991 sprintf(p, "tx_queue_%u_bytes", i);
2014 p += ETH_GSTRING_LEN; 1992 p += ETH_GSTRING_LEN;
1993 sprintf(p, "tx_queue_%u_restart", i);
1994 p += ETH_GSTRING_LEN;
2015 } 1995 }
2016 for (i = 0; i < adapter->num_rx_queues; i++) { 1996 for (i = 0; i < adapter->num_rx_queues; i++) {
2017 sprintf(p, "rx_queue_%u_packets", i); 1997 sprintf(p, "rx_queue_%u_packets", i);
@@ -2020,6 +2000,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2020 p += ETH_GSTRING_LEN; 2000 p += ETH_GSTRING_LEN;
2021 sprintf(p, "rx_queue_%u_drops", i); 2001 sprintf(p, "rx_queue_%u_drops", i);
2022 p += ETH_GSTRING_LEN; 2002 p += ETH_GSTRING_LEN;
2003 sprintf(p, "rx_queue_%u_csum_err", i);
2004 p += ETH_GSTRING_LEN;
2005 sprintf(p, "rx_queue_%u_alloc_failed", i);
2006 p += ETH_GSTRING_LEN;
2023 } 2007 }
2024/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ 2008/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2025 break; 2009 break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 714c3a4a44e..0cab5e2b089 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -63,6 +63,7 @@ static const struct e1000_info *igb_info_tbl[] = {
63static struct pci_device_id igb_pci_tbl[] = { 63static struct pci_device_id igb_pci_tbl[] = {
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, 65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, 67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, 68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, 69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
@@ -81,6 +82,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
81static int igb_setup_all_rx_resources(struct igb_adapter *); 82static int igb_setup_all_rx_resources(struct igb_adapter *);
82static void igb_free_all_tx_resources(struct igb_adapter *); 83static void igb_free_all_tx_resources(struct igb_adapter *);
83static void igb_free_all_rx_resources(struct igb_adapter *); 84static void igb_free_all_rx_resources(struct igb_adapter *);
85static void igb_setup_mrqc(struct igb_adapter *);
84void igb_update_stats(struct igb_adapter *); 86void igb_update_stats(struct igb_adapter *);
85static int igb_probe(struct pci_dev *, const struct pci_device_id *); 87static int igb_probe(struct pci_dev *, const struct pci_device_id *);
86static void __devexit igb_remove(struct pci_dev *pdev); 88static void __devexit igb_remove(struct pci_dev *pdev);
@@ -89,7 +91,6 @@ static int igb_open(struct net_device *);
89static int igb_close(struct net_device *); 91static int igb_close(struct net_device *);
90static void igb_configure_tx(struct igb_adapter *); 92static void igb_configure_tx(struct igb_adapter *);
91static void igb_configure_rx(struct igb_adapter *); 93static void igb_configure_rx(struct igb_adapter *);
92static void igb_setup_rctl(struct igb_adapter *);
93static void igb_clean_all_tx_rings(struct igb_adapter *); 94static void igb_clean_all_tx_rings(struct igb_adapter *);
94static void igb_clean_all_rx_rings(struct igb_adapter *); 95static void igb_clean_all_rx_rings(struct igb_adapter *);
95static void igb_clean_tx_ring(struct igb_ring *); 96static void igb_clean_tx_ring(struct igb_ring *);
@@ -98,28 +99,22 @@ static void igb_set_rx_mode(struct net_device *);
98static void igb_update_phy_info(unsigned long); 99static void igb_update_phy_info(unsigned long);
99static void igb_watchdog(unsigned long); 100static void igb_watchdog(unsigned long);
100static void igb_watchdog_task(struct work_struct *); 101static void igb_watchdog_task(struct work_struct *);
101static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, 102static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
102 struct net_device *,
103 struct igb_ring *);
104static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
105 struct net_device *);
106static struct net_device_stats *igb_get_stats(struct net_device *); 103static struct net_device_stats *igb_get_stats(struct net_device *);
107static int igb_change_mtu(struct net_device *, int); 104static int igb_change_mtu(struct net_device *, int);
108static int igb_set_mac(struct net_device *, void *); 105static int igb_set_mac(struct net_device *, void *);
106static void igb_set_uta(struct igb_adapter *adapter);
109static irqreturn_t igb_intr(int irq, void *); 107static irqreturn_t igb_intr(int irq, void *);
110static irqreturn_t igb_intr_msi(int irq, void *); 108static irqreturn_t igb_intr_msi(int irq, void *);
111static irqreturn_t igb_msix_other(int irq, void *); 109static irqreturn_t igb_msix_other(int irq, void *);
112static irqreturn_t igb_msix_rx(int irq, void *); 110static irqreturn_t igb_msix_ring(int irq, void *);
113static irqreturn_t igb_msix_tx(int irq, void *);
114#ifdef CONFIG_IGB_DCA 111#ifdef CONFIG_IGB_DCA
115static void igb_update_rx_dca(struct igb_ring *); 112static void igb_update_dca(struct igb_q_vector *);
116static void igb_update_tx_dca(struct igb_ring *);
117static void igb_setup_dca(struct igb_adapter *); 113static void igb_setup_dca(struct igb_adapter *);
118#endif /* CONFIG_IGB_DCA */ 114#endif /* CONFIG_IGB_DCA */
119static bool igb_clean_tx_irq(struct igb_ring *); 115static bool igb_clean_tx_irq(struct igb_q_vector *);
120static int igb_poll(struct napi_struct *, int); 116static int igb_poll(struct napi_struct *, int);
121static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 117static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
122static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
123static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 118static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
124static void igb_tx_timeout(struct net_device *); 119static void igb_tx_timeout(struct net_device *);
125static void igb_reset_task(struct work_struct *); 120static void igb_reset_task(struct work_struct *);
@@ -127,57 +122,13 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
127static void igb_vlan_rx_add_vid(struct net_device *, u16); 122static void igb_vlan_rx_add_vid(struct net_device *, u16);
128static void igb_vlan_rx_kill_vid(struct net_device *, u16); 123static void igb_vlan_rx_kill_vid(struct net_device *, u16);
129static void igb_restore_vlan(struct igb_adapter *); 124static void igb_restore_vlan(struct igb_adapter *);
125static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
130static void igb_ping_all_vfs(struct igb_adapter *); 126static void igb_ping_all_vfs(struct igb_adapter *);
131static void igb_msg_task(struct igb_adapter *); 127static void igb_msg_task(struct igb_adapter *);
132static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
133static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
134static void igb_vmm_control(struct igb_adapter *); 128static void igb_vmm_control(struct igb_adapter *);
135static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); 129static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
136static void igb_restore_vf_multicasts(struct igb_adapter *adapter); 130static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
137 131
138static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
139{
140 u32 reg_data;
141
142 reg_data = rd32(E1000_VMOLR(vfn));
143 reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
144 E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
145 E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
146 E1000_VMOLR_AUPE | /* Accept untagged packets */
147 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
148 wr32(E1000_VMOLR(vfn), reg_data);
149}
150
151static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
152 int vfn)
153{
154 struct e1000_hw *hw = &adapter->hw;
155 u32 vmolr;
156
157 /* if it isn't the PF check to see if VFs are enabled and
158 * increase the size to support vlan tags */
159 if (vfn < adapter->vfs_allocated_count &&
160 adapter->vf_data[vfn].vlans_enabled)
161 size += VLAN_TAG_SIZE;
162
163 vmolr = rd32(E1000_VMOLR(vfn));
164 vmolr &= ~E1000_VMOLR_RLPML_MASK;
165 vmolr |= size | E1000_VMOLR_LPE;
166 wr32(E1000_VMOLR(vfn), vmolr);
167
168 return 0;
169}
170
171static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
172{
173 u32 reg_data;
174
175 reg_data = rd32(E1000_RAH(entry));
176 reg_data &= ~E1000_RAH_POOL_MASK;
177 reg_data |= E1000_RAH_POOL_1 << pool;;
178 wr32(E1000_RAH(entry), reg_data);
179}
180
181#ifdef CONFIG_PM 132#ifdef CONFIG_PM
182static int igb_suspend(struct pci_dev *, pm_message_t); 133static int igb_suspend(struct pci_dev *, pm_message_t);
183static int igb_resume(struct pci_dev *); 134static int igb_resume(struct pci_dev *);
@@ -228,46 +179,12 @@ static struct pci_driver igb_driver = {
228 .err_handler = &igb_err_handler 179 .err_handler = &igb_err_handler
229}; 180};
230 181
231static int global_quad_port_a; /* global quad port a indication */
232
233MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); 182MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
234MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); 183MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
235MODULE_LICENSE("GPL"); 184MODULE_LICENSE("GPL");
236MODULE_VERSION(DRV_VERSION); 185MODULE_VERSION(DRV_VERSION);
237 186
238/** 187/**
239 * Scale the NIC clock cycle by a large factor so that
240 * relatively small clock corrections can be added or
 241 * subtracted at each clock tick. The drawbacks of a
242 * large factor are a) that the clock register overflows
243 * more quickly (not such a big deal) and b) that the
244 * increment per tick has to fit into 24 bits.
245 *
246 * Note that
247 * TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
248 * IGB_TSYNC_SCALE
249 * TIMINCA += TIMINCA * adjustment [ppm] / 1e9
250 *
251 * The base scale factor is intentionally a power of two
252 * so that the division in %struct timecounter can be done with
253 * a shift.
254 */
255#define IGB_TSYNC_SHIFT (19)
256#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
257
258/**
259 * The duration of one clock cycle of the NIC.
260 *
261 * @todo This hard-coded value is part of the specification and might change
262 * in future hardware revisions. Add revision check.
263 */
264#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
265
266#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
267# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
268#endif
269
270/**
271 * igb_read_clock - read raw cycle counter (to be used by time counter) 188 * igb_read_clock - read raw cycle counter (to be used by time counter)
272 */ 189 */
273static cycle_t igb_read_clock(const struct cyclecounter *tc) 190static cycle_t igb_read_clock(const struct cyclecounter *tc)
@@ -275,11 +192,11 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
275 struct igb_adapter *adapter = 192 struct igb_adapter *adapter =
276 container_of(tc, struct igb_adapter, cycles); 193 container_of(tc, struct igb_adapter, cycles);
277 struct e1000_hw *hw = &adapter->hw; 194 struct e1000_hw *hw = &adapter->hw;
278 u64 stamp; 195 u64 stamp = 0;
279 196 int shift = 0;
280 stamp = rd32(E1000_SYSTIML);
281 stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;
282 197
198 stamp |= (u64)rd32(E1000_SYSTIML) << shift;
199 stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
283 return stamp; 200 return stamp;
284} 201}
285 202
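igb_read_clock now assembles the 64-bit cycle count by OR-ing SYSTIML into the low word and SYSTIMH into the high word (shift is 0 on this hardware; it simply keeps the expression general). A rough stand-alone model of that composition, with dummy reads in place of rd32(); the low word is read before the high word, which matters on parts where the low read latches the high half:

#include <stdint.h>
#include <stdio.h>

/* dummy MMIO reads standing in for rd32(E1000_SYSTIML/H) */
static uint32_t fake_systiml(void) { return 0x89abcdefu; }
static uint32_t fake_systimh(void) { return 0x01234567u; }

static uint64_t read_clock(void)
{
	int shift = 0;		/* matches the driver: no extra scaling here */
	uint64_t stamp = 0;

	/* low half first, then high half shifted into bits 63..32 */
	stamp |= (uint64_t)fake_systiml() << shift;
	stamp |= (uint64_t)fake_systimh() << (shift + 32);
	return stamp;
}

int main(void)
{
	printf("stamp = 0x%016llx\n", (unsigned long long)read_clock());
	return 0;
}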
@@ -320,17 +237,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
320#endif 237#endif
321 238
322/** 239/**
323 * igb_desc_unused - calculate if we have unused descriptors
324 **/
325static int igb_desc_unused(struct igb_ring *ring)
326{
327 if (ring->next_to_clean > ring->next_to_use)
328 return ring->next_to_clean - ring->next_to_use - 1;
329
330 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
331}
332
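The removed igb_desc_unused() helper computes how many descriptors are free in a circular ring: the gap between next_to_clean and next_to_use minus one, wrapping by ring->count when next_to_use has already passed next_to_clean. A tiny self-contained check of that arithmetic:

#include <stdio.h>

struct ring { int count, next_to_use, next_to_clean; };

static int desc_unused(const struct ring *r)
{
	if (r->next_to_clean > r->next_to_use)
		return r->next_to_clean - r->next_to_use - 1;
	return r->count + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct ring a = { .count = 256, .next_to_use = 10, .next_to_clean = 5 };
	struct ring b = { .count = 256, .next_to_use = 5, .next_to_clean = 10 };

	printf("%d %d\n", desc_unused(&a), desc_unused(&b)); /* 250 and 4 */
	return 0;
}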
333/**
334 * igb_init_module - Driver Registration Routine 240 * igb_init_module - Driver Registration Routine
335 * 241 *
336 * igb_init_module is the first routine called when the driver is 242 * igb_init_module is the first routine called when the driver is
@@ -344,12 +250,9 @@ static int __init igb_init_module(void)
344 250
345 printk(KERN_INFO "%s\n", igb_copyright); 251 printk(KERN_INFO "%s\n", igb_copyright);
346 252
347 global_quad_port_a = 0;
348
349#ifdef CONFIG_IGB_DCA 253#ifdef CONFIG_IGB_DCA
350 dca_register_notify(&dca_notifier); 254 dca_register_notify(&dca_notifier);
351#endif 255#endif
352
353 ret = pci_register_driver(&igb_driver); 256 ret = pci_register_driver(&igb_driver);
354 return ret; 257 return ret;
355} 258}
@@ -382,8 +285,8 @@ module_exit(igb_exit_module);
382 **/ 285 **/
383static void igb_cache_ring_register(struct igb_adapter *adapter) 286static void igb_cache_ring_register(struct igb_adapter *adapter)
384{ 287{
385 int i; 288 int i = 0, j = 0;
386 unsigned int rbase_offset = adapter->vfs_allocated_count; 289 u32 rbase_offset = adapter->vfs_allocated_count;
387 290
388 switch (adapter->hw.mac.type) { 291 switch (adapter->hw.mac.type) {
389 case e1000_82576: 292 case e1000_82576:
@@ -392,23 +295,36 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
392 * In order to avoid collision we start at the first free queue 295 * In order to avoid collision we start at the first free queue
393 * and continue consuming queues in the same sequence 296 * and continue consuming queues in the same sequence
394 */ 297 */
395 for (i = 0; i < adapter->num_rx_queues; i++) 298 if (adapter->vfs_allocated_count) {
396 adapter->rx_ring[i].reg_idx = rbase_offset + 299 for (; i < adapter->rss_queues; i++)
397 Q_IDX_82576(i); 300 adapter->rx_ring[i].reg_idx = rbase_offset +
398 for (i = 0; i < adapter->num_tx_queues; i++) 301 Q_IDX_82576(i);
399 adapter->tx_ring[i].reg_idx = rbase_offset + 302 for (; j < adapter->rss_queues; j++)
400 Q_IDX_82576(i); 303 adapter->tx_ring[j].reg_idx = rbase_offset +
401 break; 304 Q_IDX_82576(j);
305 }
402 case e1000_82575: 306 case e1000_82575:
403 default: 307 default:
404 for (i = 0; i < adapter->num_rx_queues; i++) 308 for (; i < adapter->num_rx_queues; i++)
405 adapter->rx_ring[i].reg_idx = i; 309 adapter->rx_ring[i].reg_idx = rbase_offset + i;
406 for (i = 0; i < adapter->num_tx_queues; i++) 310 for (; j < adapter->num_tx_queues; j++)
407 adapter->tx_ring[i].reg_idx = i; 311 adapter->tx_ring[j].reg_idx = rbase_offset + j;
408 break; 312 break;
409 } 313 }
410} 314}
411 315
316static void igb_free_queues(struct igb_adapter *adapter)
317{
318 kfree(adapter->tx_ring);
319 kfree(adapter->rx_ring);
320
321 adapter->tx_ring = NULL;
322 adapter->rx_ring = NULL;
323
324 adapter->num_rx_queues = 0;
325 adapter->num_tx_queues = 0;
326}
327
412/** 328/**
413 * igb_alloc_queues - Allocate memory for all rings 329 * igb_alloc_queues - Allocate memory for all rings
414 * @adapter: board private structure to initialize 330 * @adapter: board private structure to initialize
@@ -423,59 +339,61 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
423 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 339 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
424 sizeof(struct igb_ring), GFP_KERNEL); 340 sizeof(struct igb_ring), GFP_KERNEL);
425 if (!adapter->tx_ring) 341 if (!adapter->tx_ring)
426 return -ENOMEM; 342 goto err;
427 343
428 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 344 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
429 sizeof(struct igb_ring), GFP_KERNEL); 345 sizeof(struct igb_ring), GFP_KERNEL);
430 if (!adapter->rx_ring) { 346 if (!adapter->rx_ring)
431 kfree(adapter->tx_ring); 347 goto err;
432 return -ENOMEM;
433 }
434
435 adapter->rx_ring->buddy = adapter->tx_ring;
436 348
437 for (i = 0; i < adapter->num_tx_queues; i++) { 349 for (i = 0; i < adapter->num_tx_queues; i++) {
438 struct igb_ring *ring = &(adapter->tx_ring[i]); 350 struct igb_ring *ring = &(adapter->tx_ring[i]);
439 ring->count = adapter->tx_ring_count; 351 ring->count = adapter->tx_ring_count;
440 ring->adapter = adapter;
441 ring->queue_index = i; 352 ring->queue_index = i;
353 ring->pdev = adapter->pdev;
354 ring->netdev = adapter->netdev;
355 /* For 82575, context index must be unique per ring. */
356 if (adapter->hw.mac.type == e1000_82575)
357 ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
442 } 358 }
359
443 for (i = 0; i < adapter->num_rx_queues; i++) { 360 for (i = 0; i < adapter->num_rx_queues; i++) {
444 struct igb_ring *ring = &(adapter->rx_ring[i]); 361 struct igb_ring *ring = &(adapter->rx_ring[i]);
445 ring->count = adapter->rx_ring_count; 362 ring->count = adapter->rx_ring_count;
446 ring->adapter = adapter;
447 ring->queue_index = i; 363 ring->queue_index = i;
448 ring->itr_register = E1000_ITR; 364 ring->pdev = adapter->pdev;
449 365 ring->netdev = adapter->netdev;
450 /* set a default napi handler for each rx_ring */ 366 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
451 netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64); 367 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
368 /* set flag indicating ring supports SCTP checksum offload */
369 if (adapter->hw.mac.type >= e1000_82576)
370 ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
452 } 371 }
453 372
454 igb_cache_ring_register(adapter); 373 igb_cache_ring_register(adapter);
455 return 0;
456}
457
458static void igb_free_queues(struct igb_adapter *adapter)
459{
460 int i;
461 374
462 for (i = 0; i < adapter->num_rx_queues; i++) 375 return 0;
463 netif_napi_del(&adapter->rx_ring[i].napi);
464 376
465 adapter->num_rx_queues = 0; 377err:
466 adapter->num_tx_queues = 0; 378 igb_free_queues(adapter);
467 379
468 kfree(adapter->tx_ring); 380 return -ENOMEM;
469 kfree(adapter->rx_ring);
470} 381}
471 382
472#define IGB_N0_QUEUE -1 383#define IGB_N0_QUEUE -1
473static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, 384static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
474 int tx_queue, int msix_vector)
475{ 385{
476 u32 msixbm = 0; 386 u32 msixbm = 0;
387 struct igb_adapter *adapter = q_vector->adapter;
477 struct e1000_hw *hw = &adapter->hw; 388 struct e1000_hw *hw = &adapter->hw;
478 u32 ivar, index; 389 u32 ivar, index;
390 int rx_queue = IGB_N0_QUEUE;
391 int tx_queue = IGB_N0_QUEUE;
392
393 if (q_vector->rx_ring)
394 rx_queue = q_vector->rx_ring->reg_idx;
395 if (q_vector->tx_ring)
396 tx_queue = q_vector->tx_ring->reg_idx;
479 397
480 switch (hw->mac.type) { 398 switch (hw->mac.type) {
481 case e1000_82575: 399 case e1000_82575:
@@ -483,16 +401,12 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
483 bitmask for the EICR/EIMS/EIMC registers. To assign one 401 bitmask for the EICR/EIMS/EIMC registers. To assign one
484 or more queues to a vector, we write the appropriate bits 402 or more queues to a vector, we write the appropriate bits
485 into the MSIXBM register for that vector. */ 403 into the MSIXBM register for that vector. */
486 if (rx_queue > IGB_N0_QUEUE) { 404 if (rx_queue > IGB_N0_QUEUE)
487 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 405 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
488 adapter->rx_ring[rx_queue].eims_value = msixbm; 406 if (tx_queue > IGB_N0_QUEUE)
489 }
490 if (tx_queue > IGB_N0_QUEUE) {
491 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 407 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
492 adapter->tx_ring[tx_queue].eims_value =
493 E1000_EICR_TX_QUEUE0 << tx_queue;
494 }
495 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 408 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
409 q_vector->eims_value = msixbm;
496 break; 410 break;
497 case e1000_82576: 411 case e1000_82576:
498 /* 82576 uses a table-based method for assigning vectors. 412 /* 82576 uses a table-based method for assigning vectors.
@@ -500,35 +414,34 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
500 a vector number along with a "valid" bit. Sadly, the layout 414 a vector number along with a "valid" bit. Sadly, the layout
501 of the table is somewhat counterintuitive. */ 415 of the table is somewhat counterintuitive. */
502 if (rx_queue > IGB_N0_QUEUE) { 416 if (rx_queue > IGB_N0_QUEUE) {
503 index = (rx_queue >> 1) + adapter->vfs_allocated_count; 417 index = (rx_queue & 0x7);
504 ivar = array_rd32(E1000_IVAR0, index); 418 ivar = array_rd32(E1000_IVAR0, index);
505 if (rx_queue & 0x1) { 419 if (rx_queue < 8) {
506 /* vector goes into third byte of register */
507 ivar = ivar & 0xFF00FFFF;
508 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
509 } else {
510 /* vector goes into low byte of register */ 420 /* vector goes into low byte of register */
511 ivar = ivar & 0xFFFFFF00; 421 ivar = ivar & 0xFFFFFF00;
512 ivar |= msix_vector | E1000_IVAR_VALID; 422 ivar |= msix_vector | E1000_IVAR_VALID;
423 } else {
424 /* vector goes into third byte of register */
425 ivar = ivar & 0xFF00FFFF;
426 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
513 } 427 }
514 adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
515 array_wr32(E1000_IVAR0, index, ivar); 428 array_wr32(E1000_IVAR0, index, ivar);
516 } 429 }
517 if (tx_queue > IGB_N0_QUEUE) { 430 if (tx_queue > IGB_N0_QUEUE) {
518 index = (tx_queue >> 1) + adapter->vfs_allocated_count; 431 index = (tx_queue & 0x7);
519 ivar = array_rd32(E1000_IVAR0, index); 432 ivar = array_rd32(E1000_IVAR0, index);
520 if (tx_queue & 0x1) { 433 if (tx_queue < 8) {
521 /* vector goes into high byte of register */
522 ivar = ivar & 0x00FFFFFF;
523 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
524 } else {
525 /* vector goes into second byte of register */ 434 /* vector goes into second byte of register */
526 ivar = ivar & 0xFFFF00FF; 435 ivar = ivar & 0xFFFF00FF;
527 ivar |= (msix_vector | E1000_IVAR_VALID) << 8; 436 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
437 } else {
438 /* vector goes into high byte of register */
439 ivar = ivar & 0x00FFFFFF;
440 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
528 } 441 }
529 adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
530 array_wr32(E1000_IVAR0, index, ivar); 442 array_wr32(E1000_IVAR0, index, ivar);
531 } 443 }
444 q_vector->eims_value = 1 << msix_vector;
532 break; 445 break;
533 default: 446 default:
534 BUG(); 447 BUG();
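In the 82576 branch above, each queue's vector lands in one of four byte lanes of IVAR0[queue & 0x7]: RX queues 0-7 use the low byte and 8-15 the third byte, while TX queues use the second and high bytes respectively. A small sketch that just computes the register index and the updated value, mirroring the layout the new code programs (IVAR_VALID is a stand-in constant):

#include <stdint.h>
#include <stdio.h>

#define IVAR_VALID 0x80u	/* stand-in for E1000_IVAR_VALID */

/* new IVAR0[*index] value for an RX queue (82576 layout) */
static uint32_t ivar_set_rx(uint32_t ivar, int rx_queue, int msix_vector,
			    int *index)
{
	int shift = (rx_queue < 8) ? 0 : 16;	/* low byte or third byte */

	*index = rx_queue & 0x7;
	ivar &= ~(0xFFu << shift);
	ivar |= (uint32_t)(msix_vector | IVAR_VALID) << shift;
	return ivar;
}

/* same idea for a TX queue: second byte or high byte */
static uint32_t ivar_set_tx(uint32_t ivar, int tx_queue, int msix_vector,
			    int *index)
{
	int shift = (tx_queue < 8) ? 8 : 24;

	*index = tx_queue & 0x7;
	ivar &= ~(0xFFu << shift);
	ivar |= (uint32_t)(msix_vector | IVAR_VALID) << shift;
	return ivar;
}

int main(void)
{
	int idx;
	uint32_t v = ivar_set_rx(0, 9, 3, &idx);	/* rx queue 9 -> index 1, third byte */

	v = ivar_set_tx(v, 9, 4, &idx);			/* tx queue 9 -> index 1, high byte */
	printf("index=%d ivar=0x%08x\n", idx, v);	/* both share IVAR0[1] */
	return 0;
}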
@@ -549,43 +462,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
549 struct e1000_hw *hw = &adapter->hw; 462 struct e1000_hw *hw = &adapter->hw;
550 463
551 adapter->eims_enable_mask = 0; 464 adapter->eims_enable_mask = 0;
552 if (hw->mac.type == e1000_82576)
553 /* Turn on MSI-X capability first, or our settings
554 * won't stick. And it will take days to debug. */
555 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
556 E1000_GPIE_PBA | E1000_GPIE_EIAME |
557 E1000_GPIE_NSICR);
558
559 for (i = 0; i < adapter->num_tx_queues; i++) {
560 struct igb_ring *tx_ring = &adapter->tx_ring[i];
561 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
562 adapter->eims_enable_mask |= tx_ring->eims_value;
563 if (tx_ring->itr_val)
564 writel(tx_ring->itr_val,
565 hw->hw_addr + tx_ring->itr_register);
566 else
567 writel(1, hw->hw_addr + tx_ring->itr_register);
568 }
569
570 for (i = 0; i < adapter->num_rx_queues; i++) {
571 struct igb_ring *rx_ring = &adapter->rx_ring[i];
572 rx_ring->buddy = NULL;
573 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
574 adapter->eims_enable_mask |= rx_ring->eims_value;
575 if (rx_ring->itr_val)
576 writel(rx_ring->itr_val,
577 hw->hw_addr + rx_ring->itr_register);
578 else
579 writel(1, hw->hw_addr + rx_ring->itr_register);
580 }
581
582 465
583 /* set vector for other causes, i.e. link changes */ 466 /* set vector for other causes, i.e. link changes */
584 switch (hw->mac.type) { 467 switch (hw->mac.type) {
585 case e1000_82575: 468 case e1000_82575:
586 array_wr32(E1000_MSIXBM(0), vector++,
587 E1000_EIMS_OTHER);
588
589 tmp = rd32(E1000_CTRL_EXT); 469 tmp = rd32(E1000_CTRL_EXT);
590 /* enable MSI-X PBA support*/ 470 /* enable MSI-X PBA support*/
591 tmp |= E1000_CTRL_EXT_PBA_CLR; 471 tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -595,22 +475,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
595 tmp |= E1000_CTRL_EXT_IRCA; 475 tmp |= E1000_CTRL_EXT_IRCA;
596 476
597 wr32(E1000_CTRL_EXT, tmp); 477 wr32(E1000_CTRL_EXT, tmp);
598 adapter->eims_enable_mask |= E1000_EIMS_OTHER; 478
479 /* enable msix_other interrupt */
480 array_wr32(E1000_MSIXBM(0), vector++,
481 E1000_EIMS_OTHER);
599 adapter->eims_other = E1000_EIMS_OTHER; 482 adapter->eims_other = E1000_EIMS_OTHER;
600 483
601 break; 484 break;
602 485
603 case e1000_82576: 486 case e1000_82576:
487 /* Turn on MSI-X capability first, or our settings
488 * won't stick. And it will take days to debug. */
489 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
490 E1000_GPIE_PBA | E1000_GPIE_EIAME |
491 E1000_GPIE_NSICR);
492
493 /* enable msix_other interrupt */
494 adapter->eims_other = 1 << vector;
604 tmp = (vector++ | E1000_IVAR_VALID) << 8; 495 tmp = (vector++ | E1000_IVAR_VALID) << 8;
605 wr32(E1000_IVAR_MISC, tmp);
606 496
607 adapter->eims_enable_mask = (1 << (vector)) - 1; 497 wr32(E1000_IVAR_MISC, tmp);
608 adapter->eims_other = 1 << (vector - 1);
609 break; 498 break;
610 default: 499 default:
611 /* do nothing, since nothing else supports MSI-X */ 500 /* do nothing, since nothing else supports MSI-X */
612 break; 501 break;
613 } /* switch (hw->mac.type) */ 502 } /* switch (hw->mac.type) */
503
504 adapter->eims_enable_mask |= adapter->eims_other;
505
506 for (i = 0; i < adapter->num_q_vectors; i++) {
507 struct igb_q_vector *q_vector = adapter->q_vector[i];
508 igb_assign_vector(q_vector, vector++);
509 adapter->eims_enable_mask |= q_vector->eims_value;
510 }
511
614 wrfl(); 512 wrfl();
615} 513}
616 514
@@ -623,43 +521,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
623static int igb_request_msix(struct igb_adapter *adapter) 521static int igb_request_msix(struct igb_adapter *adapter)
624{ 522{
625 struct net_device *netdev = adapter->netdev; 523 struct net_device *netdev = adapter->netdev;
524 struct e1000_hw *hw = &adapter->hw;
626 int i, err = 0, vector = 0; 525 int i, err = 0, vector = 0;
627 526
628 vector = 0; 527 err = request_irq(adapter->msix_entries[vector].vector,
629 528 &igb_msix_other, 0, netdev->name, adapter);
630 for (i = 0; i < adapter->num_tx_queues; i++) { 529 if (err)
631 struct igb_ring *ring = &(adapter->tx_ring[i]); 530 goto out;
632 sprintf(ring->name, "%s-tx-%d", netdev->name, i); 531 vector++;
633 err = request_irq(adapter->msix_entries[vector].vector, 532
634 &igb_msix_tx, 0, ring->name, 533 for (i = 0; i < adapter->num_q_vectors; i++) {
635 &(adapter->tx_ring[i])); 534 struct igb_q_vector *q_vector = adapter->q_vector[i];
636 if (err) 535
637 goto out; 536 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
638 ring->itr_register = E1000_EITR(0) + (vector << 2); 537
639 ring->itr_val = 976; /* ~4000 ints/sec */ 538 if (q_vector->rx_ring && q_vector->tx_ring)
640 vector++; 539 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
641 } 540 q_vector->rx_ring->queue_index);
642 for (i = 0; i < adapter->num_rx_queues; i++) { 541 else if (q_vector->tx_ring)
643 struct igb_ring *ring = &(adapter->rx_ring[i]); 542 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
644 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 543 q_vector->tx_ring->queue_index);
645 sprintf(ring->name, "%s-rx-%d", netdev->name, i); 544 else if (q_vector->rx_ring)
545 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
546 q_vector->rx_ring->queue_index);
646 else 547 else
647 memcpy(ring->name, netdev->name, IFNAMSIZ); 548 sprintf(q_vector->name, "%s-unused", netdev->name);
549
648 err = request_irq(adapter->msix_entries[vector].vector, 550 err = request_irq(adapter->msix_entries[vector].vector,
649 &igb_msix_rx, 0, ring->name, 551 &igb_msix_ring, 0, q_vector->name,
650 &(adapter->rx_ring[i])); 552 q_vector);
651 if (err) 553 if (err)
652 goto out; 554 goto out;
653 ring->itr_register = E1000_EITR(0) + (vector << 2);
654 ring->itr_val = adapter->itr;
655 vector++; 555 vector++;
656 } 556 }
657 557
658 err = request_irq(adapter->msix_entries[vector].vector,
659 &igb_msix_other, 0, netdev->name, netdev);
660 if (err)
661 goto out;
662
663 igb_configure_msix(adapter); 558 igb_configure_msix(adapter);
664 return 0; 559 return 0;
665out: 560out:
@@ -672,11 +567,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
672 pci_disable_msix(adapter->pdev); 567 pci_disable_msix(adapter->pdev);
673 kfree(adapter->msix_entries); 568 kfree(adapter->msix_entries);
674 adapter->msix_entries = NULL; 569 adapter->msix_entries = NULL;
675 } else if (adapter->flags & IGB_FLAG_HAS_MSI) 570 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
676 pci_disable_msi(adapter->pdev); 571 pci_disable_msi(adapter->pdev);
677 return; 572 }
678} 573}
679 574
575/**
576 * igb_free_q_vectors - Free memory allocated for interrupt vectors
577 * @adapter: board private structure to initialize
578 *
579 * This function frees the memory allocated to the q_vectors. In addition if
580 * NAPI is enabled it will delete any references to the NAPI struct prior
581 * to freeing the q_vector.
582 **/
583static void igb_free_q_vectors(struct igb_adapter *adapter)
584{
585 int v_idx;
586
587 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
588 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
589 adapter->q_vector[v_idx] = NULL;
590 netif_napi_del(&q_vector->napi);
591 kfree(q_vector);
592 }
593 adapter->num_q_vectors = 0;
594}
595
596/**
597 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
598 *
599 * This function resets the device so that it has 0 rx queues, tx queues, and
600 * MSI-X interrupts allocated.
601 */
602static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
603{
604 igb_free_queues(adapter);
605 igb_free_q_vectors(adapter);
606 igb_reset_interrupt_capability(adapter);
607}
680 608
681/** 609/**
682 * igb_set_interrupt_capability - set MSI or MSI-X if supported 610 * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -690,11 +618,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
690 int numvecs, i; 618 int numvecs, i;
691 619
692 /* Number of supported queues. */ 620 /* Number of supported queues. */
693 /* Having more queues than CPUs doesn't make sense. */ 621 adapter->num_rx_queues = adapter->rss_queues;
694 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 622 adapter->num_tx_queues = adapter->rss_queues;
695 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus()); 623
624 /* start with one vector for every rx queue */
625 numvecs = adapter->num_rx_queues;
626
 627 /* if tx handler is separate add 1 for every tx queue */
628 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
629 numvecs += adapter->num_tx_queues;
696 630
697 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1; 631 /* store the number of vectors reserved for queues */
632 adapter->num_q_vectors = numvecs;
633
634 /* add 1 vector for link status interrupts */
635 numvecs++;
698 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 636 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
699 GFP_KERNEL); 637 GFP_KERNEL);
700 if (!adapter->msix_entries) 638 if (!adapter->msix_entries)
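The vector budgeting above reserves one vector per RX queue, adds one per TX queue only when queue pairing is off, stores that count as num_q_vectors, and finally adds one more entry for the link/other interrupt before allocating msix_entries. The arithmetic, modelled with the pairing flag as a plain bool:

#include <stdbool.h>
#include <stdio.h>

static int count_msix_vectors(int rss_queues, bool queue_pairs,
			      int *num_q_vectors)
{
	int numvecs = rss_queues;		/* one vector per RX queue */

	if (!queue_pairs)
		numvecs += rss_queues;		/* separate TX vectors */

	*num_q_vectors = numvecs;		/* vectors used by queues */
	return numvecs + 1;			/* + link status vector */
}

int main(void)
{
	int qv;

	printf("paired:   %d total (%d queue vectors)\n",
	       count_msix_vectors(4, true, &qv), qv);	/* 5 total, 4 for queues */
	printf("unpaired: %d total (%d queue vectors)\n",
	       count_msix_vectors(4, false, &qv), qv);	/* 9 total, 8 for queues */
	return 0;
}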
@@ -728,8 +666,12 @@ msi_only:
728 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); 666 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
729 } 667 }
730#endif 668#endif
669 adapter->vfs_allocated_count = 0;
670 adapter->rss_queues = 1;
671 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
731 adapter->num_rx_queues = 1; 672 adapter->num_rx_queues = 1;
732 adapter->num_tx_queues = 1; 673 adapter->num_tx_queues = 1;
674 adapter->num_q_vectors = 1;
733 if (!pci_enable_msi(adapter->pdev)) 675 if (!pci_enable_msi(adapter->pdev))
734 adapter->flags |= IGB_FLAG_HAS_MSI; 676 adapter->flags |= IGB_FLAG_HAS_MSI;
735out: 677out:
@@ -739,6 +681,143 @@ out:
739} 681}
740 682
741/** 683/**
684 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
685 * @adapter: board private structure to initialize
686 *
687 * We allocate one q_vector per queue interrupt. If allocation fails we
688 * return -ENOMEM.
689 **/
690static int igb_alloc_q_vectors(struct igb_adapter *adapter)
691{
692 struct igb_q_vector *q_vector;
693 struct e1000_hw *hw = &adapter->hw;
694 int v_idx;
695
696 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
697 q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
698 if (!q_vector)
699 goto err_out;
700 q_vector->adapter = adapter;
701 q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
702 q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
703 q_vector->itr_val = IGB_START_ITR;
704 q_vector->set_itr = 1;
705 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
706 adapter->q_vector[v_idx] = q_vector;
707 }
708 return 0;
709
710err_out:
711 while (v_idx) {
712 v_idx--;
713 q_vector = adapter->q_vector[v_idx];
714 netif_napi_del(&q_vector->napi);
715 kfree(q_vector);
716 adapter->q_vector[v_idx] = NULL;
717 }
718 return -ENOMEM;
719}
720
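When a later kzalloc() fails, igb_alloc_q_vectors walks v_idx back down and releases only the vectors already allocated (deleting each NAPI context first). The same backwards-unwind idiom in a stand-alone form, with calloc/free standing in for kzalloc/netif_napi_add/kfree:

#include <stdlib.h>

#define NUM_VECTORS 8

static int alloc_vectors(void *vec[NUM_VECTORS])
{
	int v_idx;

	for (v_idx = 0; v_idx < NUM_VECTORS; v_idx++) {
		vec[v_idx] = calloc(1, 128);	/* kzalloc() stand-in */
		if (!vec[v_idx])
			goto err_out;
	}
	return 0;

err_out:
	/* free only the entries allocated before the failure */
	while (v_idx) {
		v_idx--;
		free(vec[v_idx]);
		vec[v_idx] = NULL;
	}
	return -1;			/* -ENOMEM in the driver */
}

int main(void)
{
	void *vec[NUM_VECTORS] = { 0 };
	int i, err = alloc_vectors(vec);

	for (i = 0; !err && i < NUM_VECTORS; i++)
		free(vec[i]);		/* normal teardown on success */
	return err ? 1 : 0;
}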
721static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
722 int ring_idx, int v_idx)
723{
724 struct igb_q_vector *q_vector;
725
726 q_vector = adapter->q_vector[v_idx];
727 q_vector->rx_ring = &adapter->rx_ring[ring_idx];
728 q_vector->rx_ring->q_vector = q_vector;
729 q_vector->itr_val = adapter->rx_itr_setting;
730 if (q_vector->itr_val && q_vector->itr_val <= 3)
731 q_vector->itr_val = IGB_START_ITR;
732}
733
734static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
735 int ring_idx, int v_idx)
736{
737 struct igb_q_vector *q_vector;
738
739 q_vector = adapter->q_vector[v_idx];
740 q_vector->tx_ring = &adapter->tx_ring[ring_idx];
741 q_vector->tx_ring->q_vector = q_vector;
742 q_vector->itr_val = adapter->tx_itr_setting;
743 if (q_vector->itr_val && q_vector->itr_val <= 3)
744 q_vector->itr_val = IGB_START_ITR;
745}
746
747/**
748 * igb_map_ring_to_vector - maps allocated queues to vectors
749 *
750 * This function maps the recently allocated queues to vectors.
751 **/
752static int igb_map_ring_to_vector(struct igb_adapter *adapter)
753{
754 int i;
755 int v_idx = 0;
756
757 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
758 (adapter->num_q_vectors < adapter->num_tx_queues))
759 return -ENOMEM;
760
761 if (adapter->num_q_vectors >=
762 (adapter->num_rx_queues + adapter->num_tx_queues)) {
763 for (i = 0; i < adapter->num_rx_queues; i++)
764 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
765 for (i = 0; i < adapter->num_tx_queues; i++)
766 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
767 } else {
768 for (i = 0; i < adapter->num_rx_queues; i++) {
769 if (i < adapter->num_tx_queues)
770 igb_map_tx_ring_to_vector(adapter, i, v_idx);
771 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
772 }
773 for (; i < adapter->num_tx_queues; i++)
774 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
775 }
776 return 0;
777}
778
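igb_map_ring_to_vector assigns each ring its own vector when there are enough, and otherwise pairs TX ring i with RX ring i on a shared vector, giving any surplus TX rings their own vectors afterwards. A compact model of that policy that just prints the resulting assignment:

#include <stdio.h>

static void map_rings(int num_vectors, int num_rx, int num_tx)
{
	int i, v_idx = 0;

	if (num_vectors < num_rx || num_vectors < num_tx)
		return;				/* not enough vectors */

	if (num_vectors >= num_rx + num_tx) {
		/* plenty of vectors: one ring per vector */
		for (i = 0; i < num_rx; i++)
			printf("vector %d: rx %d\n", v_idx++, i);
		for (i = 0; i < num_tx; i++)
			printf("vector %d: tx %d\n", v_idx++, i);
	} else {
		/* pair tx i with rx i, then map any leftover tx rings */
		for (i = 0; i < num_rx; i++) {
			if (i < num_tx)
				printf("vector %d: tx %d + ", v_idx, i);
			else
				printf("vector %d: ", v_idx);
			printf("rx %d\n", i);
			v_idx++;
		}
		for (; i < num_tx; i++)
			printf("vector %d: tx %d\n", v_idx++, i);
	}
}

int main(void)
{
	map_rings(4, 4, 4);	/* paired: each vector handles rx i + tx i */
	return 0;
}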
779/**
780 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
781 *
782 * This function initializes the interrupts and allocates all of the queues.
783 **/
784static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
785{
786 struct pci_dev *pdev = adapter->pdev;
787 int err;
788
789 igb_set_interrupt_capability(adapter);
790
791 err = igb_alloc_q_vectors(adapter);
792 if (err) {
793 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
794 goto err_alloc_q_vectors;
795 }
796
797 err = igb_alloc_queues(adapter);
798 if (err) {
799 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
800 goto err_alloc_queues;
801 }
802
803 err = igb_map_ring_to_vector(adapter);
804 if (err) {
805 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
806 goto err_map_queues;
807 }
808
809
810 return 0;
811err_map_queues:
812 igb_free_queues(adapter);
813err_alloc_queues:
814 igb_free_q_vectors(adapter);
815err_alloc_q_vectors:
816 igb_reset_interrupt_capability(adapter);
817 return err;
818}
819
820/**
742 * igb_request_irq - initialize interrupts 821 * igb_request_irq - initialize interrupts
743 * 822 *
744 * Attempts to configure interrupts using the best available 823 * Attempts to configure interrupts using the best available
@@ -747,6 +826,7 @@ out:
747static int igb_request_irq(struct igb_adapter *adapter) 826static int igb_request_irq(struct igb_adapter *adapter)
748{ 827{
749 struct net_device *netdev = adapter->netdev; 828 struct net_device *netdev = adapter->netdev;
829 struct pci_dev *pdev = adapter->pdev;
750 struct e1000_hw *hw = &adapter->hw; 830 struct e1000_hw *hw = &adapter->hw;
751 int err = 0; 831 int err = 0;
752 832
@@ -755,18 +835,36 @@ static int igb_request_irq(struct igb_adapter *adapter)
755 if (!err) 835 if (!err)
756 goto request_done; 836 goto request_done;
757 /* fall back to MSI */ 837 /* fall back to MSI */
758 igb_reset_interrupt_capability(adapter); 838 igb_clear_interrupt_scheme(adapter);
759 if (!pci_enable_msi(adapter->pdev)) 839 if (!pci_enable_msi(adapter->pdev))
760 adapter->flags |= IGB_FLAG_HAS_MSI; 840 adapter->flags |= IGB_FLAG_HAS_MSI;
761 igb_free_all_tx_resources(adapter); 841 igb_free_all_tx_resources(adapter);
762 igb_free_all_rx_resources(adapter); 842 igb_free_all_rx_resources(adapter);
843 adapter->num_tx_queues = 1;
763 adapter->num_rx_queues = 1; 844 adapter->num_rx_queues = 1;
764 igb_alloc_queues(adapter); 845 adapter->num_q_vectors = 1;
846 err = igb_alloc_q_vectors(adapter);
847 if (err) {
848 dev_err(&pdev->dev,
849 "Unable to allocate memory for vectors\n");
850 goto request_done;
851 }
852 err = igb_alloc_queues(adapter);
853 if (err) {
854 dev_err(&pdev->dev,
855 "Unable to allocate memory for queues\n");
856 igb_free_q_vectors(adapter);
857 goto request_done;
858 }
859 igb_setup_all_tx_resources(adapter);
860 igb_setup_all_rx_resources(adapter);
765 } else { 861 } else {
766 switch (hw->mac.type) { 862 switch (hw->mac.type) {
767 case e1000_82575: 863 case e1000_82575:
768 wr32(E1000_MSIXBM(0), 864 wr32(E1000_MSIXBM(0),
769 (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER)); 865 (E1000_EICR_RX_QUEUE0 |
866 E1000_EICR_TX_QUEUE0 |
867 E1000_EIMS_OTHER));
770 break; 868 break;
771 case e1000_82576: 869 case e1000_82576:
772 wr32(E1000_IVAR0, E1000_IVAR_VALID); 870 wr32(E1000_IVAR0, E1000_IVAR_VALID);
@@ -778,16 +876,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
778 876
779 if (adapter->flags & IGB_FLAG_HAS_MSI) { 877 if (adapter->flags & IGB_FLAG_HAS_MSI) {
780 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, 878 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
781 netdev->name, netdev); 879 netdev->name, adapter);
782 if (!err) 880 if (!err)
783 goto request_done; 881 goto request_done;
882
784 /* fall back to legacy interrupts */ 883 /* fall back to legacy interrupts */
785 igb_reset_interrupt_capability(adapter); 884 igb_reset_interrupt_capability(adapter);
786 adapter->flags &= ~IGB_FLAG_HAS_MSI; 885 adapter->flags &= ~IGB_FLAG_HAS_MSI;
787 } 886 }
788 887
789 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, 888 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
790 netdev->name, netdev); 889 netdev->name, adapter);
791 890
792 if (err) 891 if (err)
793 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", 892 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
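The request path above degrades step by step: a failed MSI-X setup tears down the multi-vector scheme, shrinks to a single TX/RX queue and a single q_vector, retries with MSI, and finally falls back to a shared legacy interrupt line. A skeleton of that fallback ladder; the try_* helpers are placeholders, not driver or kernel APIs:

#include <stdbool.h>
#include <stdio.h>

enum irq_mode { IRQ_MSIX, IRQ_MSI, IRQ_LEGACY };

/* placeholders: pretend MSI-X is unavailable and MSI works */
static bool try_msix(void)   { return false; }
static bool try_msi(void)    { return true; }
static bool try_legacy(void) { return true; }

static int request_irq_mode(enum irq_mode *mode)
{
	if (try_msix()) {
		*mode = IRQ_MSIX;
		return 0;
	}
	/* fall back: shrink to one RX queue, one TX queue, one vector */
	if (try_msi()) {
		*mode = IRQ_MSI;
		return 0;
	}
	if (try_legacy()) {
		*mode = IRQ_LEGACY;	/* shared line, IRQF_SHARED in the driver */
		return 0;
	}
	return -1;
}

int main(void)
{
	enum irq_mode mode;

	if (!request_irq_mode(&mode))
		printf("using mode %d\n", mode);	/* prints 1 (MSI) here */
	return 0;
}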
@@ -799,23 +898,19 @@ request_done:
799 898
800static void igb_free_irq(struct igb_adapter *adapter) 899static void igb_free_irq(struct igb_adapter *adapter)
801{ 900{
802 struct net_device *netdev = adapter->netdev;
803
804 if (adapter->msix_entries) { 901 if (adapter->msix_entries) {
805 int vector = 0, i; 902 int vector = 0, i;
806 903
807 for (i = 0; i < adapter->num_tx_queues; i++) 904 free_irq(adapter->msix_entries[vector++].vector, adapter);
808 free_irq(adapter->msix_entries[vector++].vector,
809 &(adapter->tx_ring[i]));
810 for (i = 0; i < adapter->num_rx_queues; i++)
811 free_irq(adapter->msix_entries[vector++].vector,
812 &(adapter->rx_ring[i]));
813 905
814 free_irq(adapter->msix_entries[vector++].vector, netdev); 906 for (i = 0; i < adapter->num_q_vectors; i++) {
815 return; 907 struct igb_q_vector *q_vector = adapter->q_vector[i];
908 free_irq(adapter->msix_entries[vector++].vector,
909 q_vector);
910 }
911 } else {
912 free_irq(adapter->pdev->irq, adapter);
816 } 913 }
817
818 free_irq(adapter->pdev->irq, netdev);
819} 914}
820 915
821/** 916/**
@@ -826,6 +921,11 @@ static void igb_irq_disable(struct igb_adapter *adapter)
826{ 921{
827 struct e1000_hw *hw = &adapter->hw; 922 struct e1000_hw *hw = &adapter->hw;
828 923
924 /*
925 * we need to be careful when disabling interrupts. The VFs are also
926 * mapped into these registers and so clearing the bits can cause
927 * issues on the VF drivers so we only need to clear what we set
928 */
829 if (adapter->msix_entries) { 929 if (adapter->msix_entries) {
830 u32 regval = rd32(E1000_EIAM); 930 u32 regval = rd32(E1000_EIAM);
831 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); 931 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
@@ -849,15 +949,17 @@ static void igb_irq_enable(struct igb_adapter *adapter)
849 struct e1000_hw *hw = &adapter->hw; 949 struct e1000_hw *hw = &adapter->hw;
850 950
851 if (adapter->msix_entries) { 951 if (adapter->msix_entries) {
952 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
852 u32 regval = rd32(E1000_EIAC); 953 u32 regval = rd32(E1000_EIAC);
853 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); 954 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
854 regval = rd32(E1000_EIAM); 955 regval = rd32(E1000_EIAM);
855 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); 956 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
856 wr32(E1000_EIMS, adapter->eims_enable_mask); 957 wr32(E1000_EIMS, adapter->eims_enable_mask);
857 if (adapter->vfs_allocated_count) 958 if (adapter->vfs_allocated_count) {
858 wr32(E1000_MBVFIMR, 0xFF); 959 wr32(E1000_MBVFIMR, 0xFF);
859 wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB | 960 ims |= E1000_IMS_VMMB;
860 E1000_IMS_DOUTSYNC)); 961 }
962 wr32(E1000_IMS, ims);
861 } else { 963 } else {
862 wr32(E1000_IMS, IMS_ENABLE_MASK); 964 wr32(E1000_IMS, IMS_ENABLE_MASK);
863 wr32(E1000_IAM, IMS_ENABLE_MASK); 965 wr32(E1000_IAM, IMS_ENABLE_MASK);
@@ -866,24 +968,23 @@ static void igb_irq_enable(struct igb_adapter *adapter)
866 968
867static void igb_update_mng_vlan(struct igb_adapter *adapter) 969static void igb_update_mng_vlan(struct igb_adapter *adapter)
868{ 970{
869 struct net_device *netdev = adapter->netdev; 971 struct e1000_hw *hw = &adapter->hw;
870 u16 vid = adapter->hw.mng_cookie.vlan_id; 972 u16 vid = adapter->hw.mng_cookie.vlan_id;
871 u16 old_vid = adapter->mng_vlan_id; 973 u16 old_vid = adapter->mng_vlan_id;
872 if (adapter->vlgrp) {
873 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
874 if (adapter->hw.mng_cookie.status &
875 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
876 igb_vlan_rx_add_vid(netdev, vid);
877 adapter->mng_vlan_id = vid;
878 } else
879 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
880 974
881 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && 975 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
882 (vid != old_vid) && 976 /* add VID to filter table */
883 !vlan_group_get_device(adapter->vlgrp, old_vid)) 977 igb_vfta_set(hw, vid, true);
884 igb_vlan_rx_kill_vid(netdev, old_vid); 978 adapter->mng_vlan_id = vid;
885 } else 979 } else {
886 adapter->mng_vlan_id = vid; 980 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
981 }
982
983 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
984 (vid != old_vid) &&
985 !vlan_group_get_device(adapter->vlgrp, old_vid)) {
986 /* remove VID from filter table */
987 igb_vfta_set(hw, old_vid, false);
887 } 988 }
888} 989}
889 990
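The rewritten igb_update_mng_vlan programs the VLAN filter table directly: the new management VID is added whenever the DHCP cookie reports a VLAN, and the previous VID is removed only if it changed and is no longer held by the VLAN group. The decision logic, stripped of the hardware access:

#include <stdbool.h>
#include <stdio.h>

#define MNG_VLAN_NONE 0xFFFFu

static void update_mng_vlan(unsigned *mng_vlan_id, unsigned new_vid,
			    bool cookie_has_vlan, bool old_vid_in_use)
{
	unsigned old_vid = *mng_vlan_id;

	if (cookie_has_vlan) {
		printf("vfta add %u\n", new_vid);	/* igb_vfta_set(hw, vid, true) */
		*mng_vlan_id = new_vid;
	} else {
		*mng_vlan_id = MNG_VLAN_NONE;
	}

	if (old_vid != MNG_VLAN_NONE && new_vid != old_vid && !old_vid_in_use)
		printf("vfta del %u\n", old_vid);	/* igb_vfta_set(hw, old_vid, false) */
}

int main(void)
{
	unsigned id = 10;

	update_mng_vlan(&id, 20, true, false);	/* adds 20, removes 10 */
	return 0;
}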
@@ -907,7 +1008,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
907 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 1008 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
908} 1009}
909 1010
910
911/** 1011/**
912 * igb_get_hw_control - get control of the h/w from f/w 1012 * igb_get_hw_control - get control of the h/w from f/w
913 * @adapter: address of board private structure 1013 * @adapter: address of board private structure
@@ -942,8 +1042,11 @@ static void igb_configure(struct igb_adapter *adapter)
942 1042
943 igb_restore_vlan(adapter); 1043 igb_restore_vlan(adapter);
944 1044
945 igb_configure_tx(adapter); 1045 igb_setup_tctl(adapter);
1046 igb_setup_mrqc(adapter);
946 igb_setup_rctl(adapter); 1047 igb_setup_rctl(adapter);
1048
1049 igb_configure_tx(adapter);
947 igb_configure_rx(adapter); 1050 igb_configure_rx(adapter);
948 1051
949 igb_rx_fifo_flush_82575(&adapter->hw); 1052 igb_rx_fifo_flush_82575(&adapter->hw);
@@ -965,7 +1068,6 @@ static void igb_configure(struct igb_adapter *adapter)
965 * igb_up - Open the interface and prepare it to handle traffic 1068 * igb_up - Open the interface and prepare it to handle traffic
966 * @adapter: board private structure 1069 * @adapter: board private structure
967 **/ 1070 **/
968
969int igb_up(struct igb_adapter *adapter) 1071int igb_up(struct igb_adapter *adapter)
970{ 1072{
971 struct e1000_hw *hw = &adapter->hw; 1073 struct e1000_hw *hw = &adapter->hw;
@@ -976,30 +1078,37 @@ int igb_up(struct igb_adapter *adapter)
976 1078
977 clear_bit(__IGB_DOWN, &adapter->state); 1079 clear_bit(__IGB_DOWN, &adapter->state);
978 1080
979 for (i = 0; i < adapter->num_rx_queues; i++) 1081 for (i = 0; i < adapter->num_q_vectors; i++) {
980 napi_enable(&adapter->rx_ring[i].napi); 1082 struct igb_q_vector *q_vector = adapter->q_vector[i];
1083 napi_enable(&q_vector->napi);
1084 }
981 if (adapter->msix_entries) 1085 if (adapter->msix_entries)
982 igb_configure_msix(adapter); 1086 igb_configure_msix(adapter);
983 1087
984 igb_vmm_control(adapter);
985 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
986 igb_set_vmolr(hw, adapter->vfs_allocated_count);
987
988 /* Clear any pending interrupts. */ 1088 /* Clear any pending interrupts. */
989 rd32(E1000_ICR); 1089 rd32(E1000_ICR);
990 igb_irq_enable(adapter); 1090 igb_irq_enable(adapter);
991 1091
1092 /* notify VFs that reset has been completed */
1093 if (adapter->vfs_allocated_count) {
1094 u32 reg_data = rd32(E1000_CTRL_EXT);
1095 reg_data |= E1000_CTRL_EXT_PFRSTD;
1096 wr32(E1000_CTRL_EXT, reg_data);
1097 }
1098
992 netif_tx_start_all_queues(adapter->netdev); 1099 netif_tx_start_all_queues(adapter->netdev);
993 1100
994 /* Fire a link change interrupt to start the watchdog. */ 1101 /* start the watchdog. */
995 wr32(E1000_ICS, E1000_ICS_LSC); 1102 hw->mac.get_link_status = 1;
1103 schedule_work(&adapter->watchdog_task);
1104
996 return 0; 1105 return 0;
997} 1106}
998 1107
999void igb_down(struct igb_adapter *adapter) 1108void igb_down(struct igb_adapter *adapter)
1000{ 1109{
1001 struct e1000_hw *hw = &adapter->hw;
1002 struct net_device *netdev = adapter->netdev; 1110 struct net_device *netdev = adapter->netdev;
1111 struct e1000_hw *hw = &adapter->hw;
1003 u32 tctl, rctl; 1112 u32 tctl, rctl;
1004 int i; 1113 int i;
1005 1114
@@ -1022,8 +1131,10 @@ void igb_down(struct igb_adapter *adapter)
1022 wrfl(); 1131 wrfl();
1023 msleep(10); 1132 msleep(10);
1024 1133
1025 for (i = 0; i < adapter->num_rx_queues; i++) 1134 for (i = 0; i < adapter->num_q_vectors; i++) {
1026 napi_disable(&adapter->rx_ring[i].napi); 1135 struct igb_q_vector *q_vector = adapter->q_vector[i];
1136 napi_disable(&q_vector->napi);
1137 }
1027 1138
1028 igb_irq_disable(adapter); 1139 igb_irq_disable(adapter);
1029 1140
@@ -1062,6 +1173,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
1062 1173
1063void igb_reset(struct igb_adapter *adapter) 1174void igb_reset(struct igb_adapter *adapter)
1064{ 1175{
1176 struct pci_dev *pdev = adapter->pdev;
1065 struct e1000_hw *hw = &adapter->hw; 1177 struct e1000_hw *hw = &adapter->hw;
1066 struct e1000_mac_info *mac = &hw->mac; 1178 struct e1000_mac_info *mac = &hw->mac;
1067 struct e1000_fc_info *fc = &hw->fc; 1179 struct e1000_fc_info *fc = &hw->fc;
@@ -1073,7 +1185,8 @@ void igb_reset(struct igb_adapter *adapter)
1073 */ 1185 */
1074 switch (mac->type) { 1186 switch (mac->type) {
1075 case e1000_82576: 1187 case e1000_82576:
1076 pba = E1000_PBA_64K; 1188 pba = rd32(E1000_RXPBS);
1189 pba &= E1000_RXPBS_SIZE_MASK_82576;
1077 break; 1190 break;
1078 case e1000_82575: 1191 case e1000_82575:
1079 default: 1192 default:
@@ -1148,10 +1261,10 @@ void igb_reset(struct igb_adapter *adapter)
1148 if (adapter->vfs_allocated_count) { 1261 if (adapter->vfs_allocated_count) {
1149 int i; 1262 int i;
1150 for (i = 0 ; i < adapter->vfs_allocated_count; i++) 1263 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1151 adapter->vf_data[i].clear_to_send = false; 1264 adapter->vf_data[i].flags = 0;
1152 1265
1153 /* ping all the active vfs to let them know we are going down */ 1266 /* ping all the active vfs to let them know we are going down */
1154 igb_ping_all_vfs(adapter); 1267 igb_ping_all_vfs(adapter);
1155 1268
1156 /* disable transmits and receives */ 1269 /* disable transmits and receives */
1157 wr32(E1000_VFRE, 0); 1270 wr32(E1000_VFRE, 0);
@@ -1159,23 +1272,23 @@ void igb_reset(struct igb_adapter *adapter)
1159 } 1272 }
1160 1273
1161 /* Allow time for pending master requests to run */ 1274 /* Allow time for pending master requests to run */
1162 adapter->hw.mac.ops.reset_hw(&adapter->hw); 1275 hw->mac.ops.reset_hw(hw);
1163 wr32(E1000_WUC, 0); 1276 wr32(E1000_WUC, 0);
1164 1277
1165 if (adapter->hw.mac.ops.init_hw(&adapter->hw)) 1278 if (hw->mac.ops.init_hw(hw))
1166 dev_err(&adapter->pdev->dev, "Hardware Error\n"); 1279 dev_err(&pdev->dev, "Hardware Error\n");
1167 1280
1168 igb_update_mng_vlan(adapter); 1281 igb_update_mng_vlan(adapter);
1169 1282
1170 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1283 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1171 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1284 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1172 1285
1173 igb_reset_adaptive(&adapter->hw); 1286 igb_reset_adaptive(hw);
1174 igb_get_phy_info(&adapter->hw); 1287 igb_get_phy_info(hw);
1175} 1288}
1176 1289
1177static const struct net_device_ops igb_netdev_ops = { 1290static const struct net_device_ops igb_netdev_ops = {
1178 .ndo_open = igb_open, 1291 .ndo_open = igb_open,
1179 .ndo_stop = igb_close, 1292 .ndo_stop = igb_close,
1180 .ndo_start_xmit = igb_xmit_frame_adv, 1293 .ndo_start_xmit = igb_xmit_frame_adv,
1181 .ndo_get_stats = igb_get_stats, 1294 .ndo_get_stats = igb_get_stats,
@@ -1211,10 +1324,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1211 struct net_device *netdev; 1324 struct net_device *netdev;
1212 struct igb_adapter *adapter; 1325 struct igb_adapter *adapter;
1213 struct e1000_hw *hw; 1326 struct e1000_hw *hw;
1327 u16 eeprom_data = 0;
1328 static int global_quad_port_a; /* global quad port a indication */
1214 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1329 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1215 unsigned long mmio_start, mmio_len; 1330 unsigned long mmio_start, mmio_len;
1216 int err, pci_using_dac; 1331 int err, pci_using_dac;
1217 u16 eeprom_data = 0;
1218 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1332 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1219 u32 part_num; 1333 u32 part_num;
1220 1334
@@ -1291,8 +1405,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1291 hw->subsystem_vendor_id = pdev->subsystem_vendor; 1405 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1292 hw->subsystem_device_id = pdev->subsystem_device; 1406 hw->subsystem_device_id = pdev->subsystem_device;
1293 1407
1294 /* setup the private structure */
1295 hw->back = adapter;
1296 /* Copy the default MAC, PHY and NVM function pointers */ 1408 /* Copy the default MAC, PHY and NVM function pointers */
1297 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 1409 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1298 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 1410 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -1302,46 +1414,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1302 if (err) 1414 if (err)
1303 goto err_sw_init; 1415 goto err_sw_init;
1304 1416
1305#ifdef CONFIG_PCI_IOV
1306 /* since iov functionality isn't critical to base device function we
1307 * can accept failure. If it fails we don't allow iov to be enabled */
1308 if (hw->mac.type == e1000_82576) {
1309 /* 82576 supports a maximum of 7 VFs in addition to the PF */
1310 unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
1311 int i;
1312 unsigned char mac_addr[ETH_ALEN];
1313
1314 if (num_vfs) {
1315 adapter->vf_data = kcalloc(num_vfs,
1316 sizeof(struct vf_data_storage),
1317 GFP_KERNEL);
1318 if (!adapter->vf_data) {
1319 dev_err(&pdev->dev,
1320 "Could not allocate VF private data - "
1321 "IOV enable failed\n");
1322 } else {
1323 err = pci_enable_sriov(pdev, num_vfs);
1324 if (!err) {
1325 adapter->vfs_allocated_count = num_vfs;
1326 dev_info(&pdev->dev,
1327 "%d vfs allocated\n",
1328 num_vfs);
1329 for (i = 0;
1330 i < adapter->vfs_allocated_count;
1331 i++) {
1332 random_ether_addr(mac_addr);
1333 igb_set_vf_mac(adapter, i,
1334 mac_addr);
1335 }
1336 } else {
1337 kfree(adapter->vf_data);
1338 adapter->vf_data = NULL;
1339 }
1340 }
1341 }
1342 }
1343
1344#endif
1345 /* setup the private structure */ 1417 /* setup the private structure */
1346 err = igb_sw_init(adapter); 1418 err = igb_sw_init(adapter);
1347 if (err) 1419 if (err)
@@ -1349,16 +1421,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1349 1421
1350 igb_get_bus_info_pcie(hw); 1422 igb_get_bus_info_pcie(hw);
1351 1423
1352 /* set flags */
1353 switch (hw->mac.type) {
1354 case e1000_82575:
1355 adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
1356 break;
1357 case e1000_82576:
1358 default:
1359 break;
1360 }
1361
1362 hw->phy.autoneg_wait_to_complete = false; 1424 hw->phy.autoneg_wait_to_complete = false;
1363 hw->mac.adaptive_ifs = true; 1425 hw->mac.adaptive_ifs = true;
1364 1426
@@ -1382,7 +1444,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1382 netdev->features |= NETIF_F_IPV6_CSUM; 1444 netdev->features |= NETIF_F_IPV6_CSUM;
1383 netdev->features |= NETIF_F_TSO; 1445 netdev->features |= NETIF_F_TSO;
1384 netdev->features |= NETIF_F_TSO6; 1446 netdev->features |= NETIF_F_TSO6;
1385
1386 netdev->features |= NETIF_F_GRO; 1447 netdev->features |= NETIF_F_GRO;
1387 1448
1388 netdev->vlan_features |= NETIF_F_TSO; 1449 netdev->vlan_features |= NETIF_F_TSO;
@@ -1394,10 +1455,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1394 if (pci_using_dac) 1455 if (pci_using_dac)
1395 netdev->features |= NETIF_F_HIGHDMA; 1456 netdev->features |= NETIF_F_HIGHDMA;
1396 1457
1397 if (adapter->hw.mac.type == e1000_82576) 1458 if (hw->mac.type >= e1000_82576)
1398 netdev->features |= NETIF_F_SCTP_CSUM; 1459 netdev->features |= NETIF_F_SCTP_CSUM;
1399 1460
1400 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1461 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
1401 1462
1402 /* before reading the NVM, reset the controller to put the device in a 1463 /* before reading the NVM, reset the controller to put the device in a
1403 * known good starting state */ 1464 * known good starting state */
@@ -1439,9 +1500,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1439 hw->fc.requested_mode = e1000_fc_default; 1500 hw->fc.requested_mode = e1000_fc_default;
1440 hw->fc.current_mode = e1000_fc_default; 1501 hw->fc.current_mode = e1000_fc_default;
1441 1502
1442 adapter->itr_setting = IGB_DEFAULT_ITR;
1443 adapter->itr = IGB_START_ITR;
1444
1445 igb_validate_mdi_setting(hw); 1503 igb_validate_mdi_setting(hw);
1446 1504
1447 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, 1505 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
@@ -1508,66 +1566,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1508 dev_info(&pdev->dev, "DCA enabled\n"); 1566 dev_info(&pdev->dev, "DCA enabled\n");
1509 igb_setup_dca(adapter); 1567 igb_setup_dca(adapter);
1510 } 1568 }
1511#endif
1512
1513 /*
1514 * Initialize hardware timer: we keep it running just in case
1515 * that some program needs it later on.
1516 */
1517 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1518 adapter->cycles.read = igb_read_clock;
1519 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1520 adapter->cycles.mult = 1;
1521 adapter->cycles.shift = IGB_TSYNC_SHIFT;
1522 wr32(E1000_TIMINCA,
1523 (1<<24) |
1524 IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
1525#if 0
1526 /*
1527 * Avoid rollover while we initialize by resetting the time counter.
1528 */
1529 wr32(E1000_SYSTIML, 0x00000000);
1530 wr32(E1000_SYSTIMH, 0x00000000);
1531#else
1532 /*
1533 * Set registers so that rollover occurs soon to test this.
1534 */
1535 wr32(E1000_SYSTIML, 0x00000000);
1536 wr32(E1000_SYSTIMH, 0xFF800000);
1537#endif
1538 wrfl();
1539 timecounter_init(&adapter->clock,
1540 &adapter->cycles,
1541 ktime_to_ns(ktime_get_real()));
1542
1543 /*
1544 * Synchronize our NIC clock against system wall clock. NIC
1545 * time stamp reading requires ~3us per sample, each sample
1546 * was pretty stable even under load => only require 10
1547 * samples for each offset comparison.
1548 */
1549 memset(&adapter->compare, 0, sizeof(adapter->compare));
1550 adapter->compare.source = &adapter->clock;
1551 adapter->compare.target = ktime_get_real;
1552 adapter->compare.num_samples = 10;
1553 timecompare_update(&adapter->compare, 0);
1554 1569
1555#ifdef DEBUG
1556 {
1557 char buffer[160];
1558 printk(KERN_DEBUG
1559 "igb: %s: hw %p initialized timer\n",
1560 igb_get_time_str(adapter, buffer),
1561 &adapter->hw);
1562 }
1563#endif 1570#endif
1564
1565 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1571 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1566 /* print bus type/speed/width info */ 1572 /* print bus type/speed/width info */
1567 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1573 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
1568 netdev->name, 1574 netdev->name,
1569 ((hw->bus.speed == e1000_bus_speed_2500) 1575 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1570 ? "2.5Gb/s" : "unknown"), 1576 "unknown"),
1571 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 1577 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1572 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 1578 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1573 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : 1579 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
@@ -1594,15 +1600,14 @@ err_eeprom:
1594 1600
1595 if (hw->flash_address) 1601 if (hw->flash_address)
1596 iounmap(hw->flash_address); 1602 iounmap(hw->flash_address);
1597
1598 igb_free_queues(adapter);
1599err_sw_init: 1603err_sw_init:
1604 igb_clear_interrupt_scheme(adapter);
1600 iounmap(hw->hw_addr); 1605 iounmap(hw->hw_addr);
1601err_ioremap: 1606err_ioremap:
1602 free_netdev(netdev); 1607 free_netdev(netdev);
1603err_alloc_etherdev: 1608err_alloc_etherdev:
1604 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1609 pci_release_selected_regions(pdev,
1605 IORESOURCE_MEM)); 1610 pci_select_bars(pdev, IORESOURCE_MEM));
1606err_pci_reg: 1611err_pci_reg:
1607err_dma: 1612err_dma:
1608 pci_disable_device(pdev); 1613 pci_disable_device(pdev);
@@ -1647,12 +1652,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1647 1652
1648 unregister_netdev(netdev); 1653 unregister_netdev(netdev);
1649 1654
1650 if (!igb_check_reset_block(&adapter->hw)) 1655 if (!igb_check_reset_block(hw))
1651 igb_reset_phy(&adapter->hw); 1656 igb_reset_phy(hw);
1652
1653 igb_reset_interrupt_capability(adapter);
1654 1657
1655 igb_free_queues(adapter); 1658 igb_clear_interrupt_scheme(adapter);
1656 1659
1657#ifdef CONFIG_PCI_IOV 1660#ifdef CONFIG_PCI_IOV
1658 /* reclaim resources allocated to VFs */ 1661 /* reclaim resources allocated to VFs */
@@ -1668,11 +1671,12 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1668 dev_info(&pdev->dev, "IOV Disabled\n"); 1671 dev_info(&pdev->dev, "IOV Disabled\n");
1669 } 1672 }
1670#endif 1673#endif
1674
1671 iounmap(hw->hw_addr); 1675 iounmap(hw->hw_addr);
1672 if (hw->flash_address) 1676 if (hw->flash_address)
1673 iounmap(hw->flash_address); 1677 iounmap(hw->flash_address);
1674 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1678 pci_release_selected_regions(pdev,
1675 IORESOURCE_MEM)); 1679 pci_select_bars(pdev, IORESOURCE_MEM));
1676 1680
1677 free_netdev(netdev); 1681 free_netdev(netdev);
1678 1682
@@ -1682,6 +1686,118 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1682} 1686}
1683 1687
1684/** 1688/**
1689 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1690 * @adapter: board private structure to initialize
1691 *
1692 * This function initializes the vf specific data storage and then attempts to
1693 * allocate the VFs. The reason for ordering it this way is because it is much
1694 * more expensive time-wise to disable SR-IOV than it is to allocate and free
1695 * the memory for the VFs.
1696 **/
1697static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1698{
1699#ifdef CONFIG_PCI_IOV
1700 struct pci_dev *pdev = adapter->pdev;
1701
1702 if (adapter->vfs_allocated_count > 7)
1703 adapter->vfs_allocated_count = 7;
1704
1705 if (adapter->vfs_allocated_count) {
1706 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1707 sizeof(struct vf_data_storage),
1708 GFP_KERNEL);
1709 /* if allocation failed then we do not support SR-IOV */
1710 if (!adapter->vf_data) {
1711 adapter->vfs_allocated_count = 0;
1712 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1713 "Data Storage\n");
1714 }
1715 }
1716
1717 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1718 kfree(adapter->vf_data);
1719 adapter->vf_data = NULL;
1720#endif /* CONFIG_PCI_IOV */
1721 adapter->vfs_allocated_count = 0;
1722#ifdef CONFIG_PCI_IOV
1723 } else {
1724 unsigned char mac_addr[ETH_ALEN];
1725 int i;
1726 dev_info(&pdev->dev, "%d vfs allocated\n",
1727 adapter->vfs_allocated_count);
1728 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1729 random_ether_addr(mac_addr);
1730 igb_set_vf_mac(adapter, i, mac_addr);
1731 }
1732 }
1733#endif /* CONFIG_PCI_IOV */
1734}
1735
1736
1737/**
1738 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1739 * @adapter: board private structure to initialize
1740 *
1741 * igb_init_hw_timer initializes the function pointers and values used by the
1742 * hardware timer.
1743 **/
1744static void igb_init_hw_timer(struct igb_adapter *adapter)
1745{
1746 struct e1000_hw *hw = &adapter->hw;
1747
1748 switch (hw->mac.type) {
1749 case e1000_82576:
1750 /*
1751 * Initialize the hardware timer: we keep it running just in case
1752 * some program needs it later on.
1753 */
1754 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1755 adapter->cycles.read = igb_read_clock;
1756 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1757 adapter->cycles.mult = 1;
1758 /**
1759 * Scale the NIC clock cycle by a large factor so that
1760 * relatively small clock corrections can be added or
1761 * subtracted at each clock tick. The drawbacks of a large
1762 * factor are a) that the clock register overflows more quickly
1763 * (not such a big deal) and b) that the increment per tick has
1764 * to fit into 24 bits. As a result we need to use a shift of
1765 * 19 so we can fit a value of 16 into the TIMINCA register.
1766 */
1767 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1768 wr32(E1000_TIMINCA,
1769 (1 << E1000_TIMINCA_16NS_SHIFT) |
1770 (16 << IGB_82576_TSYNC_SHIFT));
1771
1772 /* Set registers so that rollover occurs soon to test this. */
1773 wr32(E1000_SYSTIML, 0x00000000);
1774 wr32(E1000_SYSTIMH, 0xFF800000);
1775 wrfl();
1776
1777 timecounter_init(&adapter->clock,
1778 &adapter->cycles,
1779 ktime_to_ns(ktime_get_real()));
1780 /*
1781 * Synchronize our NIC clock against system wall clock. NIC
1782 * time stamp reading requires ~3us per sample, each sample
1783 * was pretty stable even under load => only require 10
1784 * samples for each offset comparison.
1785 */
1786 memset(&adapter->compare, 0, sizeof(adapter->compare));
1787 adapter->compare.source = &adapter->clock;
1788 adapter->compare.target = ktime_get_real;
1789 adapter->compare.num_samples = 10;
1790 timecompare_update(&adapter->compare, 0);
1791 break;
1792 case e1000_82575:
1793 /* 82575 does not support timesync */
1794 default:
1795 break;
1796 }
1797
1798}
1799
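
[Editor's note: a stand-alone sketch of the shift/increment arithmetic described in the comment above. This is illustrative only; it assumes the TIMINCA increment field is 24 bits wide and that the increment value is applied once per 16 ns tick, as the 16NS shift name suggests.]

/* Stand-alone sketch of the 82576 timesync scaling (not driver code). */
#include <assert.h>
#include <stdint.h>

#define TSYNC_SHIFT 19        /* same value described by the IGB_82576_TSYNC_SHIFT comment */

int main(void)
{
	uint32_t incvalue = 16u << TSYNC_SHIFT;      /* 0x800000, fits in a 24-bit field */
	assert(incvalue < (1u << 24));

	/* the counter gains incvalue per 16 ns tick => 2^19 counts per nanosecond */
	uint64_t counts_per_ns = incvalue / 16;
	assert(counts_per_ns == (1u << TSYNC_SHIFT));

	/* timecounter converts back with mult = 1, shift = 19: ns = cycles >> shift */
	uint64_t cycles = 1000 * counts_per_ns;      /* counts accumulated over 1 us */
	assert((cycles >> TSYNC_SHIFT) == 1000);
	return 0;
}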
1800/**
1685 * igb_sw_init - Initialize general software structures (struct igb_adapter) 1801 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1686 * @adapter: board private structure to initialize 1802 * @adapter: board private structure to initialize
1687 * 1803 *
@@ -1699,20 +1815,37 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1699 1815
1700 adapter->tx_ring_count = IGB_DEFAULT_TXD; 1816 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1701 adapter->rx_ring_count = IGB_DEFAULT_RXD; 1817 adapter->rx_ring_count = IGB_DEFAULT_RXD;
1702 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1818 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1703 adapter->rx_ps_hdr_size = 0; /* disable packet split */ 1819 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1820
1704 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1821 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1705 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 1822 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1706 1823
1707 /* This call may decrease the number of queues depending on 1824#ifdef CONFIG_PCI_IOV
1708 * interrupt mode. */ 1825 if (hw->mac.type == e1000_82576)
1709 igb_set_interrupt_capability(adapter); 1826 adapter->vfs_allocated_count = max_vfs;
1827
1828#endif /* CONFIG_PCI_IOV */
1829 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1830
1831 /*
1832 * if rss_queues > 4 or VFs are going to be allocated with rss_queues
1833 * then we should combine the queues into a queue pair in order to
1834 * conserve interrupts, since they are in limited supply
1835 */
1836 if ((adapter->rss_queues > 4) ||
1837 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1838 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1710 1839
1711 if (igb_alloc_queues(adapter)) { 1840 /* This call may decrease the number of queues */
1841 if (igb_init_interrupt_scheme(adapter)) {
1712 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 1842 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1713 return -ENOMEM; 1843 return -ENOMEM;
1714 } 1844 }
1715 1845
1846 igb_init_hw_timer(adapter);
1847 igb_probe_vfs(adapter);
1848
1716 /* Explicitly disable IRQ since the NIC can be in any state. */ 1849 /* Explicitly disable IRQ since the NIC can be in any state. */
1717 igb_irq_disable(adapter); 1850 igb_irq_disable(adapter);
1718 1851
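
[Editor's note: the queue-pairing decision above reduces to a small predicate. The sketch below is illustrative only; the helper name is hypothetical and the condition mirrors the one used in igb_sw_init().]

#include <stdbool.h>
#include <stdio.h>

/* hypothetical helper mirroring the condition used in igb_sw_init() */
static bool needs_queue_pairs(unsigned int rss_queues, unsigned int vfs)
{
	return (rss_queues > 4) || (rss_queues > 1 && vfs > 6);
}

int main(void)
{
	printf("8 rss queues, 0 VFs -> pairs: %d\n", needs_queue_pairs(8, 0)); /* 1 */
	printf("4 rss queues, 7 VFs -> pairs: %d\n", needs_queue_pairs(4, 7)); /* 1 */
	printf("2 rss queues, 0 VFs -> pairs: %d\n", needs_queue_pairs(2, 0)); /* 0 */
	return 0;
}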
@@ -1757,21 +1890,12 @@ static int igb_open(struct net_device *netdev)
1757 1890
1758 /* e1000_power_up_phy(adapter); */ 1891 /* e1000_power_up_phy(adapter); */
1759 1892
1760 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1761 if ((adapter->hw.mng_cookie.status &
1762 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1763 igb_update_mng_vlan(adapter);
1764
1765 /* before we allocate an interrupt, we must be ready to handle it. 1893 /* before we allocate an interrupt, we must be ready to handle it.
1766 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1894 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1767 * as soon as we call pci_request_irq, so we have to setup our 1895 * as soon as we call pci_request_irq, so we have to setup our
1768 * clean_rx handler before we do so. */ 1896 * clean_rx handler before we do so. */
1769 igb_configure(adapter); 1897 igb_configure(adapter);
1770 1898
1771 igb_vmm_control(adapter);
1772 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
1773 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1774
1775 err = igb_request_irq(adapter); 1899 err = igb_request_irq(adapter);
1776 if (err) 1900 if (err)
1777 goto err_req_irq; 1901 goto err_req_irq;
@@ -1779,18 +1903,28 @@ static int igb_open(struct net_device *netdev)
1779 /* From here on the code is the same as igb_up() */ 1903 /* From here on the code is the same as igb_up() */
1780 clear_bit(__IGB_DOWN, &adapter->state); 1904 clear_bit(__IGB_DOWN, &adapter->state);
1781 1905
1782 for (i = 0; i < adapter->num_rx_queues; i++) 1906 for (i = 0; i < adapter->num_q_vectors; i++) {
1783 napi_enable(&adapter->rx_ring[i].napi); 1907 struct igb_q_vector *q_vector = adapter->q_vector[i];
1908 napi_enable(&q_vector->napi);
1909 }
1784 1910
1785 /* Clear any pending interrupts. */ 1911 /* Clear any pending interrupts. */
1786 rd32(E1000_ICR); 1912 rd32(E1000_ICR);
1787 1913
1788 igb_irq_enable(adapter); 1914 igb_irq_enable(adapter);
1789 1915
1916 /* notify VFs that reset has been completed */
1917 if (adapter->vfs_allocated_count) {
1918 u32 reg_data = rd32(E1000_CTRL_EXT);
1919 reg_data |= E1000_CTRL_EXT_PFRSTD;
1920 wr32(E1000_CTRL_EXT, reg_data);
1921 }
1922
1790 netif_tx_start_all_queues(netdev); 1923 netif_tx_start_all_queues(netdev);
1791 1924
1792 /* Fire a link status change interrupt to start the watchdog. */ 1925 /* start the watchdog. */
1793 wr32(E1000_ICS, E1000_ICS_LSC); 1926 hw->mac.get_link_status = 1;
1927 schedule_work(&adapter->watchdog_task);
1794 1928
1795 return 0; 1929 return 0;
1796 1930
@@ -1829,28 +1963,18 @@ static int igb_close(struct net_device *netdev)
1829 igb_free_all_tx_resources(adapter); 1963 igb_free_all_tx_resources(adapter);
1830 igb_free_all_rx_resources(adapter); 1964 igb_free_all_rx_resources(adapter);
1831 1965
1832 /* kill manageability vlan ID if supported, but not if a vlan with
1833 * the same ID is registered on the host OS (let 8021q kill it) */
1834 if ((adapter->hw.mng_cookie.status &
1835 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1836 !(adapter->vlgrp &&
1837 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1838 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1839
1840 return 0; 1966 return 0;
1841} 1967}
1842 1968
1843/** 1969/**
1844 * igb_setup_tx_resources - allocate Tx resources (Descriptors) 1970 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1845 * @adapter: board private structure
1846 * @tx_ring: tx descriptor ring (for a specific queue) to setup 1971 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1847 * 1972 *
1848 * Return 0 on success, negative on failure 1973 * Return 0 on success, negative on failure
1849 **/ 1974 **/
1850int igb_setup_tx_resources(struct igb_adapter *adapter, 1975int igb_setup_tx_resources(struct igb_ring *tx_ring)
1851 struct igb_ring *tx_ring)
1852{ 1976{
1853 struct pci_dev *pdev = adapter->pdev; 1977 struct pci_dev *pdev = tx_ring->pdev;
1854 int size; 1978 int size;
1855 1979
1856 size = sizeof(struct igb_buffer) * tx_ring->count; 1980 size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1863,20 +1987,20 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
1863 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1987 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
1864 tx_ring->size = ALIGN(tx_ring->size, 4096); 1988 tx_ring->size = ALIGN(tx_ring->size, 4096);
1865 1989
1866 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1990 tx_ring->desc = pci_alloc_consistent(pdev,
1991 tx_ring->size,
1867 &tx_ring->dma); 1992 &tx_ring->dma);
1868 1993
1869 if (!tx_ring->desc) 1994 if (!tx_ring->desc)
1870 goto err; 1995 goto err;
1871 1996
1872 tx_ring->adapter = adapter;
1873 tx_ring->next_to_use = 0; 1997 tx_ring->next_to_use = 0;
1874 tx_ring->next_to_clean = 0; 1998 tx_ring->next_to_clean = 0;
1875 return 0; 1999 return 0;
1876 2000
1877err: 2001err:
1878 vfree(tx_ring->buffer_info); 2002 vfree(tx_ring->buffer_info);
1879 dev_err(&adapter->pdev->dev, 2003 dev_err(&pdev->dev,
1880 "Unable to allocate memory for the transmit descriptor ring\n"); 2004 "Unable to allocate memory for the transmit descriptor ring\n");
1881 return -ENOMEM; 2005 return -ENOMEM;
1882} 2006}
@@ -1890,13 +2014,13 @@ err:
1890 **/ 2014 **/
1891static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 2015static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1892{ 2016{
2017 struct pci_dev *pdev = adapter->pdev;
1893 int i, err = 0; 2018 int i, err = 0;
1894 int r_idx;
1895 2019
1896 for (i = 0; i < adapter->num_tx_queues; i++) { 2020 for (i = 0; i < adapter->num_tx_queues; i++) {
1897 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2021 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
1898 if (err) { 2022 if (err) {
1899 dev_err(&adapter->pdev->dev, 2023 dev_err(&pdev->dev,
1900 "Allocation for Tx Queue %u failed\n", i); 2024 "Allocation for Tx Queue %u failed\n", i);
1901 for (i--; i >= 0; i--) 2025 for (i--; i >= 0; i--)
1902 igb_free_tx_resources(&adapter->tx_ring[i]); 2026 igb_free_tx_resources(&adapter->tx_ring[i]);
@@ -1904,57 +2028,24 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1904 } 2028 }
1905 } 2029 }
1906 2030
1907 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { 2031 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
1908 r_idx = i % adapter->num_tx_queues; 2032 int r_idx = i % adapter->num_tx_queues;
1909 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 2033 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1910 } 2034 }
1911 return err; 2035 return err;
1912} 2036}
1913 2037
1914/** 2038/**
1915 * igb_configure_tx - Configure transmit Unit after Reset 2039 * igb_setup_tctl - configure the transmit control registers
1916 * @adapter: board private structure 2040 * @adapter: Board private structure
1917 *
1918 * Configure the Tx unit of the MAC after a reset.
1919 **/ 2041 **/
1920static void igb_configure_tx(struct igb_adapter *adapter) 2042void igb_setup_tctl(struct igb_adapter *adapter)
1921{ 2043{
1922 u64 tdba;
1923 struct e1000_hw *hw = &adapter->hw; 2044 struct e1000_hw *hw = &adapter->hw;
1924 u32 tctl; 2045 u32 tctl;
1925 u32 txdctl, txctrl;
1926 int i, j;
1927
1928 for (i = 0; i < adapter->num_tx_queues; i++) {
1929 struct igb_ring *ring = &adapter->tx_ring[i];
1930 j = ring->reg_idx;
1931 wr32(E1000_TDLEN(j),
1932 ring->count * sizeof(union e1000_adv_tx_desc));
1933 tdba = ring->dma;
1934 wr32(E1000_TDBAL(j),
1935 tdba & 0x00000000ffffffffULL);
1936 wr32(E1000_TDBAH(j), tdba >> 32);
1937
1938 ring->head = E1000_TDH(j);
1939 ring->tail = E1000_TDT(j);
1940 writel(0, hw->hw_addr + ring->tail);
1941 writel(0, hw->hw_addr + ring->head);
1942 txdctl = rd32(E1000_TXDCTL(j));
1943 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1944 wr32(E1000_TXDCTL(j), txdctl);
1945
1946 /* Turn off Relaxed Ordering on head write-backs. The
1947 * writebacks MUST be delivered in order or it will
1948 * completely screw up our bookeeping.
1949 */
1950 txctrl = rd32(E1000_DCA_TXCTRL(j));
1951 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1952 wr32(E1000_DCA_TXCTRL(j), txctrl);
1953 }
1954 2046
1955 /* disable queue 0 to prevent tail bump w/o re-configuration */ 2047 /* disable queue 0 which is enabled by default on 82575 and 82576 */
1956 if (adapter->vfs_allocated_count) 2048 wr32(E1000_TXDCTL(0), 0);
1957 wr32(E1000_TXDCTL(0), 0);
1958 2049
1959 /* Program the Transmit Control Register */ 2050 /* Program the Transmit Control Register */
1960 tctl = rd32(E1000_TCTL); 2051 tctl = rd32(E1000_TCTL);
@@ -1964,9 +2055,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1964 2055
1965 igb_config_collision_dist(hw); 2056 igb_config_collision_dist(hw);
1966 2057
1967 /* Setup Transmit Descriptor Settings for eop descriptor */
1968 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
1969
1970 /* Enable transmits */ 2058 /* Enable transmits */
1971 tctl |= E1000_TCTL_EN; 2059 tctl |= E1000_TCTL_EN;
1972 2060
@@ -1974,16 +2062,69 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1974} 2062}
1975 2063
1976/** 2064/**
1977 * igb_setup_rx_resources - allocate Rx resources (Descriptors) 2065 * igb_configure_tx_ring - Configure transmit ring after Reset
1978 * @adapter: board private structure 2066 * @adapter: board private structure
2067 * @ring: tx ring to configure
2068 *
2069 * Configure a transmit ring after a reset.
2070 **/
2071void igb_configure_tx_ring(struct igb_adapter *adapter,
2072 struct igb_ring *ring)
2073{
2074 struct e1000_hw *hw = &adapter->hw;
2075 u32 txdctl;
2076 u64 tdba = ring->dma;
2077 int reg_idx = ring->reg_idx;
2078
2079 /* disable the queue */
2080 txdctl = rd32(E1000_TXDCTL(reg_idx));
2081 wr32(E1000_TXDCTL(reg_idx),
2082 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2083 wrfl();
2084 mdelay(10);
2085
2086 wr32(E1000_TDLEN(reg_idx),
2087 ring->count * sizeof(union e1000_adv_tx_desc));
2088 wr32(E1000_TDBAL(reg_idx),
2089 tdba & 0x00000000ffffffffULL);
2090 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2091
2092 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2093 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2094 writel(0, ring->head);
2095 writel(0, ring->tail);
2096
2097 txdctl |= IGB_TX_PTHRESH;
2098 txdctl |= IGB_TX_HTHRESH << 8;
2099 txdctl |= IGB_TX_WTHRESH << 16;
2100
2101 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2102 wr32(E1000_TXDCTL(reg_idx), txdctl);
2103}
2104
2105/**
2106 * igb_configure_tx - Configure transmit Unit after Reset
2107 * @adapter: board private structure
2108 *
2109 * Configure the Tx unit of the MAC after a reset.
2110 **/
2111static void igb_configure_tx(struct igb_adapter *adapter)
2112{
2113 int i;
2114
2115 for (i = 0; i < adapter->num_tx_queues; i++)
2116 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
2117}
2118
2119/**
2120 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1979 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2121 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1980 * 2122 *
1981 * Returns 0 on success, negative on failure 2123 * Returns 0 on success, negative on failure
1982 **/ 2124 **/
1983int igb_setup_rx_resources(struct igb_adapter *adapter, 2125int igb_setup_rx_resources(struct igb_ring *rx_ring)
1984 struct igb_ring *rx_ring)
1985{ 2126{
1986 struct pci_dev *pdev = adapter->pdev; 2127 struct pci_dev *pdev = rx_ring->pdev;
1987 int size, desc_len; 2128 int size, desc_len;
1988 2129
1989 size = sizeof(struct igb_buffer) * rx_ring->count; 2130 size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2007,13 +2148,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
2007 rx_ring->next_to_clean = 0; 2148 rx_ring->next_to_clean = 0;
2008 rx_ring->next_to_use = 0; 2149 rx_ring->next_to_use = 0;
2009 2150
2010 rx_ring->adapter = adapter;
2011
2012 return 0; 2151 return 0;
2013 2152
2014err: 2153err:
2015 vfree(rx_ring->buffer_info); 2154 vfree(rx_ring->buffer_info);
2016 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 2155 rx_ring->buffer_info = NULL;
2156 dev_err(&pdev->dev, "Unable to allocate memory for "
2017 "the receive descriptor ring\n"); 2157 "the receive descriptor ring\n");
2018 return -ENOMEM; 2158 return -ENOMEM;
2019} 2159}
@@ -2027,12 +2167,13 @@ err:
2027 **/ 2167 **/
2028static int igb_setup_all_rx_resources(struct igb_adapter *adapter) 2168static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2029{ 2169{
2170 struct pci_dev *pdev = adapter->pdev;
2030 int i, err = 0; 2171 int i, err = 0;
2031 2172
2032 for (i = 0; i < adapter->num_rx_queues; i++) { 2173 for (i = 0; i < adapter->num_rx_queues; i++) {
2033 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2174 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
2034 if (err) { 2175 if (err) {
2035 dev_err(&adapter->pdev->dev, 2176 dev_err(&pdev->dev,
2036 "Allocation for Rx Queue %u failed\n", i); 2177 "Allocation for Rx Queue %u failed\n", i);
2037 for (i--; i >= 0; i--) 2178 for (i--; i >= 0; i--)
2038 igb_free_rx_resources(&adapter->rx_ring[i]); 2179 igb_free_rx_resources(&adapter->rx_ring[i]);
@@ -2044,15 +2185,118 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2044} 2185}
2045 2186
2046/** 2187/**
2188 * igb_setup_mrqc - configure the multiple receive queue control registers
2189 * @adapter: Board private structure
2190 **/
2191static void igb_setup_mrqc(struct igb_adapter *adapter)
2192{
2193 struct e1000_hw *hw = &adapter->hw;
2194 u32 mrqc, rxcsum;
2195 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2196 union e1000_reta {
2197 u32 dword;
2198 u8 bytes[4];
2199 } reta;
2200 static const u8 rsshash[40] = {
2201 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2202 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2203 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2204 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2205
2206 /* Fill out hash function seeds */
2207 for (j = 0; j < 10; j++) {
2208 u32 rsskey = rsshash[(j * 4)];
2209 rsskey |= rsshash[(j * 4) + 1] << 8;
2210 rsskey |= rsshash[(j * 4) + 2] << 16;
2211 rsskey |= rsshash[(j * 4) + 3] << 24;
2212 array_wr32(E1000_RSSRK(0), j, rsskey);
2213 }
2214
2215 num_rx_queues = adapter->rss_queues;
2216
2217 if (adapter->vfs_allocated_count) {
2218 /* 82575 and 82576 support 2 RSS queues for VMDq */
2219 switch (hw->mac.type) {
2220 case e1000_82576:
2221 shift = 3;
2222 num_rx_queues = 2;
2223 break;
2224 case e1000_82575:
2225 shift = 2;
2226 shift2 = 6;
2227 default:
2228 break;
2229 }
2230 } else {
2231 if (hw->mac.type == e1000_82575)
2232 shift = 6;
2233 }
2234
2235 for (j = 0; j < (32 * 4); j++) {
2236 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2237 if (shift2)
2238 reta.bytes[j & 3] |= num_rx_queues << shift2;
2239 if ((j & 3) == 3)
2240 wr32(E1000_RETA(j >> 2), reta.dword);
2241 }
2242
2243 /*
2244 * Disable raw packet checksumming so that RSS hash is placed in
2245 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2246 * offloads as they are enabled by default
2247 */
2248 rxcsum = rd32(E1000_RXCSUM);
2249 rxcsum |= E1000_RXCSUM_PCSD;
2250
2251 if (adapter->hw.mac.type >= e1000_82576)
2252 /* Enable Receive Checksum Offload for SCTP */
2253 rxcsum |= E1000_RXCSUM_CRCOFL;
2254
2255 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2256 wr32(E1000_RXCSUM, rxcsum);
2257
2258 /* If VMDq is enabled then we set the appropriate mode for that, else
2259 * we default to RSS so that an RSS hash is calculated per packet even
2260 * if we are only using one queue */
2261 if (adapter->vfs_allocated_count) {
2262 if (hw->mac.type > e1000_82575) {
2263 /* Set the default pool for the PF's first queue */
2264 u32 vtctl = rd32(E1000_VT_CTL);
2265 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2266 E1000_VT_CTL_DISABLE_DEF_POOL);
2267 vtctl |= adapter->vfs_allocated_count <<
2268 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2269 wr32(E1000_VT_CTL, vtctl);
2270 }
2271 if (adapter->rss_queues > 1)
2272 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2273 else
2274 mrqc = E1000_MRQC_ENABLE_VMDQ;
2275 } else {
2276 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2277 }
2278 igb_vmm_control(adapter);
2279
2280 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2281 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2282 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2283 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2284 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2285 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2286 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2287 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2288
2289 wr32(E1000_MRQC, mrqc);
2290}
2291
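
[Editor's note: a small user-space sketch of how the redirection table filled in above spreads flows across queues. Illustrative only; the hash value below is a stand-in, not the Toeplitz hash the hardware computes with the RSSRK key, and the 4-queue / zero-shift case corresponds to an 82576 without VFs.]

#include <stdint.h>
#include <stdio.h>

#define RETA_ENTRIES 128   /* 32 RETA registers x 4 bytes, as in the loop above */

int main(void)
{
	uint8_t reta[RETA_ENTRIES];
	unsigned int num_rx_queues = 4, shift = 0;
	unsigned int j;

	/* same round-robin fill pattern as igb_setup_mrqc() */
	for (j = 0; j < RETA_ENTRIES; j++)
		reta[j] = (j % num_rx_queues) << shift;

	/* the NIC indexes the table with the low 7 bits of the RSS hash */
	uint32_t rss_hash = 0x9e3779b9;               /* stand-in hash value */
	printf("hash 0x%08x -> rx queue %u\n", rss_hash,
	       (unsigned int)reta[rss_hash & 0x7f]);
	return 0;
}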
2292/**
2047 * igb_setup_rctl - configure the receive control registers 2293 * igb_setup_rctl - configure the receive control registers
2048 * @adapter: Board private structure 2294 * @adapter: Board private structure
2049 **/ 2295 **/
2050static void igb_setup_rctl(struct igb_adapter *adapter) 2296void igb_setup_rctl(struct igb_adapter *adapter)
2051{ 2297{
2052 struct e1000_hw *hw = &adapter->hw; 2298 struct e1000_hw *hw = &adapter->hw;
2053 u32 rctl; 2299 u32 rctl;
2054 u32 srrctl = 0;
2055 int i;
2056 2300
2057 rctl = rd32(E1000_RCTL); 2301 rctl = rd32(E1000_RCTL);
2058 2302
@@ -2069,75 +2313,45 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
2069 */ 2313 */
2070 rctl |= E1000_RCTL_SECRC; 2314 rctl |= E1000_RCTL_SECRC;
2071 2315
2072 /* 2316 /* disable store bad packets and clear size bits. */
2073 * disable store bad packets and clear size bits.
2074 */
2075 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); 2317 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2076 2318
2077 /* enable LPE when to prevent packets larger than max_frame_size */ 2319 /* enable LPE to prevent packets larger than max_frame_size */
2078 rctl |= E1000_RCTL_LPE; 2320 rctl |= E1000_RCTL_LPE;
2079 2321
2080 /* Setup buffer sizes */ 2322 /* disable queue 0 to prevent tail write w/o re-config */
2081 switch (adapter->rx_buffer_len) { 2323 wr32(E1000_RXDCTL(0), 0);
2082 case IGB_RXBUFFER_256:
2083 rctl |= E1000_RCTL_SZ_256;
2084 break;
2085 case IGB_RXBUFFER_512:
2086 rctl |= E1000_RCTL_SZ_512;
2087 break;
2088 default:
2089 srrctl = ALIGN(adapter->rx_buffer_len, 1024)
2090 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2091 break;
2092 }
2093
2094 /* 82575 and greater support packet-split where the protocol
2095 * header is placed in skb->data and the packet data is
2096 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2097 * In the case of a non-split, skb->data is linearly filled,
2098 * followed by the page buffers. Therefore, skb->data is
2099 * sized to hold the largest protocol header.
2100 */
2101 /* allocations using alloc_page take too long for regular MTU
2102 * so only enable packet split for jumbo frames */
2103 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2104 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
2105 srrctl |= adapter->rx_ps_hdr_size <<
2106 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2107 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2108 } else {
2109 adapter->rx_ps_hdr_size = 0;
2110 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111 }
2112 2324
2113 /* Attention!!! For SR-IOV PF driver operations you must enable 2325 /* Attention!!! For SR-IOV PF driver operations you must enable
2114 * queue drop for all VF and PF queues to prevent head of line blocking 2326 * queue drop for all VF and PF queues to prevent head of line blocking
2115 * if an un-trusted VF does not provide descriptors to hardware. 2327 * if an un-trusted VF does not provide descriptors to hardware.
2116 */ 2328 */
2117 if (adapter->vfs_allocated_count) { 2329 if (adapter->vfs_allocated_count) {
2118 u32 vmolr;
2119
2120 /* set all queue drop enable bits */ 2330 /* set all queue drop enable bits */
2121 wr32(E1000_QDE, ALL_QUEUES); 2331 wr32(E1000_QDE, ALL_QUEUES);
2122 srrctl |= E1000_SRRCTL_DROP_EN; 2332 }
2123 2333
2124 /* disable queue 0 to prevent tail write w/o re-config */ 2334 wr32(E1000_RCTL, rctl);
2125 wr32(E1000_RXDCTL(0), 0); 2335}
2126 2336
2127 vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); 2337static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2128 if (rctl & E1000_RCTL_LPE) 2338 int vfn)
2129 vmolr |= E1000_VMOLR_LPE; 2339{
2130 if (adapter->num_rx_queues > 1) 2340 struct e1000_hw *hw = &adapter->hw;
2131 vmolr |= E1000_VMOLR_RSSE; 2341 u32 vmolr;
2132 wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
2133 }
2134 2342
2135 for (i = 0; i < adapter->num_rx_queues; i++) { 2343 /* if it isn't the PF check to see if VFs are enabled and
2136 int j = adapter->rx_ring[i].reg_idx; 2344 * increase the size to support vlan tags */
2137 wr32(E1000_SRRCTL(j), srrctl); 2345 if (vfn < adapter->vfs_allocated_count &&
2138 } 2346 adapter->vf_data[vfn].vlans_enabled)
2347 size += VLAN_TAG_SIZE;
2139 2348
2140 wr32(E1000_RCTL, rctl); 2349 vmolr = rd32(E1000_VMOLR(vfn));
2350 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2351 vmolr |= size | E1000_VMOLR_LPE;
2352 wr32(E1000_VMOLR(vfn), vmolr);
2353
2354 return 0;
2141} 2355}
2142 2356
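
[Editor's note: a quick worked example of the per-pool limit that igb_set_vf_rlpml() programs. The 14/4/4 byte constants stand in for ETH_HLEN, ETH_FCS_LEN and VLAN_TAG_SIZE and are assumptions for illustration, not values taken from this patch.]

#include <assert.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int max_frame = mtu + 14 + 4;   /* header + FCS, as in igb_sw_init() */
	unsigned int vf_limit = max_frame;

	/* a VF with VLANs enabled gets room for the 4-byte tag on top */
	vf_limit += 4;

	assert(max_frame == 1518 && vf_limit == 1522);
	/* vf_limit is the value written into the RLPML field of that pool's VMOLR */
	return 0;
}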
2143/** 2357/**
@@ -2159,33 +2373,107 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
2159 * size and set the VMOLR RLPML to the size we need */ 2373 * size and set the VMOLR RLPML to the size we need */
2160 if (pf_id) { 2374 if (pf_id) {
2161 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); 2375 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2162 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE; 2376 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2163 } 2377 }
2164 2378
2165 wr32(E1000_RLPML, max_frame_size); 2379 wr32(E1000_RLPML, max_frame_size);
2166} 2380}
2167 2381
2382static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2383{
2384 struct e1000_hw *hw = &adapter->hw;
2385 u32 vmolr;
2386
2387 /*
2388 * This register exists only on 82576 and newer, so on older hardware
2389 * we should exit and do nothing
2390 */
2391 if (hw->mac.type < e1000_82576)
2392 return;
2393
2394 vmolr = rd32(E1000_VMOLR(vfn));
2395 vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
2396 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2397
2398 /* clear all bits that might not be set */
2399 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2400
2401 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
2402 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2403 /*
2404 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2405 * multicast packets
2406 */
2407 if (vfn <= adapter->vfs_allocated_count)
2408 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2409
2410 wr32(E1000_VMOLR(vfn), vmolr);
2411}
2412
2168/** 2413/**
2169 * igb_configure_vt_default_pool - Configure VT default pool 2414 * igb_configure_rx_ring - Configure a receive ring after Reset
2170 * @adapter: board private structure 2415 * @adapter: board private structure
2416 * @ring: receive ring to be configured
2171 * 2417 *
2172 * Configure the default pool 2418 * Configure the Rx unit of the MAC after a reset.
2173 **/ 2419 **/
2174static void igb_configure_vt_default_pool(struct igb_adapter *adapter) 2420void igb_configure_rx_ring(struct igb_adapter *adapter,
2421 struct igb_ring *ring)
2175{ 2422{
2176 struct e1000_hw *hw = &adapter->hw; 2423 struct e1000_hw *hw = &adapter->hw;
2177 u16 pf_id = adapter->vfs_allocated_count; 2424 u64 rdba = ring->dma;
2178 u32 vtctl; 2425 int reg_idx = ring->reg_idx;
2426 u32 srrctl, rxdctl;
2427
2428 /* disable the queue */
2429 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2430 wr32(E1000_RXDCTL(reg_idx),
2431 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2432
2433 /* Set DMA base address registers */
2434 wr32(E1000_RDBAL(reg_idx),
2435 rdba & 0x00000000ffffffffULL);
2436 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2437 wr32(E1000_RDLEN(reg_idx),
2438 ring->count * sizeof(union e1000_adv_rx_desc));
2439
2440 /* initialize head and tail */
2441 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2442 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2443 writel(0, ring->head);
2444 writel(0, ring->tail);
2445
2446 /* set descriptor configuration */
2447 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2448 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2449 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2450#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2451 srrctl |= IGB_RXBUFFER_16384 >>
2452 E1000_SRRCTL_BSIZEPKT_SHIFT;
2453#else
2454 srrctl |= (PAGE_SIZE / 2) >>
2455 E1000_SRRCTL_BSIZEPKT_SHIFT;
2456#endif
2457 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2458 } else {
2459 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2460 E1000_SRRCTL_BSIZEPKT_SHIFT;
2461 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2462 }
2179 2463
2180 /* not in sr-iov mode - do nothing */ 2464 wr32(E1000_SRRCTL(reg_idx), srrctl);
2181 if (!pf_id) 2465
2182 return; 2466 /* set filtering for VMDQ pools */
2467 igb_set_vmolr(adapter, reg_idx & 0x7);
2183 2468
2184 vtctl = rd32(E1000_VT_CTL); 2469 /* enable receive descriptor fetching */
2185 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | 2470 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2186 E1000_VT_CTL_DISABLE_DEF_POOL); 2471 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2187 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT; 2472 rxdctl &= 0xFFF00000;
2188 wr32(E1000_VT_CTL, vtctl); 2473 rxdctl |= IGB_RX_PTHRESH;
2474 rxdctl |= IGB_RX_HTHRESH << 8;
2475 rxdctl |= IGB_RX_WTHRESH << 16;
2476 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2189} 2477}
2190 2478
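
[Editor's note: a short worked example of the SRRCTL packet-buffer encoding used above. Illustrative only; it assumes E1000_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. the field is in 1 KB units, which matches the ALIGN(..., 1024) in the code, and a 4 KB page size.]

#include <assert.h>

#define BSIZEPKT_SHIFT 10        /* assumed: packet buffer size field in 1 KB units */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* one-buffer case: a 2048-byte receive buffer */
	unsigned int rx_buffer_len = 2048;
	unsigned int bsizepkt = ALIGN_UP(rx_buffer_len, 1024) >> BSIZEPKT_SHIFT;
	assert(bsizepkt == 2);   /* SRRCTL gets "2", meaning 2 KB per packet buffer */

	/* header-split case: half a 4 KB page backs the packet data */
	unsigned int page_half = 4096 / 2;
	assert((page_half >> BSIZEPKT_SHIFT) == 2);
	return 0;
}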
2191/** 2479/**
@@ -2196,112 +2484,19 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2196 **/ 2484 **/
2197static void igb_configure_rx(struct igb_adapter *adapter) 2485static void igb_configure_rx(struct igb_adapter *adapter)
2198{ 2486{
2199 u64 rdba;
2200 struct e1000_hw *hw = &adapter->hw;
2201 u32 rctl, rxcsum;
2202 u32 rxdctl;
2203 int i; 2487 int i;
2204 2488
2205 /* disable receives while setting up the descriptors */ 2489 /* set UTA to appropriate mode */
2206 rctl = rd32(E1000_RCTL); 2490 igb_set_uta(adapter);
2207 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2208 wrfl();
2209 mdelay(10);
2210 2491
2211 if (adapter->itr_setting > 3) 2492 /* set the correct pool for the PF default MAC address in entry 0 */
2212 wr32(E1000_ITR, adapter->itr); 2493 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2494 adapter->vfs_allocated_count);
2213 2495
2214 /* Setup the HW Rx Head and Tail Descriptor Pointers and 2496 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2215 * the Base and Length of the Rx Descriptor Ring */ 2497 * the Base and Length of the Rx Descriptor Ring */
2216 for (i = 0; i < adapter->num_rx_queues; i++) { 2498 for (i = 0; i < adapter->num_rx_queues; i++)
2217 struct igb_ring *ring = &adapter->rx_ring[i]; 2499 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
2218 int j = ring->reg_idx;
2219 rdba = ring->dma;
2220 wr32(E1000_RDBAL(j),
2221 rdba & 0x00000000ffffffffULL);
2222 wr32(E1000_RDBAH(j), rdba >> 32);
2223 wr32(E1000_RDLEN(j),
2224 ring->count * sizeof(union e1000_adv_rx_desc));
2225
2226 ring->head = E1000_RDH(j);
2227 ring->tail = E1000_RDT(j);
2228 writel(0, hw->hw_addr + ring->tail);
2229 writel(0, hw->hw_addr + ring->head);
2230
2231 rxdctl = rd32(E1000_RXDCTL(j));
2232 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2233 rxdctl &= 0xFFF00000;
2234 rxdctl |= IGB_RX_PTHRESH;
2235 rxdctl |= IGB_RX_HTHRESH << 8;
2236 rxdctl |= IGB_RX_WTHRESH << 16;
2237 wr32(E1000_RXDCTL(j), rxdctl);
2238 }
2239
2240 if (adapter->num_rx_queues > 1) {
2241 u32 random[10];
2242 u32 mrqc;
2243 u32 j, shift;
2244 union e1000_reta {
2245 u32 dword;
2246 u8 bytes[4];
2247 } reta;
2248
2249 get_random_bytes(&random[0], 40);
2250
2251 if (hw->mac.type >= e1000_82576)
2252 shift = 0;
2253 else
2254 shift = 6;
2255 for (j = 0; j < (32 * 4); j++) {
2256 reta.bytes[j & 3] =
2257 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2258 if ((j & 3) == 3)
2259 writel(reta.dword,
2260 hw->hw_addr + E1000_RETA(0) + (j & ~3));
2261 }
2262 if (adapter->vfs_allocated_count)
2263 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2264 else
2265 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2266
2267 /* Fill out hash function seeds */
2268 for (j = 0; j < 10; j++)
2269 array_wr32(E1000_RSSRK(0), j, random[j]);
2270
2271 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2272 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2273 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2274 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2275 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2276 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2277 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2278 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2279
2280 wr32(E1000_MRQC, mrqc);
2281 } else if (adapter->vfs_allocated_count) {
2282 /* Enable multi-queue for sr-iov */
2283 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2284 }
2285
2286 /* Enable Receive Checksum Offload for TCP and UDP */
2287 rxcsum = rd32(E1000_RXCSUM);
2288 /* Disable raw packet checksumming */
2289 rxcsum |= E1000_RXCSUM_PCSD;
2290
2291 if (adapter->hw.mac.type == e1000_82576)
2292 /* Enable Receive Checksum Offload for SCTP */
2293 rxcsum |= E1000_RXCSUM_CRCOFL;
2294
2295 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2296 wr32(E1000_RXCSUM, rxcsum);
2297
2298 /* Set the default pool for the PF's first queue */
2299 igb_configure_vt_default_pool(adapter);
2300
2301 igb_rlpml_set(adapter);
2302
2303 /* Enable Receives */
2304 wr32(E1000_RCTL, rctl);
2305} 2500}
2306 2501
2307/** 2502/**
@@ -2312,14 +2507,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
2312 **/ 2507 **/
2313void igb_free_tx_resources(struct igb_ring *tx_ring) 2508void igb_free_tx_resources(struct igb_ring *tx_ring)
2314{ 2509{
2315 struct pci_dev *pdev = tx_ring->adapter->pdev;
2316
2317 igb_clean_tx_ring(tx_ring); 2510 igb_clean_tx_ring(tx_ring);
2318 2511
2319 vfree(tx_ring->buffer_info); 2512 vfree(tx_ring->buffer_info);
2320 tx_ring->buffer_info = NULL; 2513 tx_ring->buffer_info = NULL;
2321 2514
2322 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 2515 /* if not set, then don't free */
2516 if (!tx_ring->desc)
2517 return;
2518
2519 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2520 tx_ring->desc, tx_ring->dma);
2323 2521
2324 tx_ring->desc = NULL; 2522 tx_ring->desc = NULL;
2325} 2523}
@@ -2338,12 +2536,13 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2338 igb_free_tx_resources(&adapter->tx_ring[i]); 2536 igb_free_tx_resources(&adapter->tx_ring[i]);
2339} 2537}
2340 2538
2341static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, 2539void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2342 struct igb_buffer *buffer_info) 2540 struct igb_buffer *buffer_info)
2343{ 2541{
2344 buffer_info->dma = 0; 2542 buffer_info->dma = 0;
2345 if (buffer_info->skb) { 2543 if (buffer_info->skb) {
2346 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, 2544 skb_dma_unmap(&tx_ring->pdev->dev,
2545 buffer_info->skb,
2347 DMA_TO_DEVICE); 2546 DMA_TO_DEVICE);
2348 dev_kfree_skb_any(buffer_info->skb); 2547 dev_kfree_skb_any(buffer_info->skb);
2349 buffer_info->skb = NULL; 2548 buffer_info->skb = NULL;
@@ -2358,7 +2557,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2358 **/ 2557 **/
2359static void igb_clean_tx_ring(struct igb_ring *tx_ring) 2558static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2360{ 2559{
2361 struct igb_adapter *adapter = tx_ring->adapter;
2362 struct igb_buffer *buffer_info; 2560 struct igb_buffer *buffer_info;
2363 unsigned long size; 2561 unsigned long size;
2364 unsigned int i; 2562 unsigned int i;
@@ -2369,21 +2567,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2369 2567
2370 for (i = 0; i < tx_ring->count; i++) { 2568 for (i = 0; i < tx_ring->count; i++) {
2371 buffer_info = &tx_ring->buffer_info[i]; 2569 buffer_info = &tx_ring->buffer_info[i];
2372 igb_unmap_and_free_tx_resource(adapter, buffer_info); 2570 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
2373 } 2571 }
2374 2572
2375 size = sizeof(struct igb_buffer) * tx_ring->count; 2573 size = sizeof(struct igb_buffer) * tx_ring->count;
2376 memset(tx_ring->buffer_info, 0, size); 2574 memset(tx_ring->buffer_info, 0, size);
2377 2575
2378 /* Zero out the descriptor ring */ 2576 /* Zero out the descriptor ring */
2379
2380 memset(tx_ring->desc, 0, tx_ring->size); 2577 memset(tx_ring->desc, 0, tx_ring->size);
2381 2578
2382 tx_ring->next_to_use = 0; 2579 tx_ring->next_to_use = 0;
2383 tx_ring->next_to_clean = 0; 2580 tx_ring->next_to_clean = 0;
2384
2385 writel(0, adapter->hw.hw_addr + tx_ring->head);
2386 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2387} 2581}
2388 2582
2389/** 2583/**
@@ -2406,14 +2600,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2406 **/ 2600 **/
2407void igb_free_rx_resources(struct igb_ring *rx_ring) 2601void igb_free_rx_resources(struct igb_ring *rx_ring)
2408{ 2602{
2409 struct pci_dev *pdev = rx_ring->adapter->pdev;
2410
2411 igb_clean_rx_ring(rx_ring); 2603 igb_clean_rx_ring(rx_ring);
2412 2604
2413 vfree(rx_ring->buffer_info); 2605 vfree(rx_ring->buffer_info);
2414 rx_ring->buffer_info = NULL; 2606 rx_ring->buffer_info = NULL;
2415 2607
2416 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2608 /* if not set, then don't free */
2609 if (!rx_ring->desc)
2610 return;
2611
2612 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2613 rx_ring->desc, rx_ring->dma);
2417 2614
2418 rx_ring->desc = NULL; 2615 rx_ring->desc = NULL;
2419} 2616}
@@ -2438,26 +2635,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2438 **/ 2635 **/
2439static void igb_clean_rx_ring(struct igb_ring *rx_ring) 2636static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2440{ 2637{
2441 struct igb_adapter *adapter = rx_ring->adapter;
2442 struct igb_buffer *buffer_info; 2638 struct igb_buffer *buffer_info;
2443 struct pci_dev *pdev = adapter->pdev;
2444 unsigned long size; 2639 unsigned long size;
2445 unsigned int i; 2640 unsigned int i;
2446 2641
2447 if (!rx_ring->buffer_info) 2642 if (!rx_ring->buffer_info)
2448 return; 2643 return;
2644
2449 /* Free all the Rx ring sk_buffs */ 2645 /* Free all the Rx ring sk_buffs */
2450 for (i = 0; i < rx_ring->count; i++) { 2646 for (i = 0; i < rx_ring->count; i++) {
2451 buffer_info = &rx_ring->buffer_info[i]; 2647 buffer_info = &rx_ring->buffer_info[i];
2452 if (buffer_info->dma) { 2648 if (buffer_info->dma) {
2453 if (adapter->rx_ps_hdr_size) 2649 pci_unmap_single(rx_ring->pdev,
2454 pci_unmap_single(pdev, buffer_info->dma, 2650 buffer_info->dma,
2455 adapter->rx_ps_hdr_size, 2651 rx_ring->rx_buffer_len,
2456 PCI_DMA_FROMDEVICE); 2652 PCI_DMA_FROMDEVICE);
2457 else
2458 pci_unmap_single(pdev, buffer_info->dma,
2459 adapter->rx_buffer_len,
2460 PCI_DMA_FROMDEVICE);
2461 buffer_info->dma = 0; 2653 buffer_info->dma = 0;
2462 } 2654 }
2463 2655
@@ -2465,14 +2657,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2465 dev_kfree_skb(buffer_info->skb); 2657 dev_kfree_skb(buffer_info->skb);
2466 buffer_info->skb = NULL; 2658 buffer_info->skb = NULL;
2467 } 2659 }
2660 if (buffer_info->page_dma) {
2661 pci_unmap_page(rx_ring->pdev,
2662 buffer_info->page_dma,
2663 PAGE_SIZE / 2,
2664 PCI_DMA_FROMDEVICE);
2665 buffer_info->page_dma = 0;
2666 }
2468 if (buffer_info->page) { 2667 if (buffer_info->page) {
2469 if (buffer_info->page_dma)
2470 pci_unmap_page(pdev, buffer_info->page_dma,
2471 PAGE_SIZE / 2,
2472 PCI_DMA_FROMDEVICE);
2473 put_page(buffer_info->page); 2668 put_page(buffer_info->page);
2474 buffer_info->page = NULL; 2669 buffer_info->page = NULL;
2475 buffer_info->page_dma = 0;
2476 buffer_info->page_offset = 0; 2670 buffer_info->page_offset = 0;
2477 } 2671 }
2478 } 2672 }
@@ -2485,9 +2679,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2485 2679
2486 rx_ring->next_to_clean = 0; 2680 rx_ring->next_to_clean = 0;
2487 rx_ring->next_to_use = 0; 2681 rx_ring->next_to_use = 0;
2488
2489 writel(0, adapter->hw.hw_addr + rx_ring->head);
2490 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2491} 2682}
2492 2683
2493/** 2684/**
@@ -2521,61 +2712,90 @@ static int igb_set_mac(struct net_device *netdev, void *p)
2521 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2712 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2522 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 2713 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2523 2714
2524 igb_rar_set(hw, hw->mac.addr, 0); 2715 /* set the correct pool for the new PF MAC address in entry 0 */
2525 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); 2716 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2717 adapter->vfs_allocated_count);
2526 2718
2527 return 0; 2719 return 0;
2528} 2720}
2529 2721
2530/** 2722/**
2531 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2723 * igb_write_mc_addr_list - write multicast addresses to MTA
2532 * @netdev: network interface device structure 2724 * @netdev: network interface device structure
2533 * 2725 *
2534 * The set_rx_mode entry point is called whenever the unicast or multicast 2726 * Writes multicast address list to the MTA hash table.
2535 * address lists or the network interface flags are updated. This routine is 2727 * Returns: -ENOMEM on failure
2536 * responsible for configuring the hardware for proper unicast, multicast, 2728 * 0 on no addresses written
2537 * promiscuous mode, and all-multi behavior. 2729 * X on writing X addresses to MTA
2538 **/ 2730 **/
2539static void igb_set_rx_mode(struct net_device *netdev) 2731static int igb_write_mc_addr_list(struct net_device *netdev)
2540{ 2732{
2541 struct igb_adapter *adapter = netdev_priv(netdev); 2733 struct igb_adapter *adapter = netdev_priv(netdev);
2542 struct e1000_hw *hw = &adapter->hw; 2734 struct e1000_hw *hw = &adapter->hw;
2543 unsigned int rar_entries = hw->mac.rar_entry_count -
2544 (adapter->vfs_allocated_count + 1);
2545 struct dev_mc_list *mc_ptr = netdev->mc_list; 2735 struct dev_mc_list *mc_ptr = netdev->mc_list;
2546 u8 *mta_list = NULL; 2736 u8 *mta_list;
2547 u32 rctl; 2737 u32 vmolr = 0;
2548 int i; 2738 int i;
2549 2739
2550 /* Check for Promiscuous and All Multicast modes */ 2740 if (!netdev->mc_count) {
2551 rctl = rd32(E1000_RCTL); 2741 /* nothing to program, so clear mc list */
2742 igb_update_mc_addr_list(hw, NULL, 0);
2743 igb_restore_vf_multicasts(adapter);
2744 return 0;
2745 }
2552 2746
2553 if (netdev->flags & IFF_PROMISC) { 2747 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2554 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2748 if (!mta_list)
2555 rctl &= ~E1000_RCTL_VFE; 2749 return -ENOMEM;
2556 } else {
2557 if (netdev->flags & IFF_ALLMULTI)
2558 rctl |= E1000_RCTL_MPE;
2559 else
2560 rctl &= ~E1000_RCTL_MPE;
2561 2750
2562 if (netdev->uc.count > rar_entries) 2751 /* set vmolr receive overflow multicast bit */
2563 rctl |= E1000_RCTL_UPE; 2752 vmolr |= E1000_VMOLR_ROMPE;
2564 else 2753
2565 rctl &= ~E1000_RCTL_UPE; 2754 /* The shared function expects a packed array of only addresses. */
2566 rctl |= E1000_RCTL_VFE; 2755 mc_ptr = netdev->mc_list;
2756
2757 for (i = 0; i < netdev->mc_count; i++) {
2758 if (!mc_ptr)
2759 break;
2760 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2761 mc_ptr = mc_ptr->next;
2567 } 2762 }
2568 wr32(E1000_RCTL, rctl); 2763 igb_update_mc_addr_list(hw, mta_list, i);
2764 kfree(mta_list);
2765
2766 return netdev->mc_count;
2767}
2768
2769/**
2770 * igb_write_uc_addr_list - write unicast addresses to RAR table
2771 * @netdev: network interface device structure
2772 *
2773 * Writes unicast address list to the RAR table.
2774 * Returns: -ENOMEM on failure/insufficient address space
2775 * 0 on no addresses written
2776 * X on writing X addresses to the RAR table
2777 **/
2778static int igb_write_uc_addr_list(struct net_device *netdev)
2779{
2780 struct igb_adapter *adapter = netdev_priv(netdev);
2781 struct e1000_hw *hw = &adapter->hw;
2782 unsigned int vfn = adapter->vfs_allocated_count;
2783 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2784 int count = 0;
2785
2786 /* return ENOMEM indicating insufficient memory for addresses */
2787 if (netdev->uc.count > rar_entries)
2788 return -ENOMEM;
2569 2789
2570 if (netdev->uc.count && rar_entries) { 2790 if (netdev->uc.count && rar_entries) {
2571 struct netdev_hw_addr *ha; 2791 struct netdev_hw_addr *ha;
2572 list_for_each_entry(ha, &netdev->uc.list, list) { 2792 list_for_each_entry(ha, &netdev->uc.list, list) {
2573 if (!rar_entries) 2793 if (!rar_entries)
2574 break; 2794 break;
2575 igb_rar_set(hw, ha->addr, rar_entries); 2795 igb_rar_set_qsel(adapter, ha->addr,
2576 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 2796 rar_entries--,
2577 rar_entries); 2797 vfn);
2578 rar_entries--; 2798 count++;
2579 } 2799 }
2580 } 2800 }
2581 /* write the addresses in reverse order to avoid write combining */ 2801 /* write the addresses in reverse order to avoid write combining */
@@ -2585,29 +2805,79 @@ static void igb_set_rx_mode(struct net_device *netdev)
2585 } 2805 }
2586 wrfl(); 2806 wrfl();
2587 2807
2588 if (!netdev->mc_count) { 2808 return count;
2589 /* nothing to program, so clear mc list */ 2809}
2590 igb_update_mc_addr_list(hw, NULL, 0); 2810
2591 igb_restore_vf_multicasts(adapter); 2811/**
2592 return; 2812 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2813 * @netdev: network interface device structure
2814 *
2815 * The set_rx_mode entry point is called whenever the unicast or multicast
2816 * address lists or the network interface flags are updated. This routine is
2817 * responsible for configuring the hardware for proper unicast, multicast,
2818 * promiscuous mode, and all-multi behavior.
2819 **/
2820static void igb_set_rx_mode(struct net_device *netdev)
2821{
2822 struct igb_adapter *adapter = netdev_priv(netdev);
2823 struct e1000_hw *hw = &adapter->hw;
2824 unsigned int vfn = adapter->vfs_allocated_count;
2825 u32 rctl, vmolr = 0;
2826 int count;
2827
2828 /* Check for Promiscuous and All Multicast modes */
2829 rctl = rd32(E1000_RCTL);
2830
2831 /* clear the affected bits */
2832 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2833
2834 if (netdev->flags & IFF_PROMISC) {
2835 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2836 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2837 } else {
2838 if (netdev->flags & IFF_ALLMULTI) {
2839 rctl |= E1000_RCTL_MPE;
2840 vmolr |= E1000_VMOLR_MPME;
2841 } else {
2842 /*
2843 * Write addresses to the MTA; if the attempt fails
2844 * then we should just turn on promiscuous mode so
2845 * that we can at least receive multicast traffic
2846 */
2847 count = igb_write_mc_addr_list(netdev);
2848 if (count < 0) {
2849 rctl |= E1000_RCTL_MPE;
2850 vmolr |= E1000_VMOLR_MPME;
2851 } else if (count) {
2852 vmolr |= E1000_VMOLR_ROMPE;
2853 }
2854 }
2855 /*
2856 * Write addresses to available RAR registers; if there is not
2857 * sufficient space to store all the addresses then enable
2858 * unicast promiscuous mode
2859 */
2860 count = igb_write_uc_addr_list(netdev);
2861 if (count < 0) {
2862 rctl |= E1000_RCTL_UPE;
2863 vmolr |= E1000_VMOLR_ROPE;
2864 }
2865 rctl |= E1000_RCTL_VFE;
2593 } 2866 }
2867 wr32(E1000_RCTL, rctl);
2594 2868
2595 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC); 2869 /*
2596 if (!mta_list) { 2870 * In order to support SR-IOV and eventually VMDq it is necessary to set
2597 dev_err(&adapter->pdev->dev, 2871 * the VMOLR to enable the appropriate modes. Without this workaround
2598 "failed to allocate multicast filter list\n"); 2872 * we will have issues with VLAN tag stripping not being done for frames
2873 * that are only arriving because we are the default pool
2874 */
2875 if (hw->mac.type < e1000_82576)
2599 return; 2876 return;
2600 }
2601 2877
2602 /* The shared function expects a packed array of only addresses. */ 2878 vmolr |= rd32(E1000_VMOLR(vfn)) &
2603 for (i = 0; i < netdev->mc_count; i++) { 2879 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
2604 if (!mc_ptr) 2880 wr32(E1000_VMOLR(vfn), vmolr);
2605 break;
2606 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2607 mc_ptr = mc_ptr->next;
2608 }
2609 igb_update_mc_addr_list(hw, mta_list, i);
2610 kfree(mta_list);
2611 igb_restore_vf_multicasts(adapter); 2881 igb_restore_vf_multicasts(adapter);
2612} 2882}
2613 2883
@@ -2669,37 +2939,33 @@ static void igb_watchdog(unsigned long data)
2669static void igb_watchdog_task(struct work_struct *work) 2939static void igb_watchdog_task(struct work_struct *work)
2670{ 2940{
2671 struct igb_adapter *adapter = container_of(work, 2941 struct igb_adapter *adapter = container_of(work,
2672 struct igb_adapter, watchdog_task); 2942 struct igb_adapter,
2943 watchdog_task);
2673 struct e1000_hw *hw = &adapter->hw; 2944 struct e1000_hw *hw = &adapter->hw;
2674 struct net_device *netdev = adapter->netdev; 2945 struct net_device *netdev = adapter->netdev;
2675 struct igb_ring *tx_ring = adapter->tx_ring;
2676 u32 link; 2946 u32 link;
2677 u32 eics = 0;
2678 int i; 2947 int i;
2679 2948
2680 link = igb_has_link(adapter); 2949 link = igb_has_link(adapter);
2681 if ((netif_carrier_ok(netdev)) && link)
2682 goto link_up;
2683
2684 if (link) { 2950 if (link) {
2685 if (!netif_carrier_ok(netdev)) { 2951 if (!netif_carrier_ok(netdev)) {
2686 u32 ctrl; 2952 u32 ctrl;
2687 hw->mac.ops.get_speed_and_duplex(&adapter->hw, 2953 hw->mac.ops.get_speed_and_duplex(hw,
2688 &adapter->link_speed, 2954 &adapter->link_speed,
2689 &adapter->link_duplex); 2955 &adapter->link_duplex);
2690 2956
2691 ctrl = rd32(E1000_CTRL); 2957 ctrl = rd32(E1000_CTRL);
2692 /* Links status message must follow this format */ 2958 /* Links status message must follow this format */
2693 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " 2959 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2694 "Flow Control: %s\n", 2960 "Flow Control: %s\n",
2695 netdev->name, 2961 netdev->name,
2696 adapter->link_speed, 2962 adapter->link_speed,
2697 adapter->link_duplex == FULL_DUPLEX ? 2963 adapter->link_duplex == FULL_DUPLEX ?
2698 "Full Duplex" : "Half Duplex", 2964 "Full Duplex" : "Half Duplex",
2699 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2965 ((ctrl & E1000_CTRL_TFCE) &&
2700 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2966 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
2701 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2967 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
2702 E1000_CTRL_TFCE) ? "TX" : "None"))); 2968 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
2703 2969
2704 /* tweak tx_queue_len according to speed/duplex and 2970 /* tweak tx_queue_len according to speed/duplex and
2705 * adjust the timeout factor */ 2971 * adjust the timeout factor */
@@ -2743,46 +3009,40 @@ static void igb_watchdog_task(struct work_struct *work)
2743 } 3009 }
2744 } 3010 }
2745 3011
2746link_up:
2747 igb_update_stats(adapter); 3012 igb_update_stats(adapter);
3013 igb_update_adaptive(hw);
2748 3014
2749 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 3015 for (i = 0; i < adapter->num_tx_queues; i++) {
2750 adapter->tpt_old = adapter->stats.tpt; 3016 struct igb_ring *tx_ring = &adapter->tx_ring[i];
2751 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old; 3017 if (!netif_carrier_ok(netdev)) {
2752 adapter->colc_old = adapter->stats.colc;
2753
2754 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2755 adapter->gorc_old = adapter->stats.gorc;
2756 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2757 adapter->gotc_old = adapter->stats.gotc;
2758
2759 igb_update_adaptive(&adapter->hw);
2760
2761 if (!netif_carrier_ok(netdev)) {
2762 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2763 /* We've lost link, so the controller stops DMA, 3018 /* We've lost link, so the controller stops DMA,
2764 * but we've got queued Tx work that's never going 3019 * but we've got queued Tx work that's never going
2765 * to get done, so reset controller to flush Tx. 3020 * to get done, so reset controller to flush Tx.
2766 * (Do the reset outside of interrupt context). */ 3021 * (Do the reset outside of interrupt context). */
2767 adapter->tx_timeout_count++; 3022 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2768 schedule_work(&adapter->reset_task); 3023 adapter->tx_timeout_count++;
2769 /* return immediately since reset is imminent */ 3024 schedule_work(&adapter->reset_task);
2770 return; 3025 /* return immediately since reset is imminent */
3026 return;
3027 }
2771 } 3028 }
3029
3030 /* Force detection of hung controller every watchdog period */
3031 tx_ring->detect_tx_hung = true;
2772 } 3032 }
2773 3033
2774 /* Cause software interrupt to ensure rx ring is cleaned */ 3034 /* Cause software interrupt to ensure rx ring is cleaned */
2775 if (adapter->msix_entries) { 3035 if (adapter->msix_entries) {
2776 for (i = 0; i < adapter->num_rx_queues; i++) 3036 u32 eics = 0;
2777 eics |= adapter->rx_ring[i].eims_value; 3037 for (i = 0; i < adapter->num_q_vectors; i++) {
3038 struct igb_q_vector *q_vector = adapter->q_vector[i];
3039 eics |= q_vector->eims_value;
3040 }
2778 wr32(E1000_EICS, eics); 3041 wr32(E1000_EICS, eics);
2779 } else { 3042 } else {
2780 wr32(E1000_ICS, E1000_ICS_RXDMT0); 3043 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2781 } 3044 }
2782 3045
2783 /* Force detection of hung controller every watchdog period */
2784 tx_ring->detect_tx_hung = true;
2785
2786 /* Reset the timer */ 3046 /* Reset the timer */
2787 if (!test_bit(__IGB_DOWN, &adapter->state)) 3047 if (!test_bit(__IGB_DOWN, &adapter->state))
2788 mod_timer(&adapter->watchdog_timer, 3048 mod_timer(&adapter->watchdog_timer,
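The watchdog above now walks every Tx ring: while the carrier is down, any descriptors still queued can never complete because DMA is stopped, so a controller reset is scheduled; otherwise each ring is re-armed for hang detection. In MSI-X mode it then ORs every q_vector's EIMS bit into a single EICS write so each vector fires and cleans its rings. A reduced sketch of those two steps, with desc_unused and eims_values standing in for the driver's own fields:

/* "+ 1 < count": at least one descriptor is still outstanding while
 * the link is down, so the controller must be reset to flush it. */
static int ring_needs_flush(unsigned int desc_unused, unsigned int ring_count)
{
	return desc_unused + 1 < ring_count;
}

/* One EICS mask built from every vector's EIMS bit (MSI-X branch). */
static unsigned int build_eics(const unsigned int *eims_values, int nr_vectors)
{
	unsigned int eics = 0;
	int i;

	for (i = 0; i < nr_vectors; i++)
		eics |= eims_values[i];
	return eics;
}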
@@ -2796,7 +3056,6 @@ enum latency_range {
2796 latency_invalid = 255 3056 latency_invalid = 255
2797}; 3057};
2798 3058
2799
2800/** 3059/**
2801 * igb_update_ring_itr - update the dynamic ITR value based on packet size 3060 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2802 * 3061 *
@@ -2811,25 +3070,37 @@ enum latency_range {
2811 * parameter (see igb_param.c) 3070 * parameter (see igb_param.c)
2812 * NOTE: This function is called only when operating in a multiqueue 3071 * NOTE: This function is called only when operating in a multiqueue
2813 * receive environment. 3072 * receive environment.
2814 * @rx_ring: pointer to ring 3073 * @q_vector: pointer to q_vector
2815 **/ 3074 **/
2816static void igb_update_ring_itr(struct igb_ring *rx_ring) 3075static void igb_update_ring_itr(struct igb_q_vector *q_vector)
2817{ 3076{
2818 int new_val = rx_ring->itr_val; 3077 int new_val = q_vector->itr_val;
2819 int avg_wire_size = 0; 3078 int avg_wire_size = 0;
2820 struct igb_adapter *adapter = rx_ring->adapter; 3079 struct igb_adapter *adapter = q_vector->adapter;
2821
2822 if (!rx_ring->total_packets)
2823 goto clear_counts; /* no packets, so don't do anything */
2824 3080
2825 /* For non-gigabit speeds, just fix the interrupt rate at 4000 3081 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2826 * ints/sec - ITR timer value of 120 ticks. 3082 * ints/sec - ITR timer value of 120 ticks.
2827 */ 3083 */
2828 if (adapter->link_speed != SPEED_1000) { 3084 if (adapter->link_speed != SPEED_1000) {
2829 new_val = 120; 3085 new_val = 976;
2830 goto set_itr_val; 3086 goto set_itr_val;
2831 } 3087 }
2832 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets; 3088
3089 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3090 struct igb_ring *ring = q_vector->rx_ring;
3091 avg_wire_size = ring->total_bytes / ring->total_packets;
3092 }
3093
3094 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3095 struct igb_ring *ring = q_vector->tx_ring;
3096 avg_wire_size = max_t(u32, avg_wire_size,
3097 (ring->total_bytes /
3098 ring->total_packets));
3099 }
3100
3101 /* if avg_wire_size isn't set no work was done */
3102 if (!avg_wire_size)
3103 goto clear_counts;
2833 3104
2834 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 3105 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2835 avg_wire_size += 24; 3106 avg_wire_size += 24;
@@ -2844,13 +3115,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
2844 new_val = avg_wire_size / 2; 3115 new_val = avg_wire_size / 2;
2845 3116
2846set_itr_val: 3117set_itr_val:
2847 if (new_val != rx_ring->itr_val) { 3118 if (new_val != q_vector->itr_val) {
2848 rx_ring->itr_val = new_val; 3119 q_vector->itr_val = new_val;
2849 rx_ring->set_itr = 1; 3120 q_vector->set_itr = 1;
2850 } 3121 }
2851clear_counts: 3122clear_counts:
2852 rx_ring->total_bytes = 0; 3123 if (q_vector->rx_ring) {
2853 rx_ring->total_packets = 0; 3124 q_vector->rx_ring->total_bytes = 0;
3125 q_vector->rx_ring->total_packets = 0;
3126 }
3127 if (q_vector->tx_ring) {
3128 q_vector->tx_ring->total_bytes = 0;
3129 q_vector->tx_ring->total_packets = 0;
3130 }
2854} 3131}
2855 3132
2856/** 3133/**
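The rewritten igb_update_ring_itr() above works per q_vector and folds the Rx and Tx ring of that vector into a single average wire size. A minimal standalone sketch of the visible steps; the clamping the driver applies between the "+ 24" adjustment and the final division falls outside the quoted context and is omitted:

/*
 * Sketch of the per-vector ITR calculation; only the steps shown in
 * the hunks above are modelled.
 */
static int itr_from_interval(unsigned int rx_bytes, unsigned int rx_packets,
			     unsigned int tx_bytes, unsigned int tx_packets,
			     int link_speed_mbps, int old_itr_val)
{
	unsigned int avg_wire_size = 0;

	/* below gigabit the rate is pinned at roughly 4000 ints/sec */
	if (link_speed_mbps != 1000)
		return 976;

	if (rx_packets)
		avg_wire_size = rx_bytes / rx_packets;
	if (tx_packets && tx_bytes / tx_packets > avg_wire_size)
		avg_wire_size = tx_bytes / tx_packets;

	/* no packets handled this interval: keep the current value */
	if (!avg_wire_size)
		return old_itr_val;

	avg_wire_size += 24;	/* CRC, preamble and inter-frame gap */

	return avg_wire_size / 2;
}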
@@ -2867,7 +3144,7 @@ clear_counts:
2867 * NOTE: These calculations are only valid when operating in a single- 3144 * NOTE: These calculations are only valid when operating in a single-
2868 * queue environment. 3145 * queue environment.
2869 * @adapter: pointer to adapter 3146 * @adapter: pointer to adapter
2870 * @itr_setting: current adapter->itr 3147 * @itr_setting: current q_vector->itr_val
2871 * @packets: the number of packets during this measurement interval 3148 * @packets: the number of packets during this measurement interval
2872 * @bytes: the number of bytes during this measurement interval 3149 * @bytes: the number of bytes during this measurement interval
2873 **/ 3150 **/
@@ -2919,8 +3196,9 @@ update_itr_done:
2919 3196
2920static void igb_set_itr(struct igb_adapter *adapter) 3197static void igb_set_itr(struct igb_adapter *adapter)
2921{ 3198{
3199 struct igb_q_vector *q_vector = adapter->q_vector[0];
2922 u16 current_itr; 3200 u16 current_itr;
2923 u32 new_itr = adapter->itr; 3201 u32 new_itr = q_vector->itr_val;
2924 3202
2925 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 3203 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2926 if (adapter->link_speed != SPEED_1000) { 3204 if (adapter->link_speed != SPEED_1000) {
@@ -2934,18 +3212,14 @@ static void igb_set_itr(struct igb_adapter *adapter)
2934 adapter->rx_ring->total_packets, 3212 adapter->rx_ring->total_packets,
2935 adapter->rx_ring->total_bytes); 3213 adapter->rx_ring->total_bytes);
2936 3214
2937 if (adapter->rx_ring->buddy) { 3215 adapter->tx_itr = igb_update_itr(adapter,
2938 adapter->tx_itr = igb_update_itr(adapter, 3216 adapter->tx_itr,
2939 adapter->tx_itr, 3217 adapter->tx_ring->total_packets,
2940 adapter->tx_ring->total_packets, 3218 adapter->tx_ring->total_bytes);
2941 adapter->tx_ring->total_bytes); 3219 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2942 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2943 } else {
2944 current_itr = adapter->rx_itr;
2945 }
2946 3220
2947 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3221 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2948 if (adapter->itr_setting == 3 && current_itr == lowest_latency) 3222 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
2949 current_itr = low_latency; 3223 current_itr = low_latency;
2950 3224
2951 switch (current_itr) { 3225 switch (current_itr) {
@@ -2966,18 +3240,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
2966set_itr_now: 3240set_itr_now:
2967 adapter->rx_ring->total_bytes = 0; 3241 adapter->rx_ring->total_bytes = 0;
2968 adapter->rx_ring->total_packets = 0; 3242 adapter->rx_ring->total_packets = 0;
2969 if (adapter->rx_ring->buddy) { 3243 adapter->tx_ring->total_bytes = 0;
2970 adapter->rx_ring->buddy->total_bytes = 0; 3244 adapter->tx_ring->total_packets = 0;
2971 adapter->rx_ring->buddy->total_packets = 0;
2972 }
2973 3245
2974 if (new_itr != adapter->itr) { 3246 if (new_itr != q_vector->itr_val) {
2975 /* this attempts to bias the interrupt rate towards Bulk 3247 /* this attempts to bias the interrupt rate towards Bulk
2976 * by adding intermediate steps when interrupt rate is 3248 * by adding intermediate steps when interrupt rate is
2977 * increasing */ 3249 * increasing */
2978 new_itr = new_itr > adapter->itr ? 3250 new_itr = new_itr > q_vector->itr_val ?
2979 max((new_itr * adapter->itr) / 3251 max((new_itr * q_vector->itr_val) /
2980 (new_itr + (adapter->itr >> 2)), new_itr) : 3252 (new_itr + (q_vector->itr_val >> 2)),
3253 new_itr) :
2981 new_itr; 3254 new_itr;
2982 /* Don't write the value here; it resets the adapter's 3255 /* Don't write the value here; it resets the adapter's
2983 * internal timer, and causes us to delay far longer than 3256 * internal timer, and causes us to delay far longer than
@@ -2985,25 +3258,22 @@ set_itr_now:
2985 * value at the beginning of the next interrupt so the timing 3258 * value at the beginning of the next interrupt so the timing
2986 * ends up being correct. 3259 * ends up being correct.
2987 */ 3260 */
2988 adapter->itr = new_itr; 3261 q_vector->itr_val = new_itr;
2989 adapter->rx_ring->itr_val = new_itr; 3262 q_vector->set_itr = 1;
2990 adapter->rx_ring->set_itr = 1;
2991 } 3263 }
2992 3264
2993 return; 3265 return;
2994} 3266}
2995 3267
2996
2997#define IGB_TX_FLAGS_CSUM 0x00000001 3268#define IGB_TX_FLAGS_CSUM 0x00000001
2998#define IGB_TX_FLAGS_VLAN 0x00000002 3269#define IGB_TX_FLAGS_VLAN 0x00000002
2999#define IGB_TX_FLAGS_TSO 0x00000004 3270#define IGB_TX_FLAGS_TSO 0x00000004
3000#define IGB_TX_FLAGS_IPV4 0x00000008 3271#define IGB_TX_FLAGS_IPV4 0x00000008
3001#define IGB_TX_FLAGS_TSTAMP 0x00000010 3272#define IGB_TX_FLAGS_TSTAMP 0x00000010
3002#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 3273#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3003#define IGB_TX_FLAGS_VLAN_SHIFT 16 3274#define IGB_TX_FLAGS_VLAN_SHIFT 16
3004 3275
3005static inline int igb_tso_adv(struct igb_adapter *adapter, 3276static inline int igb_tso_adv(struct igb_ring *tx_ring,
3006 struct igb_ring *tx_ring,
3007 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 3277 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3008{ 3278{
3009 struct e1000_adv_tx_context_desc *context_desc; 3279 struct e1000_adv_tx_context_desc *context_desc;
@@ -3065,8 +3335,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3065 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 3335 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3066 3336
3067 /* For 82575, context index must be unique per ring. */ 3337 /* For 82575, context index must be unique per ring. */
3068 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3338 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3069 mss_l4len_idx |= tx_ring->queue_index << 4; 3339 mss_l4len_idx |= tx_ring->reg_idx << 4;
3070 3340
3071 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 3341 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3072 context_desc->seqnum_seed = 0; 3342 context_desc->seqnum_seed = 0;
@@ -3083,14 +3353,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3083 return true; 3353 return true;
3084} 3354}
3085 3355
3086static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, 3356static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3087 struct igb_ring *tx_ring, 3357 struct sk_buff *skb, u32 tx_flags)
3088 struct sk_buff *skb, u32 tx_flags)
3089{ 3358{
3090 struct e1000_adv_tx_context_desc *context_desc; 3359 struct e1000_adv_tx_context_desc *context_desc;
3091 unsigned int i; 3360 struct pci_dev *pdev = tx_ring->pdev;
3092 struct igb_buffer *buffer_info; 3361 struct igb_buffer *buffer_info;
3093 u32 info = 0, tu_cmd = 0; 3362 u32 info = 0, tu_cmd = 0;
3363 unsigned int i;
3094 3364
3095 if ((skb->ip_summed == CHECKSUM_PARTIAL) || 3365 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3096 (tx_flags & IGB_TX_FLAGS_VLAN)) { 3366 (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3100,6 +3370,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3100 3370
3101 if (tx_flags & IGB_TX_FLAGS_VLAN) 3371 if (tx_flags & IGB_TX_FLAGS_VLAN)
3102 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); 3372 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3373
3103 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 3374 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3104 if (skb->ip_summed == CHECKSUM_PARTIAL) 3375 if (skb->ip_summed == CHECKSUM_PARTIAL)
3105 info |= skb_network_header_len(skb); 3376 info |= skb_network_header_len(skb);
@@ -3137,7 +3408,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3137 break; 3408 break;
3138 default: 3409 default:
3139 if (unlikely(net_ratelimit())) 3410 if (unlikely(net_ratelimit()))
3140 dev_warn(&adapter->pdev->dev, 3411 dev_warn(&pdev->dev,
3141 "partial checksum but proto=%x!\n", 3412 "partial checksum but proto=%x!\n",
3142 skb->protocol); 3413 skb->protocol);
3143 break; 3414 break;
@@ -3146,11 +3417,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3146 3417
3147 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 3418 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3148 context_desc->seqnum_seed = 0; 3419 context_desc->seqnum_seed = 0;
3149 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3420 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3150 context_desc->mss_l4len_idx = 3421 context_desc->mss_l4len_idx =
3151 cpu_to_le32(tx_ring->queue_index << 4); 3422 cpu_to_le32(tx_ring->reg_idx << 4);
3152 else
3153 context_desc->mss_l4len_idx = 0;
3154 3423
3155 buffer_info->time_stamp = jiffies; 3424 buffer_info->time_stamp = jiffies;
3156 buffer_info->next_to_watch = i; 3425 buffer_info->next_to_watch = i;
@@ -3169,11 +3438,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3169#define IGB_MAX_TXD_PWR 16 3438#define IGB_MAX_TXD_PWR 16
3170#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) 3439#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3171 3440
3172static inline int igb_tx_map_adv(struct igb_adapter *adapter, 3441static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3173 struct igb_ring *tx_ring, struct sk_buff *skb,
3174 unsigned int first) 3442 unsigned int first)
3175{ 3443{
3176 struct igb_buffer *buffer_info; 3444 struct igb_buffer *buffer_info;
3445 struct pci_dev *pdev = tx_ring->pdev;
3177 unsigned int len = skb_headlen(skb); 3446 unsigned int len = skb_headlen(skb);
3178 unsigned int count = 0, i; 3447 unsigned int count = 0, i;
3179 unsigned int f; 3448 unsigned int f;
@@ -3181,8 +3450,8 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3181 3450
3182 i = tx_ring->next_to_use; 3451 i = tx_ring->next_to_use;
3183 3452
3184 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { 3453 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
3185 dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); 3454 dev_err(&pdev->dev, "TX DMA map failed\n");
3186 return 0; 3455 return 0;
3187 } 3456 }
3188 3457
@@ -3218,18 +3487,17 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3218 tx_ring->buffer_info[i].skb = skb; 3487 tx_ring->buffer_info[i].skb = skb;
3219 tx_ring->buffer_info[first].next_to_watch = i; 3488 tx_ring->buffer_info[first].next_to_watch = i;
3220 3489
3221 return count + 1; 3490 return ++count;
3222} 3491}
3223 3492
3224static inline void igb_tx_queue_adv(struct igb_adapter *adapter, 3493static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3225 struct igb_ring *tx_ring,
3226 int tx_flags, int count, u32 paylen, 3494 int tx_flags, int count, u32 paylen,
3227 u8 hdr_len) 3495 u8 hdr_len)
3228{ 3496{
3229 union e1000_adv_tx_desc *tx_desc = NULL; 3497 union e1000_adv_tx_desc *tx_desc;
3230 struct igb_buffer *buffer_info; 3498 struct igb_buffer *buffer_info;
3231 u32 olinfo_status = 0, cmd_type_len; 3499 u32 olinfo_status = 0, cmd_type_len;
3232 unsigned int i; 3500 unsigned int i = tx_ring->next_to_use;
3233 3501
3234 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | 3502 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3235 E1000_ADVTXD_DCMD_DEXT); 3503 E1000_ADVTXD_DCMD_DEXT);
@@ -3254,27 +3522,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3254 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3522 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3255 } 3523 }
3256 3524
3257 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) && 3525 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3258 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | 3526 (tx_flags & (IGB_TX_FLAGS_CSUM |
3527 IGB_TX_FLAGS_TSO |
3259 IGB_TX_FLAGS_VLAN))) 3528 IGB_TX_FLAGS_VLAN)))
3260 olinfo_status |= tx_ring->queue_index << 4; 3529 olinfo_status |= tx_ring->reg_idx << 4;
3261 3530
3262 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); 3531 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3263 3532
3264 i = tx_ring->next_to_use; 3533 do {
3265 while (count--) {
3266 buffer_info = &tx_ring->buffer_info[i]; 3534 buffer_info = &tx_ring->buffer_info[i];
3267 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 3535 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3268 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 3536 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3269 tx_desc->read.cmd_type_len = 3537 tx_desc->read.cmd_type_len =
3270 cpu_to_le32(cmd_type_len | buffer_info->length); 3538 cpu_to_le32(cmd_type_len | buffer_info->length);
3271 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3539 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3540 count--;
3272 i++; 3541 i++;
3273 if (i == tx_ring->count) 3542 if (i == tx_ring->count)
3274 i = 0; 3543 i = 0;
3275 } 3544 } while (count > 0);
3276 3545
3277 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); 3546 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
3278 /* Force memory writes to complete before letting h/w 3547 /* Force memory writes to complete before letting h/w
3279 * know there are new descriptors to fetch. (Only 3548 * know there are new descriptors to fetch. (Only
3280 * applicable for weak-ordered memory model archs, 3549 * applicable for weak-ordered memory model archs,
@@ -3282,16 +3551,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3282 wmb(); 3551 wmb();
3283 3552
3284 tx_ring->next_to_use = i; 3553 tx_ring->next_to_use = i;
3285 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3554 writel(i, tx_ring->tail);
3286 /* we need this if more than one processor can write to our tail 3555 /* we need this if more than one processor can write to our tail
3287 * at a time, it synchronizes IO on IA64/Altix systems */ 3556
3288 mmiowb(); 3557 mmiowb();
3289} 3558}
3290 3559
3291static int __igb_maybe_stop_tx(struct net_device *netdev, 3560static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3292 struct igb_ring *tx_ring, int size)
3293{ 3561{
3294 struct igb_adapter *adapter = netdev_priv(netdev); 3562 struct net_device *netdev = tx_ring->netdev;
3295 3563
3296 netif_stop_subqueue(netdev, tx_ring->queue_index); 3564 netif_stop_subqueue(netdev, tx_ring->queue_index);
3297 3565
@@ -3307,66 +3575,43 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
3307 3575
3308 /* A reprieve! */ 3576 /* A reprieve! */
3309 netif_wake_subqueue(netdev, tx_ring->queue_index); 3577 netif_wake_subqueue(netdev, tx_ring->queue_index);
3310 ++adapter->restart_queue; 3578 tx_ring->tx_stats.restart_queue++;
3311 return 0; 3579 return 0;
3312} 3580}
3313 3581
3314static int igb_maybe_stop_tx(struct net_device *netdev, 3582static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3315 struct igb_ring *tx_ring, int size)
3316{ 3583{
3317 if (igb_desc_unused(tx_ring) >= size) 3584 if (igb_desc_unused(tx_ring) >= size)
3318 return 0; 3585 return 0;
3319 return __igb_maybe_stop_tx(netdev, tx_ring, size); 3586 return __igb_maybe_stop_tx(tx_ring, size);
3320} 3587}
3321 3588
3322static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, 3589netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3323 struct net_device *netdev, 3590 struct igb_ring *tx_ring)
3324 struct igb_ring *tx_ring)
3325{ 3591{
3326 struct igb_adapter *adapter = netdev_priv(netdev); 3592 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
3327 unsigned int first; 3593 unsigned int first;
3328 unsigned int tx_flags = 0; 3594 unsigned int tx_flags = 0;
3329 u8 hdr_len = 0; 3595 u8 hdr_len = 0;
3330 int count = 0; 3596 int tso = 0, count;
3331 int tso = 0; 3597 union skb_shared_tx *shtx = skb_tx(skb);
3332 union skb_shared_tx *shtx;
3333
3334 if (test_bit(__IGB_DOWN, &adapter->state)) {
3335 dev_kfree_skb_any(skb);
3336 return NETDEV_TX_OK;
3337 }
3338
3339 if (skb->len <= 0) {
3340 dev_kfree_skb_any(skb);
3341 return NETDEV_TX_OK;
3342 }
3343 3598
3344 /* need: 1 descriptor per page, 3599 /* need: 1 descriptor per page,
3345 * + 2 desc gap to keep tail from touching head, 3600 * + 2 desc gap to keep tail from touching head,
3346 * + 1 desc for skb->data, 3601 * + 1 desc for skb->data,
3347 * + 1 desc for context descriptor, 3602 * + 1 desc for context descriptor,
3348 * otherwise try next time */ 3603 * otherwise try next time */
3349 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) { 3604 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3350 /* this is a hard error */ 3605 /* this is a hard error */
3351 return NETDEV_TX_BUSY; 3606 return NETDEV_TX_BUSY;
3352 } 3607 }
3353 3608
3354 /*
3355 * TODO: check that there currently is no other packet with
3356 * time stamping in the queue
3357 *
3358 * When doing time stamping, keep the connection to the socket
3359 * a while longer: it is still needed by skb_hwtstamp_tx(),
3360 * called either in igb_tx_hwtstamp() or by our caller when
3361 * doing software time stamping.
3362 */
3363 shtx = skb_tx(skb);
3364 if (unlikely(shtx->hardware)) { 3609 if (unlikely(shtx->hardware)) {
3365 shtx->in_progress = 1; 3610 shtx->in_progress = 1;
3366 tx_flags |= IGB_TX_FLAGS_TSTAMP; 3611 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3367 } 3612 }
3368 3613
3369 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3614 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
3370 tx_flags |= IGB_TX_FLAGS_VLAN; 3615 tx_flags |= IGB_TX_FLAGS_VLAN;
3371 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 3616 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3372 } 3617 }
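The transmit path above reserves skb_shinfo(skb)->nr_frags + 4 descriptors before queuing a frame: one per page fragment, one for skb->data, one for the context descriptor and a two-descriptor gap to keep the tail from touching the head. A sketch of that check, assuming igb_desc_unused() follows the usual ring-buffer free-count formula (the helper itself is not part of the quoted hunks):

/* Hypothetical stand-in for the ring fields the check relies on. */
struct tx_ring_model {
	unsigned int count;		/* total descriptors in the ring */
	unsigned int next_to_use;	/* producer index */
	unsigned int next_to_clean;	/* consumer index */
};

/* Assumed shape of igb_desc_unused(): free slots, always leaving one
 * descriptor empty so head == tail never means "full". */
static unsigned int desc_unused(const struct tx_ring_model *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;
	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

/* Worst-case reservation made by igb_xmit_frame_ring_adv() above. */
static int frame_fits(const struct tx_ring_model *ring, unsigned int nr_frags)
{
	return desc_unused(ring) >= nr_frags + 4;
}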
@@ -3375,37 +3620,38 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3375 tx_flags |= IGB_TX_FLAGS_IPV4; 3620 tx_flags |= IGB_TX_FLAGS_IPV4;
3376 3621
3377 first = tx_ring->next_to_use; 3622 first = tx_ring->next_to_use;
3378 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, 3623 if (skb_is_gso(skb)) {
3379 &hdr_len) : 0; 3624 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3380 3625
3381 if (tso < 0) { 3626 if (tso < 0) {
3382 dev_kfree_skb_any(skb); 3627 dev_kfree_skb_any(skb);
3383 return NETDEV_TX_OK; 3628 return NETDEV_TX_OK;
3629 }
3384 } 3630 }
3385 3631
3386 if (tso) 3632 if (tso)
3387 tx_flags |= IGB_TX_FLAGS_TSO; 3633 tx_flags |= IGB_TX_FLAGS_TSO;
3388 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) && 3634 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
3389 (skb->ip_summed == CHECKSUM_PARTIAL)) 3635 (skb->ip_summed == CHECKSUM_PARTIAL))
3390 tx_flags |= IGB_TX_FLAGS_CSUM; 3636 tx_flags |= IGB_TX_FLAGS_CSUM;
3391 3637
3392 /* 3638 /*
3393 * count reflects descriptors mapped, if 0 then mapping error 3639 * count reflects descriptors mapped, if 0 or less then mapping error
3394 * has occurred and we need to rewind the descriptor queue 3640
3395 */ 3641 */
3396 count = igb_tx_map_adv(adapter, tx_ring, skb, first); 3642 count = igb_tx_map_adv(tx_ring, skb, first);
3397 3643 if (count <= 0) {
3398 if (count) {
3399 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
3400 skb->len, hdr_len);
3401 /* Make sure there is space in the ring for the next send. */
3402 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3403 } else {
3404 dev_kfree_skb_any(skb); 3644 dev_kfree_skb_any(skb);
3405 tx_ring->buffer_info[first].time_stamp = 0; 3645 tx_ring->buffer_info[first].time_stamp = 0;
3406 tx_ring->next_to_use = first; 3646 tx_ring->next_to_use = first;
3647 return NETDEV_TX_OK;
3407 } 3648 }
3408 3649
3650 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3651
3652 /* Make sure there is space in the ring for the next send. */
3653 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
3654
3409 return NETDEV_TX_OK; 3655 return NETDEV_TX_OK;
3410} 3656}
3411 3657
@@ -3414,8 +3660,18 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3414{ 3660{
3415 struct igb_adapter *adapter = netdev_priv(netdev); 3661 struct igb_adapter *adapter = netdev_priv(netdev);
3416 struct igb_ring *tx_ring; 3662 struct igb_ring *tx_ring;
3417
3418 int r_idx = 0; 3663 int r_idx = 0;
3664
3665 if (test_bit(__IGB_DOWN, &adapter->state)) {
3666 dev_kfree_skb_any(skb);
3667 return NETDEV_TX_OK;
3668 }
3669
3670 if (skb->len <= 0) {
3671 dev_kfree_skb_any(skb);
3672 return NETDEV_TX_OK;
3673 }
3674
3419 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); 3675 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3420 tx_ring = adapter->multi_tx_table[r_idx]; 3676 tx_ring = adapter->multi_tx_table[r_idx];
3421 3677
@@ -3423,7 +3679,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3423 * to a flow. Right now, performance is impacted slightly negatively 3679 * to a flow. Right now, performance is impacted slightly negatively
3424 * if using multiple tx queues. If the stack breaks away from a 3680 * if using multiple tx queues. If the stack breaks away from a
3425 * single qdisc implementation, we can look at this again. */ 3681 * single qdisc implementation, we can look at this again. */
3426 return igb_xmit_frame_ring_adv(skb, netdev, tx_ring); 3682 return igb_xmit_frame_ring_adv(skb, tx_ring);
3427} 3683}
3428 3684
3429/** 3685/**
@@ -3437,6 +3693,7 @@ static void igb_tx_timeout(struct net_device *netdev)
3437 3693
3438 /* Do the reset outside of interrupt context */ 3694 /* Do the reset outside of interrupt context */
3439 adapter->tx_timeout_count++; 3695 adapter->tx_timeout_count++;
3696
3440 schedule_work(&adapter->reset_task); 3697 schedule_work(&adapter->reset_task);
3441 wr32(E1000_EICS, 3698 wr32(E1000_EICS,
3442 (adapter->eims_enable_mask & ~adapter->eims_other)); 3699 (adapter->eims_enable_mask & ~adapter->eims_other));
@@ -3459,10 +3716,8 @@ static void igb_reset_task(struct work_struct *work)
3459 **/ 3716 **/
3460static struct net_device_stats *igb_get_stats(struct net_device *netdev) 3717static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3461{ 3718{
3462 struct igb_adapter *adapter = netdev_priv(netdev);
3463
3464 /* only return the current stats */ 3719 /* only return the current stats */
3465 return &adapter->net_stats; 3720 return &netdev->stats;
3466} 3721}
3467 3722
3468/** 3723/**
@@ -3475,16 +3730,17 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3475static int igb_change_mtu(struct net_device *netdev, int new_mtu) 3730static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3476{ 3731{
3477 struct igb_adapter *adapter = netdev_priv(netdev); 3732 struct igb_adapter *adapter = netdev_priv(netdev);
3733 struct pci_dev *pdev = adapter->pdev;
3478 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3734 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3735 u32 rx_buffer_len, i;
3479 3736
3480 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3737 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3481 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3738 dev_err(&pdev->dev, "Invalid MTU setting\n");
3482 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3483 return -EINVAL; 3739 return -EINVAL;
3484 } 3740 }
3485 3741
3486 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3742 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3487 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); 3743 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
3488 return -EINVAL; 3744 return -EINVAL;
3489 } 3745 }
3490 3746
@@ -3493,8 +3749,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3493 3749
3494 /* igb_down has a dependency on max_frame_size */ 3750 /* igb_down has a dependency on max_frame_size */
3495 adapter->max_frame_size = max_frame; 3751 adapter->max_frame_size = max_frame;
3496 if (netif_running(netdev))
3497 igb_down(adapter);
3498 3752
3499 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3753 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3500 * means we reserve 2 more, this pushes us to allocate from the next 3754 * means we reserve 2 more, this pushes us to allocate from the next
@@ -3502,35 +3756,23 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3502 * i.e. RXBUFFER_2048 --> size-4096 slab 3756 * i.e. RXBUFFER_2048 --> size-4096 slab
3503 */ 3757 */
3504 3758
3505 if (max_frame <= IGB_RXBUFFER_256) 3759 if (max_frame <= IGB_RXBUFFER_1024)
3506 adapter->rx_buffer_len = IGB_RXBUFFER_256; 3760 rx_buffer_len = IGB_RXBUFFER_1024;
3507 else if (max_frame <= IGB_RXBUFFER_512) 3761 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3508 adapter->rx_buffer_len = IGB_RXBUFFER_512; 3762 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3509 else if (max_frame <= IGB_RXBUFFER_1024)
3510 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3511 else if (max_frame <= IGB_RXBUFFER_2048)
3512 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3513 else 3763 else
3514#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 3764 rx_buffer_len = IGB_RXBUFFER_128;
3515 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3516#else
3517 adapter->rx_buffer_len = PAGE_SIZE / 2;
3518#endif
3519
3520 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3521 if (adapter->vfs_allocated_count &&
3522 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3523 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3524 3765
3525 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3766 if (netif_running(netdev))
3526 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3767 igb_down(adapter);
3527 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3528 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3529 3768
3530 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", 3769 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
3531 netdev->mtu, new_mtu); 3770 netdev->mtu, new_mtu);
3532 netdev->mtu = new_mtu; 3771 netdev->mtu = new_mtu;
3533 3772
3773 for (i = 0; i < adapter->num_rx_queues; i++)
3774 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3775
3534 if (netif_running(netdev)) 3776 if (netif_running(netdev))
3535 igb_up(adapter); 3777 igb_up(adapter);
3536 else 3778 else
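igb_change_mtu() above replaces the old multi-step buffer ladder with a single per-ring rx_buffer_len that is stamped onto every Rx ring before the interface comes back up. A condensed model of the selection, with the numeric values assumed from the macro names rather than taken from the headers (1024 bytes for IGB_RXBUFFER_1024, 1522 bytes for a VLAN-tagged standard frame, 128 bytes for the header buffer used with larger frames):

/* Sketch of the buffer-size choice; the constants are assumptions. */
static unsigned int pick_rx_buffer_len(unsigned int max_frame)
{
	if (max_frame <= 1024)		/* IGB_RXBUFFER_1024 */
		return 1024;
	if (max_frame <= 1522)		/* MAXIMUM_ETHERNET_VLAN_SIZE */
		return 1522;
	return 128;			/* IGB_RXBUFFER_128: payload lands in pages */
}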
@@ -3548,9 +3790,13 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3548 3790
3549void igb_update_stats(struct igb_adapter *adapter) 3791void igb_update_stats(struct igb_adapter *adapter)
3550{ 3792{
3793 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3551 struct e1000_hw *hw = &adapter->hw; 3794 struct e1000_hw *hw = &adapter->hw;
3552 struct pci_dev *pdev = adapter->pdev; 3795 struct pci_dev *pdev = adapter->pdev;
3796 u32 rnbc;
3553 u16 phy_tmp; 3797 u16 phy_tmp;
3798 int i;
3799 u64 bytes, packets;
3554 3800
3555#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3801#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3556 3802
@@ -3563,6 +3809,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3563 if (pci_channel_offline(pdev)) 3809 if (pci_channel_offline(pdev))
3564 return; 3810 return;
3565 3811
3812 bytes = 0;
3813 packets = 0;
3814 for (i = 0; i < adapter->num_rx_queues; i++) {
3815 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3816 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3817 net_stats->rx_fifo_errors += rqdpc_tmp;
3818 bytes += adapter->rx_ring[i].rx_stats.bytes;
3819 packets += adapter->rx_ring[i].rx_stats.packets;
3820 }
3821
3822 net_stats->rx_bytes = bytes;
3823 net_stats->rx_packets = packets;
3824
3825 bytes = 0;
3826 packets = 0;
3827 for (i = 0; i < adapter->num_tx_queues; i++) {
3828 bytes += adapter->tx_ring[i].tx_stats.bytes;
3829 packets += adapter->tx_ring[i].tx_stats.packets;
3830 }
3831 net_stats->tx_bytes = bytes;
3832 net_stats->tx_packets = packets;
3833
3834 /* read stats registers */
3566 adapter->stats.crcerrs += rd32(E1000_CRCERRS); 3835 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3567 adapter->stats.gprc += rd32(E1000_GPRC); 3836 adapter->stats.gprc += rd32(E1000_GPRC);
3568 adapter->stats.gorc += rd32(E1000_GORCL); 3837 adapter->stats.gorc += rd32(E1000_GORCL);
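igb_update_stats() above now folds the software per-ring counters into the netdev statistics and, for each Rx queue, adds the 12-bit RQDPC drop counter to rx_fifo_errors. A toy model of the aggregation pass; the structure and function names are illustrative, not the driver's:

/* Illustrative per-ring software counters. */
struct ring_counters {
	unsigned long long bytes;
	unsigned long long packets;
};

/* Sum per-queue byte and packet counts into single totals, as the Rx
 * and Tx loops in the hunk do for net_stats. */
static void fold_queue_counters(const struct ring_counters *rings, int nr,
				unsigned long long *bytes,
				unsigned long long *packets)
{
	int i;

	*bytes = 0;
	*packets = 0;
	for (i = 0; i < nr; i++) {
		*bytes += rings[i].bytes;
		*packets += rings[i].packets;
	}
}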
@@ -3595,7 +3864,9 @@ void igb_update_stats(struct igb_adapter *adapter)
3595 adapter->stats.gptc += rd32(E1000_GPTC); 3864 adapter->stats.gptc += rd32(E1000_GPTC);
3596 adapter->stats.gotc += rd32(E1000_GOTCL); 3865 adapter->stats.gotc += rd32(E1000_GOTCL);
3597 rd32(E1000_GOTCH); /* clear GOTCL */ 3866 rd32(E1000_GOTCH); /* clear GOTCL */
3598 adapter->stats.rnbc += rd32(E1000_RNBC); 3867 rnbc = rd32(E1000_RNBC);
3868 adapter->stats.rnbc += rnbc;
3869 net_stats->rx_fifo_errors += rnbc;
3599 adapter->stats.ruc += rd32(E1000_RUC); 3870 adapter->stats.ruc += rd32(E1000_RUC);
3600 adapter->stats.rfc += rd32(E1000_RFC); 3871 adapter->stats.rfc += rd32(E1000_RFC);
3601 adapter->stats.rjc += rd32(E1000_RJC); 3872 adapter->stats.rjc += rd32(E1000_RJC);
@@ -3614,7 +3885,6 @@ void igb_update_stats(struct igb_adapter *adapter)
3614 adapter->stats.bptc += rd32(E1000_BPTC); 3885 adapter->stats.bptc += rd32(E1000_BPTC);
3615 3886
3616 /* used for adaptive IFS */ 3887 /* used for adaptive IFS */
3617
3618 hw->mac.tx_packet_delta = rd32(E1000_TPT); 3888 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3619 adapter->stats.tpt += hw->mac.tx_packet_delta; 3889 adapter->stats.tpt += hw->mac.tx_packet_delta;
3620 hw->mac.collision_delta = rd32(E1000_COLC); 3890 hw->mac.collision_delta = rd32(E1000_COLC);
@@ -3637,56 +3907,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3637 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); 3907 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3638 3908
3639 /* Fill out the OS statistics structure */ 3909 /* Fill out the OS statistics structure */
3640 adapter->net_stats.multicast = adapter->stats.mprc; 3910 net_stats->multicast = adapter->stats.mprc;
3641 adapter->net_stats.collisions = adapter->stats.colc; 3911 net_stats->collisions = adapter->stats.colc;
3642 3912
3643 /* Rx Errors */ 3913 /* Rx Errors */
3644 3914
3645 if (hw->mac.type != e1000_82575) {
3646 u32 rqdpc_tmp;
3647 u64 rqdpc_total = 0;
3648 int i;
3649 /* Read out drops stats per RX queue. Notice RQDPC (Receive
3650 * Queue Drop Packet Count) stats only gets incremented, if
3651 * the DROP_EN bit is set (in the SRRCTL register for that
3652 * queue). If the DROP_EN bit is NOT set, then the somewhat
3653 * equivalent count is stored in RNBC (not per queue basis).
3654 * Also note the drop count is due to lack of available
3655 * descriptors.
3656 */
3657 for (i = 0; i < adapter->num_rx_queues; i++) {
3658 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
3659 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3660 rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
3661 }
3662 adapter->net_stats.rx_fifo_errors = rqdpc_total;
3663 }
3664
3665 /* Note RNBC (Receive No Buffers Count) is not an exact
3666 * drop count as the hardware FIFO might save the day. That's
3667 * one of the reasons for saving it in rx_fifo_errors, as it's
3668 * potentially not a true drop.
3669 */
3670 adapter->net_stats.rx_fifo_errors += adapter->stats.rnbc;
3671
3672 /* RLEC on some newer hardware can be incorrect so build 3915 /* RLEC on some newer hardware can be incorrect so build
3673 * our own version based on RUC and ROC */ 3916 * our own version based on RUC and ROC */
3674 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3917 net_stats->rx_errors = adapter->stats.rxerrc +
3675 adapter->stats.crcerrs + adapter->stats.algnerrc + 3918 adapter->stats.crcerrs + adapter->stats.algnerrc +
3676 adapter->stats.ruc + adapter->stats.roc + 3919 adapter->stats.ruc + adapter->stats.roc +
3677 adapter->stats.cexterr; 3920 adapter->stats.cexterr;
3678 adapter->net_stats.rx_length_errors = adapter->stats.ruc + 3921 net_stats->rx_length_errors = adapter->stats.ruc +
3679 adapter->stats.roc; 3922 adapter->stats.roc;
3680 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3923 net_stats->rx_crc_errors = adapter->stats.crcerrs;
3681 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3924 net_stats->rx_frame_errors = adapter->stats.algnerrc;
3682 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3925 net_stats->rx_missed_errors = adapter->stats.mpc;
3683 3926
3684 /* Tx Errors */ 3927 /* Tx Errors */
3685 adapter->net_stats.tx_errors = adapter->stats.ecol + 3928 net_stats->tx_errors = adapter->stats.ecol +
3686 adapter->stats.latecol; 3929 adapter->stats.latecol;
3687 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; 3930 net_stats->tx_aborted_errors = adapter->stats.ecol;
3688 adapter->net_stats.tx_window_errors = adapter->stats.latecol; 3931 net_stats->tx_window_errors = adapter->stats.latecol;
3689 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; 3932 net_stats->tx_carrier_errors = adapter->stats.tncrs;
3690 3933
3691 /* Tx Dropped needs to be maintained elsewhere */ 3934 /* Tx Dropped needs to be maintained elsewhere */
3692 3935
@@ -3707,14 +3950,12 @@ void igb_update_stats(struct igb_adapter *adapter)
3707 3950
3708static irqreturn_t igb_msix_other(int irq, void *data) 3951static irqreturn_t igb_msix_other(int irq, void *data)
3709{ 3952{
3710 struct net_device *netdev = data; 3953 struct igb_adapter *adapter = data;
3711 struct igb_adapter *adapter = netdev_priv(netdev);
3712 struct e1000_hw *hw = &adapter->hw; 3954 struct e1000_hw *hw = &adapter->hw;
3713 u32 icr = rd32(E1000_ICR); 3955 u32 icr = rd32(E1000_ICR);
3714
3715 /* reading ICR causes bit 31 of EICR to be cleared */ 3956 /* reading ICR causes bit 31 of EICR to be cleared */
3716 3957
3717 if(icr & E1000_ICR_DOUTSYNC) { 3958 if (icr & E1000_ICR_DOUTSYNC) {
3718 /* HW is reporting DMA is out of sync */ 3959 /* HW is reporting DMA is out of sync */
3719 adapter->stats.doosync++; 3960 adapter->stats.doosync++;
3720 } 3961 }
@@ -3730,125 +3971,90 @@ static irqreturn_t igb_msix_other(int irq, void *data)
3730 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3971 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3731 } 3972 }
3732 3973
3733 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB); 3974 if (adapter->vfs_allocated_count)
3975 wr32(E1000_IMS, E1000_IMS_LSC |
3976 E1000_IMS_VMMB |
3977 E1000_IMS_DOUTSYNC);
3978 else
3979 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
3734 wr32(E1000_EIMS, adapter->eims_other); 3980 wr32(E1000_EIMS, adapter->eims_other);
3735 3981
3736 return IRQ_HANDLED; 3982 return IRQ_HANDLED;
3737} 3983}
3738 3984
3739static irqreturn_t igb_msix_tx(int irq, void *data) 3985static void igb_write_itr(struct igb_q_vector *q_vector)
3740{ 3986{
3741 struct igb_ring *tx_ring = data; 3987 u32 itr_val = q_vector->itr_val & 0x7FFC;
3742 struct igb_adapter *adapter = tx_ring->adapter;
3743 struct e1000_hw *hw = &adapter->hw;
3744 3988
3745#ifdef CONFIG_IGB_DCA 3989 if (!q_vector->set_itr)
3746 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3990 return;
3747 igb_update_tx_dca(tx_ring);
3748#endif
3749 3991
3750 tx_ring->total_bytes = 0; 3992 if (!itr_val)
3751 tx_ring->total_packets = 0; 3993 itr_val = 0x4;
3752 3994
3753 /* auto mask will automatically reenable the interrupt when we write 3995 if (q_vector->itr_shift)
3754 * EICS */ 3996 itr_val |= itr_val << q_vector->itr_shift;
3755 if (!igb_clean_tx_irq(tx_ring))
3756 /* Ring was not completely cleaned, so fire another interrupt */
3757 wr32(E1000_EICS, tx_ring->eims_value);
3758 else 3997 else
3759 wr32(E1000_EIMS, tx_ring->eims_value); 3998 itr_val |= 0x8000000;
3760 3999
3761 return IRQ_HANDLED; 4000 writel(itr_val, q_vector->itr_register);
4001 q_vector->set_itr = 0;
3762} 4002}
3763 4003
3764static void igb_write_itr(struct igb_ring *ring) 4004static irqreturn_t igb_msix_ring(int irq, void *data)
3765{ 4005{
3766 struct e1000_hw *hw = &ring->adapter->hw; 4006 struct igb_q_vector *q_vector = data;
3767 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3768 switch (hw->mac.type) {
3769 case e1000_82576:
3770 wr32(ring->itr_register, ring->itr_val |
3771 0x80000000);
3772 break;
3773 default:
3774 wr32(ring->itr_register, ring->itr_val |
3775 (ring->itr_val << 16));
3776 break;
3777 }
3778 ring->set_itr = 0;
3779 }
3780}
3781 4007
3782static irqreturn_t igb_msix_rx(int irq, void *data) 4008 /* Write the ITR value calculated from the previous interrupt. */
3783{ 4009 igb_write_itr(q_vector);
3784 struct igb_ring *rx_ring = data;
3785 4010
3786 /* Write the ITR value calculated at the end of the 4011 napi_schedule(&q_vector->napi);
3787 * previous interrupt.
3788 */
3789
3790 igb_write_itr(rx_ring);
3791 4012
3792 if (napi_schedule_prep(&rx_ring->napi)) 4013 return IRQ_HANDLED;
3793 __napi_schedule(&rx_ring->napi);
3794
3795#ifdef CONFIG_IGB_DCA
3796 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3797 igb_update_rx_dca(rx_ring);
3798#endif
3799 return IRQ_HANDLED;
3800} 4014}
3801 4015
3802#ifdef CONFIG_IGB_DCA 4016#ifdef CONFIG_IGB_DCA
3803static void igb_update_rx_dca(struct igb_ring *rx_ring) 4017static void igb_update_dca(struct igb_q_vector *q_vector)
3804{ 4018{
3805 u32 dca_rxctrl; 4019 struct igb_adapter *adapter = q_vector->adapter;
3806 struct igb_adapter *adapter = rx_ring->adapter;
3807 struct e1000_hw *hw = &adapter->hw; 4020 struct e1000_hw *hw = &adapter->hw;
3808 int cpu = get_cpu(); 4021 int cpu = get_cpu();
3809 int q = rx_ring->reg_idx;
3810 4022
3811 if (rx_ring->cpu != cpu) { 4023 if (q_vector->cpu == cpu)
3812 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); 4024 goto out_no_update;
3813 if (hw->mac.type == e1000_82576) { 4025
3814 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; 4026 if (q_vector->tx_ring) {
3815 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << 4027 int q = q_vector->tx_ring->reg_idx;
3816 E1000_DCA_RXCTRL_CPUID_SHIFT; 4028 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4029 if (hw->mac.type == e1000_82575) {
4030 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4031 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3817 } else { 4032 } else {
4033 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4034 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4035 E1000_DCA_TXCTRL_CPUID_SHIFT;
4036 }
4037 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4038 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4039 }
4040 if (q_vector->rx_ring) {
4041 int q = q_vector->rx_ring->reg_idx;
4042 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4043 if (hw->mac.type == e1000_82575) {
3818 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; 4044 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3819 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 4045 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4046 } else {
4047 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4048 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4049 E1000_DCA_RXCTRL_CPUID_SHIFT;
3820 } 4050 }
3821 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; 4051 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3822 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; 4052 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3823 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; 4053 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3824 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); 4054 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3825 rx_ring->cpu = cpu;
3826 }
3827 put_cpu();
3828}
3829
3830static void igb_update_tx_dca(struct igb_ring *tx_ring)
3831{
3832 u32 dca_txctrl;
3833 struct igb_adapter *adapter = tx_ring->adapter;
3834 struct e1000_hw *hw = &adapter->hw;
3835 int cpu = get_cpu();
3836 int q = tx_ring->reg_idx;
3837
3838 if (tx_ring->cpu != cpu) {
3839 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3840 if (hw->mac.type == e1000_82576) {
3841 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3842 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3843 E1000_DCA_TXCTRL_CPUID_SHIFT;
3844 } else {
3845 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3846 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3847 }
3848 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3849 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3850 tx_ring->cpu = cpu;
3851 } 4055 }
4056 q_vector->cpu = cpu;
4057out_no_update:
3852 put_cpu(); 4058 put_cpu();
3853} 4059}
3854 4060
@@ -3863,13 +4069,10 @@ static void igb_setup_dca(struct igb_adapter *adapter)
3863 /* Always use CB2 mode, difference is masked in the CB driver. */ 4069 /* Always use CB2 mode, difference is masked in the CB driver. */
3864 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); 4070 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3865 4071
3866 for (i = 0; i < adapter->num_tx_queues; i++) { 4072 for (i = 0; i < adapter->num_q_vectors; i++) {
3867 adapter->tx_ring[i].cpu = -1; 4073 struct igb_q_vector *q_vector = adapter->q_vector[i];
3868 igb_update_tx_dca(&adapter->tx_ring[i]); 4074 q_vector->cpu = -1;
3869 } 4075 igb_update_dca(q_vector);
3870 for (i = 0; i < adapter->num_rx_queues; i++) {
3871 adapter->rx_ring[i].cpu = -1;
3872 igb_update_rx_dca(&adapter->rx_ring[i]);
3873 } 4076 }
3874} 4077}
3875 4078
@@ -3877,6 +4080,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
3877{ 4080{
3878 struct net_device *netdev = dev_get_drvdata(dev); 4081 struct net_device *netdev = dev_get_drvdata(dev);
3879 struct igb_adapter *adapter = netdev_priv(netdev); 4082 struct igb_adapter *adapter = netdev_priv(netdev);
4083 struct pci_dev *pdev = adapter->pdev;
3880 struct e1000_hw *hw = &adapter->hw; 4084 struct e1000_hw *hw = &adapter->hw;
3881 unsigned long event = *(unsigned long *)data; 4085 unsigned long event = *(unsigned long *)data;
3882 4086
@@ -3885,12 +4089,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3885 /* if already enabled, don't do it again */ 4089 /* if already enabled, don't do it again */
3886 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 4090 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3887 break; 4091 break;
3888 /* Always use CB2 mode, difference is masked
3889 * in the CB driver. */
3890 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3891 if (dca_add_requester(dev) == 0) { 4092 if (dca_add_requester(dev) == 0) {
3892 adapter->flags |= IGB_FLAG_DCA_ENABLED; 4093 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3893 dev_info(&adapter->pdev->dev, "DCA enabled\n"); 4094 dev_info(&pdev->dev, "DCA enabled\n");
3894 igb_setup_dca(adapter); 4095 igb_setup_dca(adapter);
3895 break; 4096 break;
3896 } 4097 }
@@ -3898,9 +4099,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3898 case DCA_PROVIDER_REMOVE: 4099 case DCA_PROVIDER_REMOVE:
3899 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 4100 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3900 /* without this a class_device is left 4101 /* without this a class_device is left
3901 * hanging around in the sysfs model */ 4102 * hanging around in the sysfs model */
3902 dca_remove_requester(dev); 4103 dca_remove_requester(dev);
3903 dev_info(&adapter->pdev->dev, "DCA disabled\n"); 4104 dev_info(&pdev->dev, "DCA disabled\n");
3904 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 4105 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3905 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); 4106 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3906 } 4107 }
@@ -3930,12 +4131,51 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
3930 4131
3931 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { 4132 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
3932 ping = E1000_PF_CONTROL_MSG; 4133 ping = E1000_PF_CONTROL_MSG;
3933 if (adapter->vf_data[i].clear_to_send) 4134 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
3934 ping |= E1000_VT_MSGTYPE_CTS; 4135 ping |= E1000_VT_MSGTYPE_CTS;
3935 igb_write_mbx(hw, &ping, 1, i); 4136 igb_write_mbx(hw, &ping, 1, i);
3936 } 4137 }
3937} 4138}
3938 4139
4140static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4141{
4142 struct e1000_hw *hw = &adapter->hw;
4143 u32 vmolr = rd32(E1000_VMOLR(vf));
4144 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4145
4146 vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
4147 IGB_VF_FLAG_MULTI_PROMISC);
4148 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4149
4150 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4151 vmolr |= E1000_VMOLR_MPME;
4152 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4153 } else {
4154 /*
4155 * if we have hashes and we are clearing a multicast promisc
4156 * flag we need to write the hashes to the MTA as this step
4157 * was previously skipped
4158 */
4159 if (vf_data->num_vf_mc_hashes > 30) {
4160 vmolr |= E1000_VMOLR_MPME;
4161 } else if (vf_data->num_vf_mc_hashes) {
4162 int j;
4163 vmolr |= E1000_VMOLR_ROMPE;
4164 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4165 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4166 }
4167 }
4168
4169 wr32(E1000_VMOLR(vf), vmolr);
4170
4171 /* there are flags left unprocessed, likely not supported */
4172 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4173 return -EINVAL;
4174
4175 return 0;
4176
4177}
4178
3939static int igb_set_vf_multicasts(struct igb_adapter *adapter, 4179static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3940 u32 *msgbuf, u32 vf) 4180 u32 *msgbuf, u32 vf)
3941{ 4181{
@@ -3944,18 +4184,17 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3944 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 4184 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
3945 int i; 4185 int i;
3946 4186
3947 /* only up to 30 hash values supported */ 4187 /* salt away the number of multicast addresses assigned
3948 if (n > 30)
3949 n = 30;
3950
3951 /* salt away the number of multi cast addresses assigned
3952 * to this VF for later use to restore when the PF multi cast 4188 * to this VF for later use to restore when the PF multi cast
3953 * list changes 4189 * list changes
3954 */ 4190 */
3955 vf_data->num_vf_mc_hashes = n; 4191 vf_data->num_vf_mc_hashes = n;
3956 4192
3957 /* VFs are limited to using the MTA hash table for their multicast 4193 /* only up to 30 hash values supported */
3958 * addresses */ 4194 if (n > 30)
4195 n = 30;
4196
4197 /* store the hashes for later use */
3959 for (i = 0; i < n; i++) 4198 for (i = 0; i < n; i++)
3960 vf_data->vf_mc_hashes[i] = hash_list[i]; 4199 vf_data->vf_mc_hashes[i] = hash_list[i];
3961 4200
@@ -3972,9 +4211,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
3972 int i, j; 4211 int i, j;
3973 4212
3974 for (i = 0; i < adapter->vfs_allocated_count; i++) { 4213 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4214 u32 vmolr = rd32(E1000_VMOLR(i));
4215 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4216
3975 vf_data = &adapter->vf_data[i]; 4217 vf_data = &adapter->vf_data[i];
3976 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 4218
3977 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 4219 if ((vf_data->num_vf_mc_hashes > 30) ||
4220 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4221 vmolr |= E1000_VMOLR_MPME;
4222 } else if (vf_data->num_vf_mc_hashes) {
4223 vmolr |= E1000_VMOLR_ROMPE;
4224 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4225 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4226 }
4227 wr32(E1000_VMOLR(i), vmolr);
3978 } 4228 }
3979} 4229}
3980 4230
@@ -4012,7 +4262,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4012 struct e1000_hw *hw = &adapter->hw; 4262 struct e1000_hw *hw = &adapter->hw;
4013 u32 reg, i; 4263 u32 reg, i;
4014 4264
4015 /* It is an error to call this function when VFs are not enabled */ 4265 /* The vlvf table only exists on 82576 hardware and newer */
4266 if (hw->mac.type < e1000_82576)
4267 return -1;
4268
4269 /* we only need to do this if VMDq is enabled */
4016 if (!adapter->vfs_allocated_count) 4270 if (!adapter->vfs_allocated_count)
4017 return -1; 4271 return -1;
4018 4272
@@ -4042,16 +4296,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4042 4296
4043 /* if !enabled we need to set this up in vfta */ 4297 /* if !enabled we need to set this up in vfta */
4044 if (!(reg & E1000_VLVF_VLANID_ENABLE)) { 4298 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
4045 /* add VID to filter table, if bit already set 4299 /* add VID to filter table */
4046 * PF must have added it outside of table */ 4300 igb_vfta_set(hw, vid, true);
4047 if (igb_vfta_set(hw, vid, true))
4048 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
4049 adapter->vfs_allocated_count);
4050 reg |= E1000_VLVF_VLANID_ENABLE; 4301 reg |= E1000_VLVF_VLANID_ENABLE;
4051 } 4302 }
4052 reg &= ~E1000_VLVF_VLANID_MASK; 4303 reg &= ~E1000_VLVF_VLANID_MASK;
4053 reg |= vid; 4304 reg |= vid;
4054
4055 wr32(E1000_VLVF(i), reg); 4305 wr32(E1000_VLVF(i), reg);
4056 4306
4057 /* do not modify RLPML for PF devices */ 4307 /* do not modify RLPML for PF devices */
@@ -4067,8 +4317,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4067 reg |= size; 4317 reg |= size;
4068 wr32(E1000_VMOLR(vf), reg); 4318 wr32(E1000_VMOLR(vf), reg);
4069 } 4319 }
4070 adapter->vf_data[vf].vlans_enabled++;
4071 4320
4321 adapter->vf_data[vf].vlans_enabled++;
4072 return 0; 4322 return 0;
4073 } 4323 }
4074 } else { 4324 } else {
@@ -4110,15 +4360,14 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4110 return igb_vlvf_set(adapter, vid, add, vf); 4360 return igb_vlvf_set(adapter, vid, add, vf);
4111} 4361}
4112 4362
4113static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) 4363static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4114{ 4364{
4115 struct e1000_hw *hw = &adapter->hw; 4365 /* clear all flags */
4116 4366 adapter->vf_data[vf].flags = 0;
4117 /* disable mailbox functionality for vf */ 4367 adapter->vf_data[vf].last_nack = jiffies;
4118 adapter->vf_data[vf].clear_to_send = false;
4119 4368
4120 /* reset offloads to defaults */ 4369 /* reset offloads to defaults */
4121 igb_set_vmolr(hw, vf); 4370 igb_set_vmolr(adapter, vf);
4122 4371
4123 /* reset vlans for device */ 4372 /* reset vlans for device */
4124 igb_clear_vf_vfta(adapter, vf); 4373 igb_clear_vf_vfta(adapter, vf);
@@ -4130,7 +4379,18 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4130 igb_set_rx_mode(adapter->netdev); 4379 igb_set_rx_mode(adapter->netdev);
4131} 4380}
4132 4381
4133static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) 4382static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4383{
4384 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4385
4386 /* generate a new mac address as we were hotplug removed/added */
4387 random_ether_addr(vf_mac);
4388
4389 /* process remaining reset events */
4390 igb_vf_reset(adapter, vf);
4391}
4392
4393static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4134{ 4394{
4135 struct e1000_hw *hw = &adapter->hw; 4395 struct e1000_hw *hw = &adapter->hw;
4136 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; 4396 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
@@ -4139,11 +4399,10 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4139 u8 *addr = (u8 *)(&msgbuf[1]); 4399 u8 *addr = (u8 *)(&msgbuf[1]);
4140 4400
4141 /* process all the same items cleared in a function level reset */ 4401 /* process all the same items cleared in a function level reset */
4142 igb_vf_reset_event(adapter, vf); 4402 igb_vf_reset(adapter, vf);
4143 4403
4144 /* set vf mac address */ 4404 /* set vf mac address */
4145 igb_rar_set(hw, vf_mac, rar_entry); 4405 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4146 igb_set_rah_pool(hw, vf, rar_entry);
4147 4406
4148 /* enable transmit and receive for vf */ 4407 /* enable transmit and receive for vf */
4149 reg = rd32(E1000_VFTE); 4408 reg = rd32(E1000_VFTE);
@@ -4151,8 +4410,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4151 reg = rd32(E1000_VFRE); 4410 reg = rd32(E1000_VFRE);
4152 wr32(E1000_VFRE, reg | (1 << vf)); 4411 wr32(E1000_VFRE, reg | (1 << vf));
4153 4412
4154 /* enable mailbox functionality for vf */ 4413 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
4155 adapter->vf_data[vf].clear_to_send = true;
4156 4414
4157 /* reply to reset with ack and vf mac address */ 4415 /* reply to reset with ack and vf mac address */
4158 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 4416 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -4162,66 +4420,45 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4162 4420
4163static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 4421static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4164{ 4422{
4165 unsigned char *addr = (char *)&msg[1]; 4423 unsigned char *addr = (char *)&msg[1];
4166 int err = -1; 4424 int err = -1;
4167
4168 if (is_valid_ether_addr(addr))
4169 err = igb_set_vf_mac(adapter, vf, addr);
4170 4425
4171 return err; 4426 if (is_valid_ether_addr(addr))
4427 err = igb_set_vf_mac(adapter, vf, addr);
4172 4428
4429 return err;
4173} 4430}
4174 4431
4175static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) 4432static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4176{ 4433{
4177 struct e1000_hw *hw = &adapter->hw; 4434 struct e1000_hw *hw = &adapter->hw;
4435 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4178 u32 msg = E1000_VT_MSGTYPE_NACK; 4436 u32 msg = E1000_VT_MSGTYPE_NACK;
4179 4437
4180 /* if device isn't clear to send it shouldn't be reading either */ 4438 /* if device isn't clear to send it shouldn't be reading either */
4181 if (!adapter->vf_data[vf].clear_to_send) 4439 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4440 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4182 igb_write_mbx(hw, &msg, 1, vf); 4441 igb_write_mbx(hw, &msg, 1, vf);
4183} 4442 vf_data->last_nack = jiffies;
4184
4185
4186static void igb_msg_task(struct igb_adapter *adapter)
4187{
4188 struct e1000_hw *hw = &adapter->hw;
4189 u32 vf;
4190
4191 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4192 /* process any reset requests */
4193 if (!igb_check_for_rst(hw, vf)) {
4194 adapter->vf_data[vf].clear_to_send = false;
4195 igb_vf_reset_event(adapter, vf);
4196 }
4197
4198 /* process any messages pending */
4199 if (!igb_check_for_msg(hw, vf))
4200 igb_rcv_msg_from_vf(adapter, vf);
4201
4202 /* process any acks */
4203 if (!igb_check_for_ack(hw, vf))
4204 igb_rcv_ack_from_vf(adapter, vf);
4205
4206 } 4443 }
4207} 4444}
4208 4445
4209static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) 4446static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4210{ 4447{
4211 u32 mbx_size = E1000_VFMAILBOX_SIZE; 4448 struct pci_dev *pdev = adapter->pdev;
4212 u32 msgbuf[mbx_size]; 4449 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4213 struct e1000_hw *hw = &adapter->hw; 4450 struct e1000_hw *hw = &adapter->hw;
4451 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4214 s32 retval; 4452 s32 retval;
4215 4453
4216 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf); 4454 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4217 4455
4218 if (retval) 4456 if (retval)
4219 dev_err(&adapter->pdev->dev, 4457 dev_err(&pdev->dev, "Error receiving message from VF\n");
4220 "Error receiving message from VF\n");
4221 4458
4222 /* this is a message we already processed, do nothing */ 4459 /* this is a message we already processed, do nothing */
4223 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 4460 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4224 return retval; 4461 return;
4225 4462
4226 /* 4463 /*
4227 * until the vf completes a reset it should not be 4464 * until the vf completes a reset it should not be
@@ -4230,20 +4467,25 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4230 4467
4231 if (msgbuf[0] == E1000_VF_RESET) { 4468 if (msgbuf[0] == E1000_VF_RESET) {
4232 igb_vf_reset_msg(adapter, vf); 4469 igb_vf_reset_msg(adapter, vf);
4233 4470 return;
4234 return retval;
4235 } 4471 }
4236 4472
4237 if (!adapter->vf_data[vf].clear_to_send) { 4473 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
4238 msgbuf[0] |= E1000_VT_MSGTYPE_NACK; 4474 msgbuf[0] = E1000_VT_MSGTYPE_NACK;
4239 igb_write_mbx(hw, msgbuf, 1, vf); 4475 if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4240 return retval; 4476 igb_write_mbx(hw, msgbuf, 1, vf);
4477 vf_data->last_nack = jiffies;
4478 }
4479 return;
4241 } 4480 }
4242 4481
4243 switch ((msgbuf[0] & 0xFFFF)) { 4482 switch ((msgbuf[0] & 0xFFFF)) {
4244 case E1000_VF_SET_MAC_ADDR: 4483 case E1000_VF_SET_MAC_ADDR:
4245 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 4484 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4246 break; 4485 break;
4486 case E1000_VF_SET_PROMISC:
4487 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4488 break;
4247 case E1000_VF_SET_MULTICAST: 4489 case E1000_VF_SET_MULTICAST:
4248 retval = igb_set_vf_multicasts(adapter, msgbuf, vf); 4490 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4249 break; 4491 break;
@@ -4254,7 +4496,7 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4254 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 4496 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4255 break; 4497 break;
4256 default: 4498 default:
4257 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); 4499 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4258 retval = -1; 4500 retval = -1;
4259 break; 4501 break;
4260 } 4502 }
@@ -4268,8 +4510,53 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4268 msgbuf[0] |= E1000_VT_MSGTYPE_CTS; 4510 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4269 4511
4270 igb_write_mbx(hw, msgbuf, 1, vf); 4512 igb_write_mbx(hw, msgbuf, 1, vf);
4513}
4271 4514
4272 return retval; 4515static void igb_msg_task(struct igb_adapter *adapter)
4516{
4517 struct e1000_hw *hw = &adapter->hw;
4518 u32 vf;
4519
4520 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4521 /* process any reset requests */
4522 if (!igb_check_for_rst(hw, vf))
4523 igb_vf_reset_event(adapter, vf);
4524
4525 /* process any messages pending */
4526 if (!igb_check_for_msg(hw, vf))
4527 igb_rcv_msg_from_vf(adapter, vf);
4528
4529 /* process any acks */
4530 if (!igb_check_for_ack(hw, vf))
4531 igb_rcv_ack_from_vf(adapter, vf);
4532 }
4533}
4534
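The dispatcher above treats msgbuf[0] as a control word: the low 16 bits carry the request type, the payload starts at msgbuf[1], and the reply echoes the type with E1000_VT_MSGTYPE_ACK/NACK (plus CTS once the VF has completed a reset) or'ed in. A minimal sketch of one such exchange, built only from identifiers in this patch (the helper name and its arguments are illustrative):

static void example_mac_addr_exchange(struct igb_adapter *adapter, int vf,
                                      const u8 *mac, s32 err)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];

	/* VF request: type in the low 16 bits, MAC payload from word 1 on */
	msgbuf[0] = E1000_VF_SET_MAC_ADDR;
	memcpy(&msgbuf[1], mac, ETH_ALEN);

	/* PF reply: echo the type with the result flags or'ed in */
	msgbuf[0] |= err ? E1000_VT_MSGTYPE_NACK : E1000_VT_MSGTYPE_ACK;
	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;	/* VF is clear to send again */
	igb_write_mbx(hw, msgbuf, 1, vf);
}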
4535/**
4536 * igb_set_uta - Set unicast filter table address
4537 * @adapter: board private structure
4538 *
4539 * The unicast table address is a register array of 32-bit registers.
4540 * The table is meant to be used in a way similar to how the MTA is used;
4541 * however, due to certain limitations in the hardware it is necessary to
4542 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
4543 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
4544 **/
4545static void igb_set_uta(struct igb_adapter *adapter)
4546{
4547 struct e1000_hw *hw = &adapter->hw;
4548 int i;
4549
4550 /* The UTA table only exists on 82576 hardware and newer */
4551 if (hw->mac.type < e1000_82576)
4552 return;
4553
4554 /* we only need to do this if VMDq is enabled */
4555 if (!adapter->vfs_allocated_count)
4556 return;
4557
4558 for (i = 0; i < hw->mac.uta_reg_count; i++)
4559 array_wr32(E1000_UTA, i, ~0);
4273} 4560}
4274 4561
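In practice the blanket UTA fill only takes effect for pools whose VMOLR ROPE bit is set; a sketch of that pairing (E1000_VMOLR_ROPE and the helper name are assumptions here, the real enable path is the driver's VMOLR/promiscuous handling such as igb_set_vf_promisc):

static void example_uta_unicast_promisc(struct igb_adapter *adapter, int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;
	int i;

	/* every UTA hash bit set, exactly as igb_set_uta() does above */
	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);

	/* ROPE then acts as the per-pool "accept UTA hits" switch */
	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_ROPE;
	wr32(E1000_VMOLR(vfn), vmolr);
}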
4275/** 4562/**
@@ -4279,15 +4566,15 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4279 **/ 4566 **/
4280static irqreturn_t igb_intr_msi(int irq, void *data) 4567static irqreturn_t igb_intr_msi(int irq, void *data)
4281{ 4568{
4282 struct net_device *netdev = data; 4569 struct igb_adapter *adapter = data;
4283 struct igb_adapter *adapter = netdev_priv(netdev); 4570 struct igb_q_vector *q_vector = adapter->q_vector[0];
4284 struct e1000_hw *hw = &adapter->hw; 4571 struct e1000_hw *hw = &adapter->hw;
4285 /* read ICR disables interrupts using IAM */ 4572 /* read ICR disables interrupts using IAM */
4286 u32 icr = rd32(E1000_ICR); 4573 u32 icr = rd32(E1000_ICR);
4287 4574
4288 igb_write_itr(adapter->rx_ring); 4575 igb_write_itr(q_vector);
4289 4576
4290 if(icr & E1000_ICR_DOUTSYNC) { 4577 if (icr & E1000_ICR_DOUTSYNC) {
4291 /* HW is reporting DMA is out of sync */ 4578 /* HW is reporting DMA is out of sync */
4292 adapter->stats.doosync++; 4579 adapter->stats.doosync++;
4293 } 4580 }
@@ -4298,7 +4585,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4298 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4585 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4299 } 4586 }
4300 4587
4301 napi_schedule(&adapter->rx_ring[0].napi); 4588 napi_schedule(&q_vector->napi);
4302 4589
4303 return IRQ_HANDLED; 4590 return IRQ_HANDLED;
4304} 4591}
@@ -4310,8 +4597,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4310 **/ 4597 **/
4311static irqreturn_t igb_intr(int irq, void *data) 4598static irqreturn_t igb_intr(int irq, void *data)
4312{ 4599{
4313 struct net_device *netdev = data; 4600 struct igb_adapter *adapter = data;
4314 struct igb_adapter *adapter = netdev_priv(netdev); 4601 struct igb_q_vector *q_vector = adapter->q_vector[0];
4315 struct e1000_hw *hw = &adapter->hw; 4602 struct e1000_hw *hw = &adapter->hw;
4316 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 4603 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4317 * need for the IMC write */ 4604 * need for the IMC write */
@@ -4319,14 +4606,14 @@ static irqreturn_t igb_intr(int irq, void *data)
4319 if (!icr) 4606 if (!icr)
4320 return IRQ_NONE; /* Not our interrupt */ 4607 return IRQ_NONE; /* Not our interrupt */
4321 4608
4322 igb_write_itr(adapter->rx_ring); 4609 igb_write_itr(q_vector);
4323 4610
4324 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 4611 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4325 * not set, then the adapter didn't send an interrupt */ 4612 * not set, then the adapter didn't send an interrupt */
4326 if (!(icr & E1000_ICR_INT_ASSERTED)) 4613 if (!(icr & E1000_ICR_INT_ASSERTED))
4327 return IRQ_NONE; 4614 return IRQ_NONE;
4328 4615
4329 if(icr & E1000_ICR_DOUTSYNC) { 4616 if (icr & E1000_ICR_DOUTSYNC) {
4330 /* HW is reporting DMA is out of sync */ 4617 /* HW is reporting DMA is out of sync */
4331 adapter->stats.doosync++; 4618 adapter->stats.doosync++;
4332 } 4619 }
@@ -4338,26 +4625,27 @@ static irqreturn_t igb_intr(int irq, void *data)
4338 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4625 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4339 } 4626 }
4340 4627
4341 napi_schedule(&adapter->rx_ring[0].napi); 4628 napi_schedule(&q_vector->napi);
4342 4629
4343 return IRQ_HANDLED; 4630 return IRQ_HANDLED;
4344} 4631}
4345 4632
4346static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) 4633static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
4347{ 4634{
4348 struct igb_adapter *adapter = rx_ring->adapter; 4635 struct igb_adapter *adapter = q_vector->adapter;
4349 struct e1000_hw *hw = &adapter->hw; 4636 struct e1000_hw *hw = &adapter->hw;
4350 4637
4351 if (adapter->itr_setting & 3) { 4638 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4352 if (adapter->num_rx_queues == 1) 4639 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
4640 if (!adapter->msix_entries)
4353 igb_set_itr(adapter); 4641 igb_set_itr(adapter);
4354 else 4642 else
4355 igb_update_ring_itr(rx_ring); 4643 igb_update_ring_itr(q_vector);
4356 } 4644 }
4357 4645
4358 if (!test_bit(__IGB_DOWN, &adapter->state)) { 4646 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4359 if (adapter->msix_entries) 4647 if (adapter->msix_entries)
4360 wr32(E1000_EIMS, rx_ring->eims_value); 4648 wr32(E1000_EIMS, q_vector->eims_value);
4361 else 4649 else
4362 igb_irq_enable(adapter); 4650 igb_irq_enable(adapter);
4363 } 4651 }
@@ -4370,76 +4658,94 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
4370 **/ 4658 **/
4371static int igb_poll(struct napi_struct *napi, int budget) 4659static int igb_poll(struct napi_struct *napi, int budget)
4372{ 4660{
4373 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); 4661 struct igb_q_vector *q_vector = container_of(napi,
4374 int work_done = 0; 4662 struct igb_q_vector,
4663 napi);
4664 int tx_clean_complete = 1, work_done = 0;
4375 4665
4376#ifdef CONFIG_IGB_DCA 4666#ifdef CONFIG_IGB_DCA
4377 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4667 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4378 igb_update_rx_dca(rx_ring); 4668 igb_update_dca(q_vector);
4379#endif 4669#endif
4380 igb_clean_rx_irq_adv(rx_ring, &work_done, budget); 4670 if (q_vector->tx_ring)
4671 tx_clean_complete = igb_clean_tx_irq(q_vector);
4381 4672
4382 if (rx_ring->buddy) { 4673 if (q_vector->rx_ring)
4383#ifdef CONFIG_IGB_DCA 4674 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4384 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4675
4385 igb_update_tx_dca(rx_ring->buddy); 4676 if (!tx_clean_complete)
4386#endif 4677 work_done = budget;
4387 if (!igb_clean_tx_irq(rx_ring->buddy))
4388 work_done = budget;
4389 }
4390 4678
4391 /* If not enough Rx work done, exit the polling mode */ 4679 /* If not enough Rx work done, exit the polling mode */
4392 if (work_done < budget) { 4680 if (work_done < budget) {
4393 napi_complete(napi); 4681 napi_complete(napi);
4394 igb_rx_irq_enable(rx_ring); 4682 igb_ring_irq_enable(q_vector);
4395 } 4683 }
4396 4684
4397 return work_done; 4685 return work_done;
4398} 4686}
4399 4687
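The poll routine above works because each q_vector bundles at most one tx ring and one rx ring with its own napi context and EIMS bit. Roughly, with fields trimmed (the full definition lives in igb.h):

struct igb_q_vector {
	struct igb_adapter *adapter;	/* backlink used for flags and ITR  */
	struct igb_ring *tx_ring;	/* NULL for rx-only vectors         */
	struct igb_ring *rx_ring;	/* NULL for tx-only vectors         */
	struct napi_struct napi;	/* igb_poll() is its poll handler   */
	u32 eims_value;			/* bit written to EIMS on re-enable */
};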
4400/** 4688/**
4401 * igb_hwtstamp - utility function which checks for TX time stamp 4689 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
4402 * @adapter: board private structure 4690 * @adapter: board private structure
4691 * @shhwtstamps: timestamp structure to update
4692 * @regval: unsigned 64bit system time value.
4693 *
4694 * We need to convert the system time value stored in the RX/TXSTMP registers
4695 * into a hwtstamp which can be used by the upper level timestamping functions
4696 */
4697static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4698 struct skb_shared_hwtstamps *shhwtstamps,
4699 u64 regval)
4700{
4701 u64 ns;
4702
4703 ns = timecounter_cyc2time(&adapter->clock, regval);
4704 timecompare_update(&adapter->compare, ns);
4705 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4706 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4707 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4708}
4709
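For context, timecounter_cyc2time() only yields sane nanoseconds because probe time registers a cyclecounter over the SYSTIM registers and seeds a timecounter from it; a rough sketch of that setup (the adapter->cycles field, the helper names and the mult/shift values are assumptions for illustration):

static cycle_t igb_read_systim(const struct cyclecounter *cc)
{
	struct igb_adapter *adapter =
		container_of(cc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;

	stamp  = rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32;
	return stamp;
}

static void igb_init_hw_clock(struct igb_adapter *adapter)
{
	adapter->cycles.read  = igb_read_systim;
	adapter->cycles.mask  = CLOCKSOURCE_MASK(64);
	adapter->cycles.mult  = 1;	/* illustrative */
	adapter->cycles.shift = 19;	/* illustrative */
	timecounter_init(&adapter->clock, &adapter->cycles,
			 ktime_to_ns(ktime_get_real()));
}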
4710/**
4711 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4712 * @q_vector: pointer to q_vector containing needed info
4403 * @skb: packet that was just sent 4713 * @skb: packet that was just sent
4404 * 4714 *
4405 * If we were asked to do hardware stamping and such a time stamp is 4715 * If we were asked to do hardware stamping and such a time stamp is
4406 * available, then it must have been for this skb here because we only 4716 * available, then it must have been for this skb here because we only
4407 * allow only one such packet into the queue. 4717 * allow only one such packet into the queue.
4408 */ 4718 */
4409static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb) 4719static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
4410{ 4720{
4721 struct igb_adapter *adapter = q_vector->adapter;
4411 union skb_shared_tx *shtx = skb_tx(skb); 4722 union skb_shared_tx *shtx = skb_tx(skb);
4412 struct e1000_hw *hw = &adapter->hw; 4723 struct e1000_hw *hw = &adapter->hw;
4724 struct skb_shared_hwtstamps shhwtstamps;
4725 u64 regval;
4413 4726
4414 if (unlikely(shtx->hardware)) { 4727 /* if skb does not support hw timestamp or TX stamp not valid exit */
4415 u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID; 4728 if (likely(!shtx->hardware) ||
4416 if (valid) { 4729 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4417 u64 regval = rd32(E1000_TXSTMPL); 4730 return;
4418 u64 ns; 4731
4419 struct skb_shared_hwtstamps shhwtstamps; 4732 regval = rd32(E1000_TXSTMPL);
4420 4733 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4421 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 4734
4422 regval |= (u64)rd32(E1000_TXSTMPH) << 32; 4735 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4423 ns = timecounter_cyc2time(&adapter->clock, 4736 skb_tstamp_tx(skb, &shhwtstamps);
4424 regval);
4425 timecompare_update(&adapter->compare, ns);
4426 shhwtstamps.hwtstamp = ns_to_ktime(ns);
4427 shhwtstamps.syststamp =
4428 timecompare_transform(&adapter->compare, ns);
4429 skb_tstamp_tx(skb, &shhwtstamps);
4430 }
4431 }
4432} 4737}
4433 4738
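shtx->hardware is only set when an application has opted in per socket via SO_TIMESTAMPING; a minimal user-space sketch of that opt-in (the fallback define mirrors the kernel's own timestamping example and its numeric value is an assumption):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* assumed value, matches most architectures */
#endif

/* request hardware TX stamps; igb_tx_hwtstamp() hands them back through
 * the socket error queue as SCM_TIMESTAMPING control messages */
static int enable_tx_timestamps(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_SYS_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}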
4434/** 4739/**
4435 * igb_clean_tx_irq - Reclaim resources after transmit completes 4740 * igb_clean_tx_irq - Reclaim resources after transmit completes
4436 * @adapter: board private structure 4741 * @q_vector: pointer to q_vector containing needed info
4437 * returns true if ring is completely cleaned 4742 * returns true if ring is completely cleaned
4438 **/ 4743 **/
4439static bool igb_clean_tx_irq(struct igb_ring *tx_ring) 4744static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4440{ 4745{
4441 struct igb_adapter *adapter = tx_ring->adapter; 4746 struct igb_adapter *adapter = q_vector->adapter;
4442 struct net_device *netdev = adapter->netdev; 4747 struct igb_ring *tx_ring = q_vector->tx_ring;
4748 struct net_device *netdev = tx_ring->netdev;
4443 struct e1000_hw *hw = &adapter->hw; 4749 struct e1000_hw *hw = &adapter->hw;
4444 struct igb_buffer *buffer_info; 4750 struct igb_buffer *buffer_info;
4445 struct sk_buff *skb; 4751 struct sk_buff *skb;
@@ -4470,10 +4776,10 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4470 total_packets += segs; 4776 total_packets += segs;
4471 total_bytes += bytecount; 4777 total_bytes += bytecount;
4472 4778
4473 igb_tx_hwtstamp(adapter, skb); 4779 igb_tx_hwtstamp(q_vector, skb);
4474 } 4780 }
4475 4781
4476 igb_unmap_and_free_tx_resource(adapter, buffer_info); 4782 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
4477 tx_desc->wb.status = 0; 4783 tx_desc->wb.status = 0;
4478 4784
4479 i++; 4785 i++;
@@ -4496,7 +4802,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4496 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 4802 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
4497 !(test_bit(__IGB_DOWN, &adapter->state))) { 4803 !(test_bit(__IGB_DOWN, &adapter->state))) {
4498 netif_wake_subqueue(netdev, tx_ring->queue_index); 4804 netif_wake_subqueue(netdev, tx_ring->queue_index);
4499 ++adapter->restart_queue; 4805 tx_ring->tx_stats.restart_queue++;
4500 } 4806 }
4501 } 4807 }
4502 4808
@@ -4511,7 +4817,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4511 E1000_STATUS_TXOFF)) { 4817 E1000_STATUS_TXOFF)) {
4512 4818
4513 /* detected Tx unit hang */ 4819 /* detected Tx unit hang */
4514 dev_err(&adapter->pdev->dev, 4820 dev_err(&tx_ring->pdev->dev,
4515 "Detected Tx Unit Hang\n" 4821 "Detected Tx Unit Hang\n"
4516 " Tx Queue <%d>\n" 4822 " Tx Queue <%d>\n"
4517 " TDH <%x>\n" 4823 " TDH <%x>\n"
@@ -4524,11 +4830,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4524 " jiffies <%lx>\n" 4830 " jiffies <%lx>\n"
4525 " desc.status <%x>\n", 4831 " desc.status <%x>\n",
4526 tx_ring->queue_index, 4832 tx_ring->queue_index,
4527 readl(adapter->hw.hw_addr + tx_ring->head), 4833 readl(tx_ring->head),
4528 readl(adapter->hw.hw_addr + tx_ring->tail), 4834 readl(tx_ring->tail),
4529 tx_ring->next_to_use, 4835 tx_ring->next_to_use,
4530 tx_ring->next_to_clean, 4836 tx_ring->next_to_clean,
4531 tx_ring->buffer_info[i].time_stamp, 4837 tx_ring->buffer_info[eop].time_stamp,
4532 eop, 4838 eop,
4533 jiffies, 4839 jiffies,
4534 eop_desc->wb.status); 4840 eop_desc->wb.status);
@@ -4539,43 +4845,38 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4539 tx_ring->total_packets += total_packets; 4845 tx_ring->total_packets += total_packets;
4540 tx_ring->tx_stats.bytes += total_bytes; 4846 tx_ring->tx_stats.bytes += total_bytes;
4541 tx_ring->tx_stats.packets += total_packets; 4847 tx_ring->tx_stats.packets += total_packets;
4542 adapter->net_stats.tx_bytes += total_bytes;
4543 adapter->net_stats.tx_packets += total_packets;
4544 return (count < tx_ring->count); 4848 return (count < tx_ring->count);
4545} 4849}
4546 4850
4547/** 4851/**
4548 * igb_receive_skb - helper function to handle rx indications 4852 * igb_receive_skb - helper function to handle rx indications
4549 * @ring: pointer to receive ring receving this packet 4853 * @q_vector: structure containing interrupt and ring information
4550 * @status: descriptor status field as written by hardware 4854 * @skb: packet to send up
4551 * @rx_desc: receive descriptor containing vlan and type information. 4855 * @vlan_tag: vlan tag for packet
4552 * @skb: pointer to sk_buff to be indicated to stack
4553 **/ 4856 **/
4554static void igb_receive_skb(struct igb_ring *ring, u8 status, 4857static void igb_receive_skb(struct igb_q_vector *q_vector,
4555 union e1000_adv_rx_desc * rx_desc, 4858 struct sk_buff *skb,
4556 struct sk_buff *skb) 4859 u16 vlan_tag)
4557{ 4860{
4558 struct igb_adapter * adapter = ring->adapter; 4861 struct igb_adapter *adapter = q_vector->adapter;
4559 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); 4862
4560 4863 if (vlan_tag)
4561 skb_record_rx_queue(skb, ring->queue_index); 4864 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
4562 if (vlan_extracted) 4865 vlan_tag, skb);
4563 vlan_gro_receive(&ring->napi, adapter->vlgrp,
4564 le16_to_cpu(rx_desc->wb.upper.vlan),
4565 skb);
4566 else 4866 else
4567 napi_gro_receive(&ring->napi, skb); 4867 napi_gro_receive(&q_vector->napi, skb);
4568} 4868}
4569 4869
4570static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, 4870static inline void igb_rx_checksum_adv(struct igb_ring *ring,
4571 u32 status_err, struct sk_buff *skb) 4871 u32 status_err, struct sk_buff *skb)
4572{ 4872{
4573 skb->ip_summed = CHECKSUM_NONE; 4873 skb->ip_summed = CHECKSUM_NONE;
4574 4874
4575 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 4875 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
4576 if ((status_err & E1000_RXD_STAT_IXSM) || 4876 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
4577 (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED)) 4877 (status_err & E1000_RXD_STAT_IXSM))
4578 return; 4878 return;
4879
4579 /* TCP/UDP checksum error bit is set */ 4880 /* TCP/UDP checksum error bit is set */
4580 if (status_err & 4881 if (status_err &
4581 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { 4882 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4584,9 +4885,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4584 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 4885 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
4585 * packets, (aka let the stack check the crc32c) 4886 * packets, (aka let the stack check the crc32c)
4586 */ 4887 */
4587 if (!((adapter->hw.mac.type == e1000_82576) && 4888 if ((skb->len == 60) &&
4588 (skb->len == 60))) 4889 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
4589 adapter->hw_csum_err++; 4890 ring->rx_stats.csum_err++;
4891
4590 /* let the stack verify checksum errors */ 4892 /* let the stack verify checksum errors */
4591 return; 4893 return;
4592 } 4894 }
@@ -4594,11 +4896,38 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4594 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) 4896 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
4595 skb->ip_summed = CHECKSUM_UNNECESSARY; 4897 skb->ip_summed = CHECKSUM_UNNECESSARY;
4596 4898
4597 dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err); 4899 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
4598 adapter->hw_csum_good++;
4599} 4900}
4600 4901
4601static inline u16 igb_get_hlen(struct igb_adapter *adapter, 4902static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
4903 struct sk_buff *skb)
4904{
4905 struct igb_adapter *adapter = q_vector->adapter;
4906 struct e1000_hw *hw = &adapter->hw;
4907 u64 regval;
4908
4909 /*
4910 * If this bit is set, then the RX registers contain the time stamp. No
4911 * other packet will be time stamped until we read these registers, so
4912 * read the registers to make them available again. Because only one
4913 * packet can be time stamped at a time, we know that the register
4914 * values must belong to this one here and therefore we don't need to
4915 * compare any of the additional attributes stored for it.
4916 *
4917 * If nothing went wrong, then it should have a skb_shared_tx that we
4918 * can turn into a skb_shared_hwtstamps.
4919 */
4920 if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
4921 return;
4922 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
4923 return;
4924
4925 regval = rd32(E1000_RXSTMPL);
4926 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4927
4928 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
4929}
4930static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
4602 union e1000_adv_rx_desc *rx_desc) 4931 union e1000_adv_rx_desc *rx_desc)
4603{ 4932{
4604 /* HW will not DMA in data larger than the given buffer, even if it 4933 /* HW will not DMA in data larger than the given buffer, even if it
@@ -4607,27 +4936,28 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
4607 */ 4936 */
4608 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & 4937 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4609 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 4938 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4610 if (hlen > adapter->rx_ps_hdr_size) 4939 if (hlen > rx_ring->rx_buffer_len)
4611 hlen = adapter->rx_ps_hdr_size; 4940 hlen = rx_ring->rx_buffer_len;
4612 return hlen; 4941 return hlen;
4613} 4942}
4614 4943
4615static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, 4944static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4616 int *work_done, int budget) 4945 int *work_done, int budget)
4617{ 4946{
4618 struct igb_adapter *adapter = rx_ring->adapter; 4947 struct igb_ring *rx_ring = q_vector->rx_ring;
4619 struct net_device *netdev = adapter->netdev; 4948 struct net_device *netdev = rx_ring->netdev;
4620 struct e1000_hw *hw = &adapter->hw; 4949 struct pci_dev *pdev = rx_ring->pdev;
4621 struct pci_dev *pdev = adapter->pdev;
4622 union e1000_adv_rx_desc *rx_desc , *next_rxd; 4950 union e1000_adv_rx_desc *rx_desc , *next_rxd;
4623 struct igb_buffer *buffer_info , *next_buffer; 4951 struct igb_buffer *buffer_info , *next_buffer;
4624 struct sk_buff *skb; 4952 struct sk_buff *skb;
4625 bool cleaned = false; 4953 bool cleaned = false;
4626 int cleaned_count = 0; 4954 int cleaned_count = 0;
4955 int current_node = numa_node_id();
4627 unsigned int total_bytes = 0, total_packets = 0; 4956 unsigned int total_bytes = 0, total_packets = 0;
4628 unsigned int i; 4957 unsigned int i;
4629 u32 staterr; 4958 u32 staterr;
4630 u16 length; 4959 u16 length;
4960 u16 vlan_tag;
4631 4961
4632 i = rx_ring->next_to_clean; 4962 i = rx_ring->next_to_clean;
4633 buffer_info = &rx_ring->buffer_info[i]; 4963 buffer_info = &rx_ring->buffer_info[i];
@@ -4646,6 +4976,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4646 i++; 4976 i++;
4647 if (i == rx_ring->count) 4977 if (i == rx_ring->count)
4648 i = 0; 4978 i = 0;
4979
4649 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); 4980 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4650 prefetch(next_rxd); 4981 prefetch(next_rxd);
4651 next_buffer = &rx_ring->buffer_info[i]; 4982 next_buffer = &rx_ring->buffer_info[i];
@@ -4654,23 +4985,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4654 cleaned = true; 4985 cleaned = true;
4655 cleaned_count++; 4986 cleaned_count++;
4656 4987
4657 /* this is the fast path for the non-packet split case */
4658 if (!adapter->rx_ps_hdr_size) {
4659 pci_unmap_single(pdev, buffer_info->dma,
4660 adapter->rx_buffer_len,
4661 PCI_DMA_FROMDEVICE);
4662 buffer_info->dma = 0;
4663 skb_put(skb, length);
4664 goto send_up;
4665 }
4666
4667 if (buffer_info->dma) { 4988 if (buffer_info->dma) {
4668 u16 hlen = igb_get_hlen(adapter, rx_desc);
4669 pci_unmap_single(pdev, buffer_info->dma, 4989 pci_unmap_single(pdev, buffer_info->dma,
4670 adapter->rx_ps_hdr_size, 4990 rx_ring->rx_buffer_len,
4671 PCI_DMA_FROMDEVICE); 4991 PCI_DMA_FROMDEVICE);
4672 buffer_info->dma = 0; 4992 buffer_info->dma = 0;
4673 skb_put(skb, hlen); 4993 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
4994 skb_put(skb, length);
4995 goto send_up;
4996 }
4997 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
4674 } 4998 }
4675 4999
4676 if (length) { 5000 if (length) {
@@ -4683,15 +5007,14 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4683 buffer_info->page_offset, 5007 buffer_info->page_offset,
4684 length); 5008 length);
4685 5009
4686 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || 5010 if ((page_count(buffer_info->page) != 1) ||
4687 (page_count(buffer_info->page) != 1)) 5011 (page_to_nid(buffer_info->page) != current_node))
4688 buffer_info->page = NULL; 5012 buffer_info->page = NULL;
4689 else 5013 else
4690 get_page(buffer_info->page); 5014 get_page(buffer_info->page);
4691 5015
4692 skb->len += length; 5016 skb->len += length;
4693 skb->data_len += length; 5017 skb->data_len += length;
4694
4695 skb->truesize += length; 5018 skb->truesize += length;
4696 } 5019 }
4697 5020
@@ -4703,60 +5026,24 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4703 goto next_desc; 5026 goto next_desc;
4704 } 5027 }
4705send_up: 5028send_up:
4706 /*
4707 * If this bit is set, then the RX registers contain
4708 * the time stamp. No other packet will be time
4709 * stamped until we read these registers, so read the
4710 * registers to make them available again. Because
4711 * only one packet can be time stamped at a time, we
4712 * know that the register values must belong to this
4713 * one here and therefore we don't need to compare
4714 * any of the additional attributes stored for it.
4715 *
4716 * If nothing went wrong, then it should have a
4717 * skb_shared_tx that we can turn into a
4718 * skb_shared_hwtstamps.
4719 *
4720 * TODO: can time stamping be triggered (thus locking
4721 * the registers) without the packet reaching this point
4722 * here? In that case RX time stamping would get stuck.
4723 *
4724 * TODO: in "time stamp all packets" mode this bit is
4725 * not set. Need a global flag for this mode and then
4726 * always read the registers. Cannot be done without
4727 * a race condition.
4728 */
4729 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4730 u64 regval;
4731 u64 ns;
4732 struct skb_shared_hwtstamps *shhwtstamps =
4733 skb_hwtstamps(skb);
4734
4735 WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4736 "igb: no RX time stamp available for time stamped packet");
4737 regval = rd32(E1000_RXSTMPL);
4738 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4739 ns = timecounter_cyc2time(&adapter->clock, regval);
4740 timecompare_update(&adapter->compare, ns);
4741 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4742 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4743 shhwtstamps->syststamp =
4744 timecompare_transform(&adapter->compare, ns);
4745 }
4746
4747 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 5029 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
4748 dev_kfree_skb_irq(skb); 5030 dev_kfree_skb_irq(skb);
4749 goto next_desc; 5031 goto next_desc;
4750 } 5032 }
4751 5033
5034 igb_rx_hwtstamp(q_vector, staterr, skb);
4752 total_bytes += skb->len; 5035 total_bytes += skb->len;
4753 total_packets++; 5036 total_packets++;
4754 5037
4755 igb_rx_checksum_adv(adapter, staterr, skb); 5038 igb_rx_checksum_adv(rx_ring, staterr, skb);
4756 5039
4757 skb->protocol = eth_type_trans(skb, netdev); 5040 skb->protocol = eth_type_trans(skb, netdev);
5041 skb_record_rx_queue(skb, rx_ring->queue_index);
4758 5042
4759 igb_receive_skb(rx_ring, staterr, rx_desc, skb); 5043 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5044 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
5045
5046 igb_receive_skb(q_vector, skb, vlan_tag);
4760 5047
4761next_desc: 5048next_desc:
4762 rx_desc->wb.upper.status_error = 0; 5049 rx_desc->wb.upper.status_error = 0;
@@ -4783,8 +5070,6 @@ next_desc:
4783 rx_ring->total_bytes += total_bytes; 5070 rx_ring->total_bytes += total_bytes;
4784 rx_ring->rx_stats.packets += total_packets; 5071 rx_ring->rx_stats.packets += total_packets;
4785 rx_ring->rx_stats.bytes += total_bytes; 5072 rx_ring->rx_stats.bytes += total_bytes;
4786 adapter->net_stats.rx_bytes += total_bytes;
4787 adapter->net_stats.rx_packets += total_packets;
4788 return cleaned; 5073 return cleaned;
4789} 5074}
4790 5075
@@ -4792,12 +5077,9 @@ next_desc:
4792 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split 5077 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
4793 * @adapter: address of board private structure 5078 * @adapter: address of board private structure
4794 **/ 5079 **/
4795static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, 5080void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
4796 int cleaned_count)
4797{ 5081{
4798 struct igb_adapter *adapter = rx_ring->adapter; 5082 struct net_device *netdev = rx_ring->netdev;
4799 struct net_device *netdev = adapter->netdev;
4800 struct pci_dev *pdev = adapter->pdev;
4801 union e1000_adv_rx_desc *rx_desc; 5083 union e1000_adv_rx_desc *rx_desc;
4802 struct igb_buffer *buffer_info; 5084 struct igb_buffer *buffer_info;
4803 struct sk_buff *skb; 5085 struct sk_buff *skb;
@@ -4807,19 +5089,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4807 i = rx_ring->next_to_use; 5089 i = rx_ring->next_to_use;
4808 buffer_info = &rx_ring->buffer_info[i]; 5090 buffer_info = &rx_ring->buffer_info[i];
4809 5091
4810 if (adapter->rx_ps_hdr_size) 5092 bufsz = rx_ring->rx_buffer_len;
4811 bufsz = adapter->rx_ps_hdr_size;
4812 else
4813 bufsz = adapter->rx_buffer_len;
4814 5093
4815 while (cleaned_count--) { 5094 while (cleaned_count--) {
4816 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 5095 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4817 5096
4818 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { 5097 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
4819 if (!buffer_info->page) { 5098 if (!buffer_info->page) {
4820 buffer_info->page = alloc_page(GFP_ATOMIC); 5099 buffer_info->page = netdev_alloc_page(netdev);
4821 if (!buffer_info->page) { 5100 if (!buffer_info->page) {
4822 adapter->alloc_rx_buff_failed++; 5101 rx_ring->rx_stats.alloc_failed++;
4823 goto no_buffers; 5102 goto no_buffers;
4824 } 5103 }
4825 buffer_info->page_offset = 0; 5104 buffer_info->page_offset = 0;
@@ -4827,39 +5106,48 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4827 buffer_info->page_offset ^= PAGE_SIZE / 2; 5106 buffer_info->page_offset ^= PAGE_SIZE / 2;
4828 } 5107 }
4829 buffer_info->page_dma = 5108 buffer_info->page_dma =
4830 pci_map_page(pdev, buffer_info->page, 5109 pci_map_page(rx_ring->pdev, buffer_info->page,
4831 buffer_info->page_offset, 5110 buffer_info->page_offset,
4832 PAGE_SIZE / 2, 5111 PAGE_SIZE / 2,
4833 PCI_DMA_FROMDEVICE); 5112 PCI_DMA_FROMDEVICE);
5113 if (pci_dma_mapping_error(rx_ring->pdev,
5114 buffer_info->page_dma)) {
5115 buffer_info->page_dma = 0;
5116 rx_ring->rx_stats.alloc_failed++;
5117 goto no_buffers;
5118 }
4834 } 5119 }
4835 5120
4836 if (!buffer_info->skb) { 5121 skb = buffer_info->skb;
4837 skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); 5122 if (!skb) {
5123 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4838 if (!skb) { 5124 if (!skb) {
4839 adapter->alloc_rx_buff_failed++; 5125 rx_ring->rx_stats.alloc_failed++;
4840 goto no_buffers; 5126 goto no_buffers;
4841 } 5127 }
4842 5128
4843 /* Make buffer alignment 2 beyond a 16 byte boundary
4844 * this will result in a 16 byte aligned IP header after
4845 * the 14 byte MAC header is removed
4846 */
4847 skb_reserve(skb, NET_IP_ALIGN);
4848
4849 buffer_info->skb = skb; 5129 buffer_info->skb = skb;
4850 buffer_info->dma = pci_map_single(pdev, skb->data, 5130 }
5131 if (!buffer_info->dma) {
5132 buffer_info->dma = pci_map_single(rx_ring->pdev,
5133 skb->data,
4851 bufsz, 5134 bufsz,
4852 PCI_DMA_FROMDEVICE); 5135 PCI_DMA_FROMDEVICE);
5136 if (pci_dma_mapping_error(rx_ring->pdev,
5137 buffer_info->dma)) {
5138 buffer_info->dma = 0;
5139 rx_ring->rx_stats.alloc_failed++;
5140 goto no_buffers;
5141 }
4853 } 5142 }
4854 /* Refresh the desc even if buffer_addrs didn't change because 5143 /* Refresh the desc even if buffer_addrs didn't change because
4855 * each write-back erases this info. */ 5144 * each write-back erases this info. */
4856 if (adapter->rx_ps_hdr_size) { 5145 if (bufsz < IGB_RXBUFFER_1024) {
4857 rx_desc->read.pkt_addr = 5146 rx_desc->read.pkt_addr =
4858 cpu_to_le64(buffer_info->page_dma); 5147 cpu_to_le64(buffer_info->page_dma);
4859 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); 5148 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4860 } else { 5149 } else {
4861 rx_desc->read.pkt_addr = 5150 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
4862 cpu_to_le64(buffer_info->dma);
4863 rx_desc->read.hdr_addr = 0; 5151 rx_desc->read.hdr_addr = 0;
4864 } 5152 }
4865 5153
@@ -4882,7 +5170,7 @@ no_buffers:
4882 * applicable for weak-ordered memory model archs, 5170 * applicable for weak-ordered memory model archs,
4883 * such as IA-64). */ 5171 * such as IA-64). */
4884 wmb(); 5172 wmb();
4885 writel(i, adapter->hw.hw_addr + rx_ring->tail); 5173 writel(i, rx_ring->tail);
4886 } 5174 }
4887} 5175}
4888 5176
@@ -4941,13 +5229,11 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4941 struct igb_adapter *adapter = netdev_priv(netdev); 5229 struct igb_adapter *adapter = netdev_priv(netdev);
4942 struct e1000_hw *hw = &adapter->hw; 5230 struct e1000_hw *hw = &adapter->hw;
4943 struct hwtstamp_config config; 5231 struct hwtstamp_config config;
4944 u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED; 5232 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
4945 u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED; 5233 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
4946 u32 tsync_rx_ctl_type = 0;
4947 u32 tsync_rx_cfg = 0; 5234 u32 tsync_rx_cfg = 0;
4948 int is_l4 = 0; 5235 bool is_l4 = false;
4949 int is_l2 = 0; 5236 bool is_l2 = false;
4950 short port = 319; /* PTP */
4951 u32 regval; 5237 u32 regval;
4952 5238
4953 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 5239 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -4959,10 +5245,8 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4959 5245
4960 switch (config.tx_type) { 5246 switch (config.tx_type) {
4961 case HWTSTAMP_TX_OFF: 5247 case HWTSTAMP_TX_OFF:
4962 tsync_tx_ctl_bit = 0; 5248 tsync_tx_ctl = 0;
4963 break;
4964 case HWTSTAMP_TX_ON: 5249 case HWTSTAMP_TX_ON:
4965 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4966 break; 5250 break;
4967 default: 5251 default:
4968 return -ERANGE; 5252 return -ERANGE;
@@ -4970,7 +5254,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4970 5254
4971 switch (config.rx_filter) { 5255 switch (config.rx_filter) {
4972 case HWTSTAMP_FILTER_NONE: 5256 case HWTSTAMP_FILTER_NONE:
4973 tsync_rx_ctl_bit = 0; 5257 tsync_rx_ctl = 0;
4974 break; 5258 break;
4975 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 5259 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4976 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 5260 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -4981,86 +5265,97 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4981 * possible to time stamp both Sync and Delay_Req messages 5265 * possible to time stamp both Sync and Delay_Req messages
4982 * => fall back to time stamping all packets 5266 * => fall back to time stamping all packets
4983 */ 5267 */
4984 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL; 5268 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
4985 config.rx_filter = HWTSTAMP_FILTER_ALL; 5269 config.rx_filter = HWTSTAMP_FILTER_ALL;
4986 break; 5270 break;
4987 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 5271 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4988 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5272 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
4989 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; 5273 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
4990 is_l4 = 1; 5274 is_l4 = true;
4991 break; 5275 break;
4992 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 5276 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4993 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5277 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
4994 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; 5278 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
4995 is_l4 = 1; 5279 is_l4 = true;
4996 break; 5280 break;
4997 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 5281 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4998 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 5282 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4999 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5283 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5000 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; 5284 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
5001 is_l2 = 1; 5285 is_l2 = true;
5002 is_l4 = 1; 5286 is_l4 = true;
5003 config.rx_filter = HWTSTAMP_FILTER_SOME; 5287 config.rx_filter = HWTSTAMP_FILTER_SOME;
5004 break; 5288 break;
5005 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 5289 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5006 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 5290 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5007 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5291 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5008 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; 5292 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
5009 is_l2 = 1; 5293 is_l2 = true;
5010 is_l4 = 1; 5294 is_l4 = true;
5011 config.rx_filter = HWTSTAMP_FILTER_SOME; 5295 config.rx_filter = HWTSTAMP_FILTER_SOME;
5012 break; 5296 break;
5013 case HWTSTAMP_FILTER_PTP_V2_EVENT: 5297 case HWTSTAMP_FILTER_PTP_V2_EVENT:
5014 case HWTSTAMP_FILTER_PTP_V2_SYNC: 5298 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5015 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 5299 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5016 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2; 5300 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
5017 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 5301 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
5018 is_l2 = 1; 5302 is_l2 = true;
5019 break; 5303 break;
5020 default: 5304 default:
5021 return -ERANGE; 5305 return -ERANGE;
5022 } 5306 }
5023 5307
5308 if (hw->mac.type == e1000_82575) {
5309 if (tsync_rx_ctl | tsync_tx_ctl)
5310 return -EINVAL;
5311 return 0;
5312 }
5313
5024 /* enable/disable TX */ 5314 /* enable/disable TX */
5025 regval = rd32(E1000_TSYNCTXCTL); 5315 regval = rd32(E1000_TSYNCTXCTL);
5026 regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit; 5316 regval &= ~E1000_TSYNCTXCTL_ENABLED;
5317 regval |= tsync_tx_ctl;
5027 wr32(E1000_TSYNCTXCTL, regval); 5318 wr32(E1000_TSYNCTXCTL, regval);
5028 5319
5029 /* enable/disable RX, define which PTP packets are time stamped */ 5320 /* enable/disable RX */
5030 regval = rd32(E1000_TSYNCRXCTL); 5321 regval = rd32(E1000_TSYNCRXCTL);
5031 regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit; 5322 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
5032 regval = (regval & ~0xE) | tsync_rx_ctl_type; 5323 regval |= tsync_rx_ctl;
5033 wr32(E1000_TSYNCRXCTL, regval); 5324 wr32(E1000_TSYNCRXCTL, regval);
5034 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5035 5325
5036 /* 5326 /* define which PTP packets are time stamped */
5037 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 5327 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5038 * (Ethertype to filter on)
5039 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
5040 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
5041 */
5042 wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
5043
5044 /* L4 Queue Filter[0]: only filter by source and destination port */
5045 wr32(E1000_SPQF0, htons(port));
5046 wr32(E1000_IMIREXT(0), is_l4 ?
5047 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
5048 wr32(E1000_IMIR(0), is_l4 ?
5049 (htons(port)
5050 | (0<<16) /* immediate interrupt disabled */
5051 | 0 /* (1<<17) bit cleared: do not bypass
5052 destination port check */)
5053 : 0);
5054 wr32(E1000_FTQF0, is_l4 ?
5055 (0x11 /* UDP */
5056 | (1<<15) /* VF not compared */
5057 | (1<<27) /* Enable Timestamping */
5058 | (7<<28) /* only source port filter enabled,
5059 source/target address and protocol
5060 masked */)
5061 : ((1<<15) | (15<<28) /* all mask bits set = filter not
5062 enabled */));
5063 5328
5329 /* define ethertype filter for timestamped packets */
5330 if (is_l2)
5331 wr32(E1000_ETQF(3),
5332 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
5333 E1000_ETQF_1588 | /* enable timestamping */
5334 ETH_P_1588)); /* 1588 eth protocol type */
5335 else
5336 wr32(E1000_ETQF(3), 0);
5337
5338#define PTP_PORT 319
5339 /* L4 Queue Filter[3]: filter by destination port and protocol */
5340 if (is_l4) {
5341 u32 ftqf = (IPPROTO_UDP /* UDP */
5342 | E1000_FTQF_VF_BP /* VF not compared */
5343 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
5344 | E1000_FTQF_MASK); /* mask all inputs */
5345 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
5346
5347 wr32(E1000_IMIR(3), htons(PTP_PORT));
5348 wr32(E1000_IMIREXT(3),
5349 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
5350 if (hw->mac.type == e1000_82576) {
5351 /* enable source port check */
5352 wr32(E1000_SPQF(3), htons(PTP_PORT));
5353 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
5354 }
5355 wr32(E1000_FTQF(3), ftqf);
5356 } else {
5357 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
5358 }
5064 wrfl(); 5359 wrfl();
5065 5360
5066 adapter->hwtstamp_config = config; 5361 adapter->hwtstamp_config = config;
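This handler is what user space reaches through the SIOCSHWTSTAMP ioctl; a minimal sketch of such a request (interface name and filter choice are illustrative, and the driver may widen rx_filter, e.g. to HWTSTAMP_FILTER_ALL, before the config is copied back):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int request_hw_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type   = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* lands in the handler above */
}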
@@ -5137,21 +5432,15 @@ static void igb_vlan_rx_register(struct net_device *netdev,
5137 ctrl |= E1000_CTRL_VME; 5432 ctrl |= E1000_CTRL_VME;
5138 wr32(E1000_CTRL, ctrl); 5433 wr32(E1000_CTRL, ctrl);
5139 5434
5140 /* enable VLAN receive filtering */ 5435 /* Disable CFI check */
5141 rctl = rd32(E1000_RCTL); 5436 rctl = rd32(E1000_RCTL);
5142 rctl &= ~E1000_RCTL_CFIEN; 5437 rctl &= ~E1000_RCTL_CFIEN;
5143 wr32(E1000_RCTL, rctl); 5438 wr32(E1000_RCTL, rctl);
5144 igb_update_mng_vlan(adapter);
5145 } else { 5439 } else {
5146 /* disable VLAN tag insert/strip */ 5440 /* disable VLAN tag insert/strip */
5147 ctrl = rd32(E1000_CTRL); 5441 ctrl = rd32(E1000_CTRL);
5148 ctrl &= ~E1000_CTRL_VME; 5442 ctrl &= ~E1000_CTRL_VME;
5149 wr32(E1000_CTRL, ctrl); 5443 wr32(E1000_CTRL, ctrl);
5150
5151 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
5152 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
5153 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
5154 }
5155 } 5444 }
5156 5445
5157 igb_rlpml_set(adapter); 5446 igb_rlpml_set(adapter);
@@ -5166,16 +5455,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5166 struct e1000_hw *hw = &adapter->hw; 5455 struct e1000_hw *hw = &adapter->hw;
5167 int pf_id = adapter->vfs_allocated_count; 5456 int pf_id = adapter->vfs_allocated_count;
5168 5457
5169 if ((hw->mng_cookie.status & 5458 /* attempt to add filter to vlvf array */
5170 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5459 igb_vlvf_set(adapter, vid, true, pf_id);
5171 (vid == adapter->mng_vlan_id))
5172 return;
5173
5174 /* add vid to vlvf if sr-iov is enabled,
5175 * if that fails add directly to filter table */
5176 if (igb_vlvf_set(adapter, vid, true, pf_id))
5177 igb_vfta_set(hw, vid, true);
5178 5460
5461 /* add the filter since PF can receive vlans w/o entry in vlvf */
5462 igb_vfta_set(hw, vid, true);
5179} 5463}
5180 5464
5181static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 5465static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -5183,6 +5467,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5183 struct igb_adapter *adapter = netdev_priv(netdev); 5467 struct igb_adapter *adapter = netdev_priv(netdev);
5184 struct e1000_hw *hw = &adapter->hw; 5468 struct e1000_hw *hw = &adapter->hw;
5185 int pf_id = adapter->vfs_allocated_count; 5469 int pf_id = adapter->vfs_allocated_count;
5470 s32 err;
5186 5471
5187 igb_irq_disable(adapter); 5472 igb_irq_disable(adapter);
5188 vlan_group_set_device(adapter->vlgrp, vid, NULL); 5473 vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -5190,17 +5475,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5190 if (!test_bit(__IGB_DOWN, &adapter->state)) 5475 if (!test_bit(__IGB_DOWN, &adapter->state))
5191 igb_irq_enable(adapter); 5476 igb_irq_enable(adapter);
5192 5477
5193 if ((adapter->hw.mng_cookie.status & 5478 /* remove vlan from VLVF table array */
5194 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5479 err = igb_vlvf_set(adapter, vid, false, pf_id);
5195 (vid == adapter->mng_vlan_id)) {
5196 /* release control to f/w */
5197 igb_release_hw_control(adapter);
5198 return;
5199 }
5200 5480
5201 /* remove vid from vlvf if sr-iov is enabled, 5481 /* if vid was not present in VLVF just remove it from table */
5202 * if not in vlvf remove from vfta */ 5482 if (err)
5203 if (igb_vlvf_set(adapter, vid, false, pf_id))
5204 igb_vfta_set(hw, vid, false); 5483 igb_vfta_set(hw, vid, false);
5205} 5484}
5206 5485
@@ -5220,6 +5499,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
5220 5499
5221int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) 5500int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5222{ 5501{
5502 struct pci_dev *pdev = adapter->pdev;
5223 struct e1000_mac_info *mac = &adapter->hw.mac; 5503 struct e1000_mac_info *mac = &adapter->hw.mac;
5224 5504
5225 mac->autoneg = 0; 5505 mac->autoneg = 0;
@@ -5243,8 +5523,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5243 break; 5523 break;
5244 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5524 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5245 default: 5525 default:
5246 dev_err(&adapter->pdev->dev, 5526 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
5247 "Unsupported Speed/Duplex configuration\n");
5248 return -EINVAL; 5527 return -EINVAL;
5249 } 5528 }
5250 return 0; 5529 return 0;
@@ -5266,9 +5545,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5266 if (netif_running(netdev)) 5545 if (netif_running(netdev))
5267 igb_close(netdev); 5546 igb_close(netdev);
5268 5547
5269 igb_reset_interrupt_capability(adapter); 5548 igb_clear_interrupt_scheme(adapter);
5270
5271 igb_free_queues(adapter);
5272 5549
5273#ifdef CONFIG_PM 5550#ifdef CONFIG_PM
5274 retval = pci_save_state(pdev); 5551 retval = pci_save_state(pdev);
@@ -5300,7 +5577,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5300 wr32(E1000_CTRL, ctrl); 5577 wr32(E1000_CTRL, ctrl);
5301 5578
5302 /* Allow time for pending master requests to run */ 5579 /* Allow time for pending master requests to run */
5303 igb_disable_pcie_master(&adapter->hw); 5580 igb_disable_pcie_master(hw);
5304 5581
5305 wr32(E1000_WUC, E1000_WUC_PME_EN); 5582 wr32(E1000_WUC, E1000_WUC_PME_EN);
5306 wr32(E1000_WUFC, wufc); 5583 wr32(E1000_WUFC, wufc);
@@ -5363,9 +5640,7 @@ static int igb_resume(struct pci_dev *pdev)
5363 pci_enable_wake(pdev, PCI_D3hot, 0); 5640 pci_enable_wake(pdev, PCI_D3hot, 0);
5364 pci_enable_wake(pdev, PCI_D3cold, 0); 5641 pci_enable_wake(pdev, PCI_D3cold, 0);
5365 5642
5366 igb_set_interrupt_capability(adapter); 5643 if (igb_init_interrupt_scheme(adapter)) {
5367
5368 if (igb_alloc_queues(adapter)) {
5369 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 5644 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5370 return -ENOMEM; 5645 return -ENOMEM;
5371 } 5646 }
@@ -5417,22 +5692,16 @@ static void igb_netpoll(struct net_device *netdev)
5417 int i; 5692 int i;
5418 5693
5419 if (!adapter->msix_entries) { 5694 if (!adapter->msix_entries) {
5695 struct igb_q_vector *q_vector = adapter->q_vector[0];
5420 igb_irq_disable(adapter); 5696 igb_irq_disable(adapter);
5421 napi_schedule(&adapter->rx_ring[0].napi); 5697 napi_schedule(&q_vector->napi);
5422 return; 5698 return;
5423 } 5699 }
5424 5700
5425 for (i = 0; i < adapter->num_tx_queues; i++) { 5701 for (i = 0; i < adapter->num_q_vectors; i++) {
5426 struct igb_ring *tx_ring = &adapter->tx_ring[i]; 5702 struct igb_q_vector *q_vector = adapter->q_vector[i];
5427 wr32(E1000_EIMC, tx_ring->eims_value); 5703 wr32(E1000_EIMC, q_vector->eims_value);
5428 igb_clean_tx_irq(tx_ring); 5704 napi_schedule(&q_vector->napi);
5429 wr32(E1000_EIMS, tx_ring->eims_value);
5430 }
5431
5432 for (i = 0; i < adapter->num_rx_queues; i++) {
5433 struct igb_ring *rx_ring = &adapter->rx_ring[i];
5434 wr32(E1000_EIMC, rx_ring->eims_value);
5435 napi_schedule(&rx_ring->napi);
5436 } 5705 }
5437} 5706}
5438#endif /* CONFIG_NET_POLL_CONTROLLER */ 5707#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -5532,6 +5801,33 @@ static void igb_io_resume(struct pci_dev *pdev)
5532 igb_get_hw_control(adapter); 5801 igb_get_hw_control(adapter);
5533} 5802}
5534 5803
5804static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
5805 u8 qsel)
5806{
5807 u32 rar_low, rar_high;
5808 struct e1000_hw *hw = &adapter->hw;
5809
5810 /* HW expects these in little endian so we reverse the byte order
5811 * from network order (big endian) to little endian
5812 */
5813 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
5814 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5815 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5816
5817 /* Indicate to hardware the Address is Valid. */
5818 rar_high |= E1000_RAH_AV;
5819
5820 if (hw->mac.type == e1000_82575)
5821 rar_high |= E1000_RAH_POOL_1 * qsel;
5822 else
5823 rar_high |= E1000_RAH_POOL_1 << qsel;
5824
5825 wr32(E1000_RAL(index), rar_low);
5826 wrfl();
5827 wr32(E1000_RAH(index), rar_high);
5828 wrfl();
5829}
5830
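A worked example of the byte order handled above, for an arbitrarily chosen address:

static void example_rar_packing(void)
{
	u8 addr[6] = { 0x00, 0x1b, 0x21, 0x3a, 0x4c, 0x5d };
	u32 rar_low, rar_high;

	rar_low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
	rar_high = addr[4] | (addr[5] << 8);

	/* rar_low == 0x3a211b00, rar_high == 0x00005d4c, before the
	 * E1000_RAH_AV and pool-select bits are or'ed in */
	(void)rar_low;
	(void)rar_high;
}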
5535static int igb_set_vf_mac(struct igb_adapter *adapter, 5831static int igb_set_vf_mac(struct igb_adapter *adapter,
5536 int vf, unsigned char *mac_addr) 5832 int vf, unsigned char *mac_addr)
5537{ 5833{
@@ -5542,8 +5838,7 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5542 5838
5543 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 5839 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
5544 5840
5545 igb_rar_set(hw, mac_addr, rar_entry); 5841 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
5546 igb_set_rah_pool(hw, vf, rar_entry);
5547 5842
5548 return 0; 5843 return 0;
5549} 5844}
@@ -5551,19 +5846,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5551static void igb_vmm_control(struct igb_adapter *adapter) 5846static void igb_vmm_control(struct igb_adapter *adapter)
5552{ 5847{
5553 struct e1000_hw *hw = &adapter->hw; 5848 struct e1000_hw *hw = &adapter->hw;
5554 u32 reg_data; 5849 u32 reg;
5555 5850
5556 if (!adapter->vfs_allocated_count) 5851 /* replication is not supported for 82575 */
5852 if (hw->mac.type == e1000_82575)
5557 return; 5853 return;
5558 5854
5559 /* VF's need PF reset indication before they 5855 /* enable replication vlan tag stripping */
5560 * can send/receive mail */ 5856 reg = rd32(E1000_RPLOLR);
5561 reg_data = rd32(E1000_CTRL_EXT); 5857 reg |= E1000_RPLOLR_STRVLAN;
5562 reg_data |= E1000_CTRL_EXT_PFRSTD; 5858 wr32(E1000_RPLOLR, reg);
5563 wr32(E1000_CTRL_EXT, reg_data);
5564 5859
5565 igb_vmdq_set_loopback_pf(hw, true); 5860 /* notify HW that the MAC is adding vlan tags */
5566 igb_vmdq_set_replication_pf(hw, true); 5861 reg = rd32(E1000_DTXCTL);
5862 reg |= E1000_DTXCTL_VLAN_ADDED;
5863 wr32(E1000_DTXCTL, reg);
5864
5865 if (adapter->vfs_allocated_count) {
5866 igb_vmdq_set_loopback_pf(hw, true);
5867 igb_vmdq_set_replication_pf(hw, true);
5868 } else {
5869 igb_vmdq_set_loopback_pf(hw, false);
5870 igb_vmdq_set_replication_pf(hw, false);
5871 }
5567} 5872}
5568 5873
5569/* igb_main.c */ 5874/* igb_main.c */