Diffstat (limited to 'drivers/net/e1000e')
 drivers/net/e1000e/82571.c   |  2
 drivers/net/e1000e/e1000.h   |  2
 drivers/net/e1000e/es2lan.c  |  2
 drivers/net/e1000e/hw.h      |  1
 drivers/net/e1000e/ich8lan.c | 78
 drivers/net/e1000e/lib.c     | 54
 drivers/net/e1000e/netdev.c  | 87
 drivers/net/e1000e/phy.c     | 85
 8 files changed, 161 insertions(+), 150 deletions(-)
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b979464091bb..02d67d047d96 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -237,6 +237,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
 	/* Set if manageability features are enabled. */
 	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
 	                   ? true : false;
+	/* Adaptive IFS supported */
+	mac->adaptive_ifs = true;
 
 	/* check for link */
 	switch (hw->phy.media_type) {
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cebbd9079d53..d236efaf7478 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -421,6 +421,7 @@ struct e1000_info {
 /* CRC Stripping defines */
 #define FLAG2_CRC_STRIPPING               (1 << 0)
 #define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
+#define FLAG2_IS_DISCARDING               (1 << 2)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -582,7 +583,6 @@ extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
 extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
 					 u16 data);
-extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
 extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
 extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 3028f23da891..e2aa3b788564 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -224,6 +224,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
 	/* Set if manageability features are enabled. */
 	mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
 	                   ? true : false;
+	/* Adaptive IFS not supported */
+	mac->adaptive_ifs = false;
 
 	/* check for link */
 	switch (hw->phy.media_type) {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 2784cf44a6f3..eccf29b75c41 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -818,6 +818,7 @@ struct e1000_mac_info {
 
 	u8  forced_speed_duplex;
 
+	bool adaptive_ifs;
 	bool arc_subsystem_valid;
 	bool autoneg;
 	bool autoneg_failed;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9b09246af064..8b6ecd127889 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -138,6 +138,10 @@
 #define E1000_NVM_K1_CONFIG	0x1B /* NVM K1 Config Word */
 #define E1000_NVM_K1_ENABLE	0x1  /* NVM Enable K1 bit */
 
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL	PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW	0x0400
+
 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
@@ -219,6 +223,7 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
 static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -270,7 +275,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
 	phy->id = e1000_phy_unknown;
-	e1000e_get_phy_id(hw);
+	ret_val = e1000e_get_phy_id(hw);
+	if (ret_val)
+		goto out;
+	if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+		/*
+		 * In case the PHY needs to be in mdio slow mode (eg. 82577),
+		 * set slow mode and try to get the PHY id again.
+		 */
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_get_phy_id(hw);
+		if (ret_val)
+			goto out;
+	}
 	phy->type = e1000e_get_phy_type_from_id(phy->id);
 
 	switch (phy->type) {
@@ -292,6 +311,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 		break;
 	}
 
+out:
 	return ret_val;
 }
 
@@ -454,6 +474,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
 		mac->rar_entry_count--;
 	/* Set if manageability features are enabled. */
 	mac->arc_subsystem_valid = true;
+	/* Adaptive IFS supported */
+	mac->adaptive_ifs = true;
 
 	/* LED operations */
 	switch (mac->type) {
@@ -1074,16 +1096,44 @@ out:
 
 
 /**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw:   pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= HV_KMRN_MDIO_SLOW;
+
+	ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+	return ret_val;
+}
+
+/**
  * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
  * done after every PHY reset.
  **/
 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
+	u16 phy_data;
 
 	if (hw->mac.type != e1000_pchlan)
 		return ret_val;
 
+	/* Set MDIO slow mode before any other MDIO access */
+	if (hw->phy.type == e1000_phy_82577) {
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (ret_val)
+			goto out;
+	}
+
 	if (((hw->phy.type == e1000_phy_82577) &&
 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
@@ -1116,16 +1166,32 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
 
 	hw->phy.addr = 1;
 	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	hw->phy.ops.release(hw);
 	if (ret_val)
 		goto out;
-	hw->phy.ops.release(hw);
 
 	/*
 	 * Configure the K1 Si workaround during phy reset assuming there is
 	 * link so that it disables K1 if link is in 1Gbps.
 	 */
 	ret_val = e1000_k1_gig_workaround_hv(hw, true);
+	if (ret_val)
+		goto out;
 
+	/* Workaround for link disconnects on a busy hub in half duplex */
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+	ret_val = hw->phy.ops.read_reg_locked(hw,
+	                                      PHY_REG(BM_PORT_CTRL_PAGE, 17),
+	                                      &phy_data);
+	if (ret_val)
+		goto release;
+	ret_val = hw->phy.ops.write_reg_locked(hw,
+	                                       PHY_REG(BM_PORT_CTRL_PAGE, 17),
+	                                       phy_data & 0x00FF);
+release:
+	hw->phy.ops.release(hw);
 out:
 	return ret_val;
 }
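The helper added above is a plain read-modify-write of the KMRN mode control register (page 769, register 16). Below is a minimal user-space sketch of the same pattern; phy_read() and phy_write() are hypothetical stand-ins for the driver's e1e_rphy()/e1e_wphy(), and the 0x2180/0x2580 values come from the comment removed from phy.c later in this diff.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's e1e_rphy()/e1e_wphy() accessors. */
static uint16_t fake_reg = 0x2180;                /* fast-mode value */
static int phy_read(uint16_t *val) { *val = fake_reg; return 0; }
static int phy_write(uint16_t val) { fake_reg = val; return 0; }

#define HV_KMRN_MDIO_SLOW 0x0400

/* Same shape as e1000_set_mdio_slow_mode_hv(): read, OR in the bit, write back. */
static int set_mdio_slow_mode(void)
{
	uint16_t data;
	int ret = phy_read(&data);

	if (ret)
		return ret;
	return phy_write(data | HV_KMRN_MDIO_SLOW);
}

int main(void)
{
	set_mdio_slow_mode();
	printf("KMRN mode ctrl: 0x%04x\n", fake_reg);  /* prints 0x2580 (slow) */
	return 0;
}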
@@ -1182,6 +1248,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 	/* Allow time for h/w to get to a quiescent state after reset */
 	mdelay(10);
 
+	/* Perform any necessary post-reset workarounds */
 	if (hw->mac.type == e1000_pchlan) {
 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
 		if (ret_val)
@@ -2482,6 +2549,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	if (!ret_val)
 		e1000_release_swflag_ich8lan(hw);
 
+	/* Perform any necessary post-reset workarounds */
+	if (hw->mac.type == e1000_pchlan)
+		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+
 	if (ctrl & E1000_CTRL_PHY_RST)
 		ret_val = hw->phy.ops.get_cfg_done(hw);
 
@@ -2526,9 +2597,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	kab |= E1000_KABGTXD_BGSQLBIAS;
 	ew32(KABGTXD, kab);
 
-	if (hw->mac.type == e1000_pchlan)
-		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
 out:
 	return ret_val;
 }
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a86c17548c1e..2fa9b36a2c5a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -125,6 +125,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
 void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
 {
 	u32 i;
+	u8 mac_addr[ETH_ALEN] = {0};
 
 	/* Setup the receive address */
 	e_dbg("Programming MAC Address into RAR[0]\n");
@@ -133,12 +134,8 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
 
 	/* Zero out the other (rar_entry_count - 1) receive addresses */
 	e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
-	for (i = 1; i < rar_count; i++) {
-		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
-		e1e_flush();
-		E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
-		e1e_flush();
-	}
+	for (i = 1; i < rar_count; i++)
+		e1000e_rar_set(hw, mac_addr, i);
 }
 
 /**
@@ -164,10 +161,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
 
 	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
 
-	rar_high |= E1000_RAH_AV;
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high)
+		rar_high |= E1000_RAH_AV;
 
-	E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
-	E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+	/*
+	 * Some bridges will combine consecutive 32-bit writes into
+	 * a single burst write, which will malfunction on some parts.
+	 * The flushes avoid this.
+	 */
+	ew32(RAL(index), rar_low);
+	e1e_flush();
+	ew32(RAH(index), rar_high);
+	e1e_flush();
 }
 
 /**
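For illustration, the packing that feeds RAL/RAH can be modeled in isolation. The rar_low packing of bytes 0-3 is not visible in this hunk's context, so treat that part as an assumption; the AV-bit rule matches the lines added above (a zeroed slot is left with Address Valid clear, which is what lets e1000e_init_rx_addrs() clear entries through e1000e_rar_set()).

#include <stdint.h>
#include <stdio.h>

#define E1000_RAH_AV 0x80000000u  /* Address Valid bit */

/* Pack a MAC address the way e1000e_rar_set() does and decide on AV. */
static void rar_pack(const uint8_t addr[6], uint32_t *ral, uint32_t *rah)
{
	*ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	*rah = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	/* An all-zero address is a cleared slot: leave AV unset. */
	if (*ral || *rah)
		*rah |= E1000_RAH_AV;
}

int main(void)
{
	const uint8_t mac[6]  = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };
	const uint8_t zero[6] = { 0 };
	uint32_t ral, rah;

	rar_pack(mac, &ral, &rah);
	printf("RAL=0x%08x RAH=0x%08x\n", ral, rah);   /* AV set */
	rar_pack(zero, &ral, &rah);
	printf("RAL=0x%08x RAH=0x%08x\n", ral, rah);   /* AV clear */
	return 0;
}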
@@ -1609,6 +1615,11 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 
+	if (!mac->adaptive_ifs) {
+		e_dbg("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
 	mac->current_ifs_val = 0;
 	mac->ifs_min_val = IFS_MIN;
 	mac->ifs_max_val = IFS_MAX;
@@ -1617,6 +1628,8 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
 
 	mac->in_ifs_mode = false;
 	ew32(AIT, 0);
+out:
+	return;
 }
 
 /**
@@ -1630,6 +1643,11 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 
+	if (!mac->adaptive_ifs) {
+		e_dbg("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
 	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
 		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
 			mac->in_ifs_mode = true;
@@ -1650,6 +1668,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
 			ew32(AIT, 0);
 		}
 	}
+out:
+	return;
 }
 
 /**
@@ -2287,10 +2307,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
 	s32 ret_val, hdr_csum, csum;
 	u8 i, len;
 
+	hw->mac.tx_pkt_filtering = true;
+
 	/* No manageability, no filtering */
 	if (!e1000e_check_mng_mode(hw)) {
 		hw->mac.tx_pkt_filtering = false;
-		return 0;
+		goto out;
 	}
 
 	/*
@@ -2298,9 +2320,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
 	 * reason, disable filtering.
 	 */
 	ret_val = e1000_mng_enable_host_if(hw);
-	if (ret_val != 0) {
+	if (ret_val) {
 		hw->mac.tx_pkt_filtering = false;
-		return ret_val;
+		goto out;
 	}
 
 	/* Read in the header.  Length and offset are in dwords. */
@@ -2319,17 +2341,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
 	 */
 	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
 		hw->mac.tx_pkt_filtering = true;
-		return 1;
+		goto out;
 	}
 
 	/* Cookie area is valid, make the final check for filtering. */
 	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
 		hw->mac.tx_pkt_filtering = false;
-		return 0;
+		goto out;
 	}
 
-	hw->mac.tx_pkt_filtering = true;
-	return 1;
+out:
+	return hw->mac.tx_pkt_filtering;
 }
 
 /**
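A condensed sketch of the reworked control flow, with the host-interface and checksum details reduced to booleans (the three parameters are illustrative, not the driver's API): set the default up front, overwrite it on each early-out, and return the cached value from a single exit.

#include <stdbool.h>
#include <stdio.h>

static bool tx_pkt_filtering;

/* Single-exit shape of the reworked e1000e_enable_tx_pkt_filtering(). */
static bool enable_tx_pkt_filtering(bool mng_mode, bool cookie_valid,
				    bool parsing)
{
	tx_pkt_filtering = true;           /* default */

	if (!mng_mode) {
		tx_pkt_filtering = false;  /* no manageability, no filtering */
		goto out;
	}
	if (!cookie_valid)
		goto out;                  /* bad cookie: keep filtering on */
	if (!parsing)
		tx_pkt_filtering = false;
out:
	return tx_pkt_filtering;
}

int main(void)
{
	printf("%d %d %d\n",
	       enable_tx_pkt_filtering(false, true, true),   /* 0 */
	       enable_tx_pkt_filtering(true, false, true),   /* 1 */
	       enable_tx_pkt_filtering(true, true, false));  /* 0 */
	return 0;
}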
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 762b697ce731..57f149b75fbe 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		length = le16_to_cpu(rx_desc->length);
 
-		/* !EOP means multiple descriptors were used to store a single
-		 * packet, also make sure the frame isn't just CRC only */
-		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+		/*
+		 * !EOP means multiple descriptors were used to store a single
+		 * packet, if that's the case we need to toss it.  In fact, we
+		 * need to toss every packet with the EOP bit clear and the
+		 * next frame that _does_ have the EOP bit set, as it is by
+		 * definition only a frame fragment
+		 */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
 			/* All receives must fit into a single buffer */
 			e_dbg("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
+			if (status & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
 
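The flag turns the old per-packet length check into a small state machine that spans descriptors: once a descriptor arrives without EOP, everything up to and including the next descriptor that does carry EOP is dropped. A self-contained simulation of that rule, with STAT_EOP standing in for E1000_RXD_STAT_EOP:

#include <stdbool.h>
#include <stdio.h>

#define STAT_EOP 0x02  /* stand-in for E1000_RXD_STAT_EOP */

int main(void)
{
	/* A 3-descriptor oversized frame followed by a normal 1-descriptor one */
	const unsigned char status[] = { 0, 0, STAT_EOP, STAT_EOP };
	bool discarding = false;

	for (unsigned i = 0; i < sizeof(status); i++) {
		if (!(status[i] & STAT_EOP))
			discarding = true;

		if (discarding) {
			printf("desc %u: dropped (fragment)\n", i);
			if (status[i] & STAT_EOP)
				discarding = false;  /* fragment fully consumed */
			continue;
		}
		printf("desc %u: delivered\n", i);
	}
	return 0;
}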
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				 PCI_DMA_FROMDEVICE);
 		buffer_info->dma = 0;
 
-		if (!(staterr & E1000_RXD_STAT_EOP)) {
+		/* see !EOP comment in other rx routine */
+		if (!(staterr & E1000_RXD_STAT_EOP))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
 			e_dbg("Packet Split buffers didn't pick up the full "
 			      "packet\n");
 			dev_kfree_skb_irq(skb);
+			if (staterr & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
 
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
+	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 
 	writel(0, adapter->hw.hw_addr + rx_ring->head);
 	writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 		rctl &= ~E1000_RCTL_SZ_4096;
 		rctl |= E1000_RCTL_BSEX;
 		switch (adapter->rx_buffer_len) {
-		case 256:
-			rctl |= E1000_RCTL_SZ_256;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
-		case 512:
-			rctl |= E1000_RCTL_SZ_512;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
-		case 1024:
-			rctl |= E1000_RCTL_SZ_1024;
-			rctl &= ~E1000_RCTL_BSEX;
-			break;
 		case 2048:
 		default:
 			rctl |= E1000_RCTL_SZ_2048;
@@ -3315,24 +3320,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
-		adapter->stats.scc += phy_data;
+		if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+			adapter->stats.scc += phy_data;
 
 		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
-		e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
-		adapter->stats.ecol += phy_data;
+		if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+			adapter->stats.ecol += phy_data;
 
 		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
-		adapter->stats.mcc += phy_data;
+		if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+			adapter->stats.mcc += phy_data;
 
 		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
-		e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
-		adapter->stats.latecol += phy_data;
+		if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+			adapter->stats.latecol += phy_data;
 
 		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_DC_LOWER, &phy_data);
-		adapter->stats.dc += phy_data;
+		if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+			adapter->stats.dc += phy_data;
 	} else {
 		adapter->stats.scc += er32(SCC);
 		adapter->stats.ecol += er32(ECOL);
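Each of these statistics lives in an upper/lower pair of 16-bit PHY registers; the upper half is read and discarded, and after this change the lower half is accumulated only when its read succeeds, so an MDIO failure no longer adds a stale value to the counter. A sketch of the guarded-accumulate pattern, with a hypothetical phy_read() stub that fails intermittently:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical MDIO read that fails every other call (nonzero = error). */
static int phy_read(uint16_t *val)
{
	static int n;
	*val = 7;
	return (n++ & 1) ? -5 /* -EIO */ : 0;
}

int main(void)
{
	uint32_t scc = 0;  /* single collision count accumulator */
	uint16_t v;

	for (int i = 0; i < 4; i++) {
		/* Accumulate only when the read succeeded, as the patch does. */
		if (!phy_read(&v))
			scc += v;
	}
	printf("scc = %u\n", scc);  /* 14: two good reads of 7 */
	return 0;
}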
@@ -3360,8 +3365,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
-		e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
-		hw->mac.collision_delta = phy_data;
+		if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+			hw->mac.collision_delta = phy_data;
 	} else {
 		hw->mac.collision_delta = er32(COLC);
 	}
@@ -3372,8 +3377,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
-		e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
-		adapter->stats.tncrs += phy_data;
+		if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+			adapter->stats.tncrs += phy_data;
 	} else {
 		if ((hw->mac.type != e1000_82574) &&
 		    (hw->mac.type != e1000_82583))
@@ -3781,7 +3786,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
 						     0, IPPROTO_TCP, 0);
 		cmd_length = E1000_TXD_CMD_IP;
 		ipcse = skb_transport_offset(skb) - 1;
-	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 						       &ipv6_hdr(skb)->daddr,
@@ -3962,13 +3967,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 dma_error:
 	dev_err(&pdev->dev, "TX DMA map failed\n");
 	buffer_info->dma = 0;
-	count--;
-
-	while (count >= 0) {
-		count--;
-		i--;
-		if (i < 0)
+	if (count)
+		count--;
+
+	while (count--) {
+		if (i==0)
 			i += tx_ring->count;
+		i--;
 		buffer_info = &tx_ring->buffer_info[i];
 		e1000_put_txbuf(adapter, buffer_info);;
 	}
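The rewritten unwind walks back over exactly `count` already-mapped buffers, wrapping the ring index before the decrement so slot 0 steps to the last slot rather than to -1 (the old form could underflow `i` and pass over one buffer too many). A small simulation of the index walk; RING and the slot numbers are made up for the example:

#include <stdio.h>

#define RING 8  /* stand-in for tx_ring->count */

/* Walk back over the 'count' already-mapped buffers, as the fixed loop does. */
static void unwind(unsigned i, unsigned count)
{
	if (count)
		count--;        /* the buffer at 'i' was already cleaned in place */

	while (count--) {
		if (i == 0)
			i += RING;  /* wrap: the slot before 0 is RING - 1 */
		i--;
		printf("put_txbuf(slot %u)\n", i);
	}
}

int main(void)
{
	unwind(1, 3);  /* mapped slots 7, 0, 1; failure at slot 1 -> puts 0, then 7 */
	return 0;
}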
@@ -4317,13 +4322,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	 * fragmented skbs
 	 */
 
-	if (max_frame <= 256)
-		adapter->rx_buffer_len = 256;
-	else if (max_frame <= 512)
-		adapter->rx_buffer_len = 512;
-	else if (max_frame <= 1024)
-		adapter->rx_buffer_len = 1024;
-	else if (max_frame <= 2048)
+	if (max_frame <= 2048)
 		adapter->rx_buffer_len = 2048;
 	else
 		adapter->rx_buffer_len = 4096;
@@ -4674,6 +4673,7 @@ static int e1000_resume(struct pci_dev *pdev)
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
+	pci_save_state(pdev);
 	e1000e_disable_l1aspm(pdev);
 
 	err = pci_enable_device_mem(pdev);
@@ -4825,6 +4825,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
 	} else {
 		pci_set_master(pdev);
 		pci_restore_state(pdev);
+		pci_save_state(pdev);
 
 		pci_enable_wake(pdev, PCI_D3hot, 0);
 		pci_enable_wake(pdev, PCI_D3cold, 0);
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 55a2c0acfee7..7f3ceb9dad6a 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -152,32 +152,9 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
 		if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
 			goto out;
 
-		/*
-		 * If the PHY ID is still unknown, we may have an 82577
-		 * without link.  We will try again after setting Slow MDIC
-		 * mode. No harm in trying again in this case since the PHY
-		 * ID is unknown at this point anyway.
-		 */
-		ret_val = phy->ops.acquire(hw);
-		if (ret_val)
-			goto out;
-		ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
-		if (ret_val)
-			goto out;
-		phy->ops.release(hw);
-
 		retry_count++;
 	}
 out:
-	/* Revert to MDIO fast mode, if applicable */
-	if (retry_count) {
-		ret_val = phy->ops.acquire(hw);
-		if (ret_val)
-			return ret_val;
-		ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
-		phy->ops.release(hw);
-	}
-
 	return ret_val;
 }
 
@@ -2791,38 +2768,6 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
 }
 
 /**
- * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
- * @hw:   pointer to the HW structure
- * @slow: true for slow mode, false for normal mode
- *
- * Assumes semaphore already acquired.
- **/
-s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
-{
-	s32 ret_val = 0;
-	u16 data = 0;
-
-	/* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
-	hw->phy.addr = 1;
-	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
-					 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
-	if (ret_val)
-		goto out;
-
-	ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
-					   (0x2180 | (slow << 10)));
-	if (ret_val)
-		goto out;
-
-	/* dummy read when reverting to fast mode - throw away result */
-	if (!slow)
-		ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
-out:
-	return ret_val;
-}
-
-/**
  * __e1000_read_phy_reg_hv -  Read HV PHY register
  * @hw: pointer to the HW structure
  * @offset: register offset to be read
@@ -2839,7 +2784,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
 	s32 ret_val;
 	u16 page = BM_PHY_REG_PAGE(offset);
 	u16 reg = BM_PHY_REG_NUM(offset);
-	bool in_slow_mode = false;
 
 	if (!locked) {
 		ret_val = hw->phy.ops.acquire(hw);
@@ -2847,16 +2791,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
 			return ret_val;
 	}
 
-	/* Workaround failure in MDIO access while cable is disconnected */
-	if ((hw->phy.type == e1000_phy_82577) &&
-	    !(er32(STATUS) & E1000_STATUS_LU)) {
-		ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
-		if (ret_val)
-			goto out;
-
-		in_slow_mode = true;
-	}
-
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -2893,10 +2827,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
 					   data);
 out:
-	/* Revert to MDIO fast mode, if applicable */
-	if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
-		ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
 	if (!locked)
 		hw->phy.ops.release(hw);
 
@@ -2948,7 +2878,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
 	s32 ret_val;
 	u16 page = BM_PHY_REG_PAGE(offset);
 	u16 reg = BM_PHY_REG_NUM(offset);
-	bool in_slow_mode = false;
 
 	if (!locked) {
 		ret_val = hw->phy.ops.acquire(hw);
@@ -2956,16 +2885,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
 			return ret_val;
 	}
 
-	/* Workaround failure in MDIO access while cable is disconnected */
-	if ((hw->phy.type == e1000_phy_82577) &&
-	    !(er32(STATUS) & E1000_STATUS_LU)) {
-		ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
-		if (ret_val)
-			goto out;
-
-		in_slow_mode = true;
-	}
-
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -3019,10 +2938,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
 					  data);
 
 out:
-	/* Revert to MDIO fast mode, if applicable */
-	if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
-		ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
 	if (!locked)
 		hw->phy.ops.release(hw);
 