Diffstat (limited to 'drivers')
 drivers/net/Kconfig               |   9
 drivers/net/e1000/e1000_ethtool.c | 307
 drivers/net/e1000/e1000_hw.c      |  18
 drivers/net/e1000/e1000_hw.h      |  20
 drivers/net/e1000/e1000_main.c    | 802
 drivers/net/e1000/e1000_osdep.h   |   2
 drivers/net/e1000/e1000_param.c   |  44
 7 files changed, 684 insertions(+), 518 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1421941487c4..0c69918671ca 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1914,6 +1914,15 @@ config E1000_NAPI
 
 	  If in doubt, say N.
 
+config E1000_DISABLE_PACKET_SPLIT
+	bool "Disable Packet Split for PCI express adapters"
+	depends on E1000
+	help
+	  Say Y here if you want to use the legacy receive path for PCI express
+	  hardware.
+
+	  If in doubt, say N.
+
 source "drivers/net/ixp2000/Kconfig"
 
 config MYRI_SBUS
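For readers unfamiliar with Kconfig bool symbols: answering Y to the option added above makes the build define CONFIG_E1000_DISABLE_PACKET_SPLIT, which the driver can test at compile time to fall back to the legacy receive path. A minimal standalone sketch of that pattern follows; the helper and its output are hypothetical and only the CONFIG_ symbol name comes from this patch. Compile with -DCONFIG_E1000_DISABLE_PACKET_SPLIT to flip the result.

#include <stdio.h>

/* Hypothetical helper: reports whether the receive path may use
 * packet split, mirroring how a bool Kconfig symbol is consumed. */
static int use_packet_split(void)
{
#ifdef CONFIG_E1000_DISABLE_PACKET_SPLIT
	return 0;	/* user asked for the legacy receive path */
#else
	return 1;	/* PCI express parts may enable packet split */
#endif
}

int main(void)
{
	printf("packet split allowed: %s\n", use_packet_split() ? "yes" : "no");
	return 0;
}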
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d252297e4db0..5cedc81786e3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -121,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	if(hw->media_type == e1000_media_type_copper) {
+	if (hw->media_type == e1000_media_type_copper) {
 
 		ecmd->supported = (SUPPORTED_10baseT_Half |
 		                   SUPPORTED_10baseT_Full |
@@ -133,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	ecmd->advertising = ADVERTISED_TP;
 
-	if(hw->autoneg == 1) {
+	if (hw->autoneg == 1) {
 		ecmd->advertising |= ADVERTISED_Autoneg;
 
 		/* the e1000 autoneg seems to match ethtool nicely */
@@ -144,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	ecmd->port = PORT_TP;
 	ecmd->phy_address = hw->phy_addr;
 
-	if(hw->mac_type == e1000_82543)
+	if (hw->mac_type == e1000_82543)
 		ecmd->transceiver = XCVR_EXTERNAL;
 	else
 		ecmd->transceiver = XCVR_INTERNAL;
@@ -160,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 		ecmd->port = PORT_FIBRE;
 
-		if(hw->mac_type >= e1000_82545)
+		if (hw->mac_type >= e1000_82545)
 			ecmd->transceiver = XCVR_INTERNAL;
 		else
 			ecmd->transceiver = XCVR_EXTERNAL;
 	}
 
-	if(netif_carrier_ok(adapter->netdev)) {
+	if (netif_carrier_ok(adapter->netdev)) {
 
 		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
 		                           &adapter->link_duplex);
@@ -175,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		/* unfortunatly FULL_DUPLEX != DUPLEX_FULL
 		 * and HALF_DUPLEX != DUPLEX_HALF */
 
-		if(adapter->link_duplex == FULL_DUPLEX)
+		if (adapter->link_duplex == FULL_DUPLEX)
 			ecmd->duplex = DUPLEX_FULL;
 		else
 			ecmd->duplex = DUPLEX_HALF;
@@ -205,11 +205,11 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->autoneg = 1;
-		if(hw->media_type == e1000_media_type_fiber)
+		if (hw->media_type == e1000_media_type_fiber)
 			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
 			                         ADVERTISED_FIBRE |
 			                         ADVERTISED_Autoneg;
 		else
 			hw->autoneg_advertised = ADVERTISED_10baseT_Half |
 			                         ADVERTISED_10baseT_Full |
 			                         ADVERTISED_100baseT_Half |
@@ -219,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 			                         ADVERTISED_TP;
 		ecmd->advertising = hw->autoneg_advertised;
 	} else
-		if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
+		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
 			return -EINVAL;
 
 	/* reset the link */
 
-	if(netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev)) {
 		e1000_down(adapter);
 		e1000_reset(adapter);
 		e1000_up(adapter);
@@ -241,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
 	pause->autoneg =
 		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
 
-	if(hw->fc == e1000_fc_rx_pause)
+	if (hw->fc == e1000_fc_rx_pause)
 		pause->rx_pause = 1;
-	else if(hw->fc == e1000_fc_tx_pause)
+	else if (hw->fc == e1000_fc_tx_pause)
 		pause->tx_pause = 1;
-	else if(hw->fc == e1000_fc_full) {
+	else if (hw->fc == e1000_fc_full) {
 		pause->rx_pause = 1;
 		pause->tx_pause = 1;
 	}
@@ -260,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->fc_autoneg = pause->autoneg;
 
-	if(pause->rx_pause && pause->tx_pause)
+	if (pause->rx_pause && pause->tx_pause)
 		hw->fc = e1000_fc_full;
-	else if(pause->rx_pause && !pause->tx_pause)
+	else if (pause->rx_pause && !pause->tx_pause)
 		hw->fc = e1000_fc_rx_pause;
-	else if(!pause->rx_pause && pause->tx_pause)
+	else if (!pause->rx_pause && pause->tx_pause)
 		hw->fc = e1000_fc_tx_pause;
-	else if(!pause->rx_pause && !pause->tx_pause)
+	else if (!pause->rx_pause && !pause->tx_pause)
 		hw->fc = e1000_fc_none;
 
 	hw->original_fc = hw->fc;
 
-	if(adapter->fc_autoneg == AUTONEG_ENABLE) {
-		if(netif_running(adapter->netdev)) {
+	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+		if (netif_running(adapter->netdev)) {
 			e1000_down(adapter);
 			e1000_up(adapter);
 		} else
 			e1000_reset(adapter);
-	}
-	else
+	} else
 		return ((hw->media_type == e1000_media_type_fiber) ?
 			e1000_setup_link(hw) : e1000_force_mac_fc(hw));
 
 	return 0;
 }
 
@@ -301,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	adapter->rx_csum = data;
 
-	if(netif_running(netdev)) {
+	if (netif_running(netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
 	} else
 		e1000_reset(adapter);
 	return 0;
 }
 
 static uint32_t
 e1000_get_tx_csum(struct net_device *netdev)
 {
@@ -320,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if(adapter->hw.mac_type < e1000_82543) {
+	if (adapter->hw.mac_type < e1000_82543) {
 		if (!data)
 			return -EINVAL;
 		return 0;
@@ -339,8 +338,8 @@ static int
 e1000_set_tso(struct net_device *netdev, uint32_t data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	if((adapter->hw.mac_type < e1000_82544) ||
-	   (adapter->hw.mac_type == e1000_82547))
+	if ((adapter->hw.mac_type < e1000_82544) ||
+	    (adapter->hw.mac_type == e1000_82547))
 		return data ? -EINVAL : 0;
 
 	if (data)
@@ -348,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
 	else
 		netdev->features &= ~NETIF_F_TSO;
 	return 0;
 }
 #endif /* NETIF_F_TSO */
 
 static uint32_t
@@ -365,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data)
 	adapter->msg_enable = data;
 }
 
 static int
 e1000_get_regs_len(struct net_device *netdev)
 {
 #define E1000_REGS_LEN 32
@@ -401,7 +400,7 @@ e1000_get_regs(struct net_device *netdev,
 	regs_buff[11] = E1000_READ_REG(hw, TIDV);
 
 	regs_buff[12] = adapter->hw.phy_type;  /* PHY type (IGP=1, M88=0) */
-	if(hw->phy_type == e1000_phy_igp) {
+	if (hw->phy_type == e1000_phy_igp) {
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 		                    IGP01E1000_PHY_AGC_A);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
@@ -455,7 +454,7 @@ e1000_get_regs(struct net_device *netdev,
 	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
 	regs_buff[24] = (uint32_t)phy_data;  /* phy local receiver status */
 	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
-	if(hw->mac_type >= e1000_82540 &&
+	if (hw->mac_type >= e1000_82540 &&
 	    hw->media_type == e1000_media_type_copper) {
 		regs_buff[26] = E1000_READ_REG(hw, MANC);
 	}
@@ -479,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev,
 	int ret_val = 0;
 	uint16_t i;
 
-	if(eeprom->len == 0)
+	if (eeprom->len == 0)
 		return -EINVAL;
 
 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -489,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev,
 
 	eeprom_buff = kmalloc(sizeof(uint16_t) *
 			(last_word - first_word + 1), GFP_KERNEL);
-	if(!eeprom_buff)
+	if (!eeprom_buff)
 		return -ENOMEM;
 
-	if(hw->eeprom.type == e1000_eeprom_spi)
+	if (hw->eeprom.type == e1000_eeprom_spi)
 		ret_val = e1000_read_eeprom(hw, first_word,
 					    last_word - first_word + 1,
 					    eeprom_buff);
 	else {
 		for (i = 0; i < last_word - first_word + 1; i++)
-			if((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+			if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
 				&eeprom_buff[i])))
 				break;
 	}
@@ -525,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev,
 	int max_len, first_word, last_word, ret_val = 0;
 	uint16_t i;
 
-	if(eeprom->len == 0)
+	if (eeprom->len == 0)
 		return -EOPNOTSUPP;
 
-	if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
 		return -EFAULT;
 
 	max_len = hw->eeprom.word_size * 2;
@@ -536,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev,
 	first_word = eeprom->offset >> 1;
 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
-	if(!eeprom_buff)
+	if (!eeprom_buff)
 		return -ENOMEM;
 
 	ptr = (void *)eeprom_buff;
 
-	if(eeprom->offset & 1) {
+	if (eeprom->offset & 1) {
 		/* need read/modify/write of first changed EEPROM word */
 		/* only the second byte of the word is being modified */
 		ret_val = e1000_read_eeprom(hw, first_word, 1,
 					    &eeprom_buff[0]);
 		ptr++;
 	}
-	if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
 		/* need read/modify/write of last changed EEPROM word */
 		/* only the first byte of the word is being modified */
 		ret_val = e1000_read_eeprom(hw, last_word, 1,
@@ -567,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev,
 	ret_val = e1000_write_eeprom(hw, first_word,
 				     last_word - first_word + 1, eeprom_buff);
 
 	/* Update the checksum over the first part of the EEPROM if needed
 	 * and flush shadow RAM for 82573 conrollers */
-	if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
+	if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
 	    (hw->mac_type == e1000_82573)))
 		e1000_update_eeprom_checksum(hw);
 
@@ -633,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev,
 	ring->rx_jumbo_pending = 0;
 }
 
 static int
 e1000_set_ringparam(struct net_device *netdev,
 		    struct ethtool_ringparam *ring)
 {
@@ -670,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev,
 	txdr = adapter->tx_ring;
 	rxdr = adapter->rx_ring;
 
-	if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
 	rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
 	rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
 		E1000_MAX_RXD : E1000_MAX_82544_RXD));
 	E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
 	txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
 	txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
 		E1000_MAX_TXD : E1000_MAX_82544_TXD));
 	E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		txdr[i].count = txdr->count;
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		rxdr[i].count = rxdr->count;
 
-	if(netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev)) {
 		/* Try to get new resources before deleting old */
 		if ((err = e1000_setup_all_rx_resources(adapter)))
 			goto err_setup_rx;
@@ -708,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev,
 		kfree(rx_old);
 		adapter->rx_ring = rx_new;
 		adapter->tx_ring = tx_new;
-		if((err = e1000_up(adapter)))
+		if ((err = e1000_up(adapter)))
 			return err;
 	}
 
@@ -727,10 +726,10 @@ err_setup_rx:
 	uint32_t pat, value; \
 	uint32_t test[] = \
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
-	for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
+	for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
 		E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \
 		value = E1000_READ_REG(&adapter->hw, R); \
-		if(value != (test[pat] & W & M)) { \
+		if (value != (test[pat] & W & M)) { \
 			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
 				"0x%08X expected 0x%08X\n", \
 				E1000_##R, value, (test[pat] & W & M)); \
@@ -746,7 +745,7 @@ err_setup_rx:
 	uint32_t value; \
 	E1000_WRITE_REG(&adapter->hw, R, W & M); \
 	value = E1000_READ_REG(&adapter->hw, R); \
-	if((W & M) != (value & M)) { \
+	if ((W & M) != (value & M)) { \
 		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
 			"expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
 		*data = (adapter->hw.mac_type < e1000_82543) ? \
@@ -782,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 	value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
 	E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
 	after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
-	if(value != after) {
+	if (value != after) {
 		DPRINTK(DRV, ERR, "failed STATUS register test got: "
 			"0x%08X expected: 0x%08X\n", after, value);
 		*data = 1;
@@ -810,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 	REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
 	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
 
-	if(adapter->hw.mac_type >= e1000_82543) {
+	if (adapter->hw.mac_type >= e1000_82543) {
 
 		REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
 		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
@@ -818,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
 		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
 
-		for(i = 0; i < E1000_RAR_ENTRIES; i++) {
+		for (i = 0; i < E1000_RAR_ENTRIES; i++) {
 			REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
 					 0xFFFFFFFF);
 			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -834,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 
 	}
 
-	for(i = 0; i < E1000_MC_TBL_SIZE; i++)
+	for (i = 0; i < E1000_MC_TBL_SIZE; i++)
 		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
 
 	*data = 0;
@@ -850,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
-	for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
-		if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
+	for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+		if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
 			*data = 1;
 			break;
 		}
@@ -859,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 	}
 
 	/* If Checksum is not Correct return error else test passed */
-	if((checksum != (uint16_t) EEPROM_SUM) && !(*data))
+	if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
 		*data = 2;
 
 	return *data;
@@ -888,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
-	if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+	if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
 		shared_int = FALSE;
-	} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
+	} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
 		   netdev->name, netdev)){
 		*data = 1;
 		return -1;
@@ -901,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	msec_delay(10);
 
 	/* Test each interrupt */
-	for(; i < 10; i++) {
+	for (; i < 10; i++) {
 
 		/* Interrupt to test */
 		mask = 1 << i;
 
-		if(!shared_int) {
+		if (!shared_int) {
 			/* Disable the interrupt to be reported in
 			 * the cause register and then force the same
 			 * interrupt and see if one gets posted.  If
@@ -917,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 			E1000_WRITE_REG(&adapter->hw, IMC, mask);
 			E1000_WRITE_REG(&adapter->hw, ICS, mask);
 			msec_delay(10);
 
-			if(adapter->test_icr & mask) {
+			if (adapter->test_icr & mask) {
 				*data = 3;
 				break;
 			}
@@ -935,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 		E1000_WRITE_REG(&adapter->hw, ICS, mask);
 		msec_delay(10);
 
-		if(!(adapter->test_icr & mask)) {
+		if (!(adapter->test_icr & mask)) {
 			*data = 4;
 			break;
 		}
 
-		if(!shared_int) {
+		if (!shared_int) {
 			/* Disable the other interrupts to be reported in
 			 * the cause register and then force the other
 			 * interrupts and see if any get posted.  If
@@ -952,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 			E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
 			msec_delay(10);
 
-			if(adapter->test_icr) {
+			if (adapter->test_icr) {
 				*data = 5;
 				break;
 			}
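The interrupt-test hunks above exercise each cause bit twice: first masked via IMC (a forced cause must not reach the test handler), then unmasked via IMS (a forced cause must reach it). A minimal standalone sketch of that check follows, simulating the mask register and the ICR value latched by the test handler with plain variables; every name here is hypothetical, not the driver's code.

#include <stdint.h>
#include <stdio.h>

static uint32_t ims;      /* simulated interrupt mask (IMS/IMC effect) */
static uint32_t test_icr; /* causes a test handler would have latched */

/* Forcing a cause (the ICS write) only posts it when it is unmasked. */
static void force_cause(uint32_t mask)
{
	if (mask & ims)
		test_icr |= mask;
}

static int check_one_irq_bit(unsigned int i)
{
	uint32_t mask = 1u << i;

	ims &= ~mask;          /* IMC: mask the cause */
	test_icr = 0;
	force_cause(mask);     /* ICS: force it */
	if (test_icr & mask)
		return -1;     /* masked cause must not post (*data = 3) */

	ims |= mask;           /* IMS: unmask the cause */
	test_icr = 0;
	force_cause(mask);     /* ICS: force it again */
	if (!(test_icr & mask))
		return -1;     /* unmasked cause must post (*data = 4) */
	return 0;
}

int main(void)
{
	printf("bit 0: %s\n", check_one_irq_bit(0) ? "failed" : "ok");
	return 0;
}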
@@ -977,24 +976,24 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int i;
 
-	if(txdr->desc && txdr->buffer_info) {
-		for(i = 0; i < txdr->count; i++) {
-			if(txdr->buffer_info[i].dma)
+	if (txdr->desc && txdr->buffer_info) {
+		for (i = 0; i < txdr->count; i++) {
+			if (txdr->buffer_info[i].dma)
 				pci_unmap_single(pdev, txdr->buffer_info[i].dma,
 						 txdr->buffer_info[i].length,
 						 PCI_DMA_TODEVICE);
-			if(txdr->buffer_info[i].skb)
+			if (txdr->buffer_info[i].skb)
 				dev_kfree_skb(txdr->buffer_info[i].skb);
 		}
 	}
 
-	if(rxdr->desc && rxdr->buffer_info) {
-		for(i = 0; i < rxdr->count; i++) {
-			if(rxdr->buffer_info[i].dma)
+	if (rxdr->desc && rxdr->buffer_info) {
+		for (i = 0; i < rxdr->count; i++) {
+			if (rxdr->buffer_info[i].dma)
 				pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
 						 rxdr->buffer_info[i].length,
 						 PCI_DMA_FROMDEVICE);
-			if(rxdr->buffer_info[i].skb)
+			if (rxdr->buffer_info[i].skb)
 				dev_kfree_skb(rxdr->buffer_info[i].skb);
 		}
 	}
@@ -1027,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	/* Setup Tx descriptor ring and Tx buffers */
 
-	if(!txdr->count)
+	if (!txdr->count)
 		txdr->count = E1000_DEFAULT_TXD;
 
 	size = txdr->count * sizeof(struct e1000_buffer);
-	if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+	if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
 		ret_val = 1;
 		goto err_nomem;
 	}
@@ -1039,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 	E1000_ROUNDUP(txdr->size, 4096);
-	if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
+	if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
 		ret_val = 2;
 		goto err_nomem;
 	}
@@ -1058,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
 		E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
 
-	for(i = 0; i < txdr->count; i++) {
+	for (i = 0; i < txdr->count; i++) {
 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
 		struct sk_buff *skb;
 		unsigned int size = 1024;
 
-		if(!(skb = alloc_skb(size, GFP_KERNEL))) {
+		if (!(skb = alloc_skb(size, GFP_KERNEL))) {
 			ret_val = 3;
 			goto err_nomem;
 		}
@@ -1083,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	/* Setup Rx descriptor ring and Rx buffers */
 
-	if(!rxdr->count)
+	if (!rxdr->count)
 		rxdr->count = E1000_DEFAULT_RXD;
 
 	size = rxdr->count * sizeof(struct e1000_buffer);
-	if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+	if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
 		ret_val = 4;
 		goto err_nomem;
 	}
 	memset(rxdr->buffer_info, 0, size);
 
 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
-	if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
+	if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
 		ret_val = 5;
 		goto err_nomem;
 	}
@@ -1114,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
 
-	for(i = 0; i < rxdr->count; i++) {
+	for (i = 0; i < rxdr->count; i++) {
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
 		struct sk_buff *skb;
 
-		if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+		if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
 				GFP_KERNEL))) {
 			ret_val = 6;
 			goto err_nomem;
@@ -1227,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 
 	/* Check Phy Configuration */
 	e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
-	if(phy_reg != 0x4100)
+	if (phy_reg != 0x4100)
 		return 9;
 
 	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
-	if(phy_reg != 0x0070)
+	if (phy_reg != 0x0070)
 		return 10;
 
 	e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
-	if(phy_reg != 0x001A)
+	if (phy_reg != 0x001A)
 		return 11;
 
 	return 0;
@@ -1249,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 
 	adapter->hw.autoneg = FALSE;
 
-	if(adapter->hw.phy_type == e1000_phy_m88) {
+	if (adapter->hw.phy_type == e1000_phy_m88) {
 		/* Auto-MDI/MDIX Off */
 		e1000_write_phy_reg(&adapter->hw,
 				    M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1269,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 		    E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
 		    E1000_CTRL_FD);	 /* Force Duplex to FULL */
 
-	if(adapter->hw.media_type == e1000_media_type_copper &&
+	if (adapter->hw.media_type == e1000_media_type_copper &&
 	   adapter->hw.phy_type == e1000_phy_m88) {
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
 	} else {
 		/* Set the ILOS bit on the fiber Nic is half
 		 * duplex link is detected. */
 		stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
-		if((stat_reg & E1000_STATUS_FD) == 0)
+		if ((stat_reg & E1000_STATUS_FD) == 0)
 			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
 	}
 
@@ -1285,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 	/* Disable the receiver on the PHY so when a cable is plugged in, the
 	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
 	 */
-	if(adapter->hw.phy_type == e1000_phy_m88)
+	if (adapter->hw.phy_type == e1000_phy_m88)
 		e1000_phy_disable_receiver(adapter);
 
 	udelay(500);
@@ -1301,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
 
 	switch (adapter->hw.mac_type) {
 	case e1000_82543:
-		if(adapter->hw.media_type == e1000_media_type_copper) {
+		if (adapter->hw.media_type == e1000_media_type_copper) {
 			/* Attempt to setup Loopback mode on Non-integrated PHY.
 			 * Some PHY registers get corrupted at random, so
 			 * attempt this 10 times.
 			 */
-			while(e1000_nonintegrated_phy_loopback(adapter) &&
+			while (e1000_nonintegrated_phy_loopback(adapter) &&
 			      count++ < 10);
-			if(count < 11)
+			if (count < 11)
 				return 0;
 		}
 		break;
@@ -1430,8 +1429,8 @@ static int
 e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
 	frame_size &= ~1;
-	if(*(skb->data + 3) == 0xFF) {
-		if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
 		    (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
 			return 0;
 		}
@@ -1450,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 
 	E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
 
 	/* Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
 	 * send/receive pairs during each loop
 	 */
 
-	if(rxdr->count <= txdr->count)
+	if (rxdr->count <= txdr->count)
 		lc = ((txdr->count / 64) * 2) + 1;
 	else
 		lc = ((rxdr->count / 64) * 2) + 1;
 
 	k = l = 0;
-	for(j = 0; j <= lc; j++) {          /* loop count loop */
-		for(i = 0; i < 64; i++) {   /* send the packets */
+	for (j = 0; j <= lc; j++) {         /* loop count loop */
+		for (i = 0; i < 64; i++) {  /* send the packets */
 			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
 					1024);
 			pci_dma_sync_single_for_device(pdev,
 					txdr->buffer_info[k].dma,
 					txdr->buffer_info[k].length,
 					PCI_DMA_TODEVICE);
-			if(unlikely(++k == txdr->count)) k = 0;
+			if (unlikely(++k == txdr->count)) k = 0;
 		}
 		E1000_WRITE_REG(&adapter->hw, TDT, k);
 		msec_delay(200);
 		time = jiffies; /* set the start time for the receive */
 		good_cnt = 0;
 		do { /* receive the sent packets */
 			pci_dma_sync_single_for_cpu(pdev,
 					rxdr->buffer_info[l].dma,
 					rxdr->buffer_info[l].length,
 					PCI_DMA_FROMDEVICE);
 
 			ret_val = e1000_check_lbtest_frame(
 					rxdr->buffer_info[l].skb,
 					1024);
-			if(!ret_val)
+			if (!ret_val)
 				good_cnt++;
-			if(unlikely(++l == rxdr->count)) l = 0;
+			if (unlikely(++l == rxdr->count)) l = 0;
 			/* time + 20 msecs (200 msecs on 2.4) is more than
 			 * enough time to complete the receives, if it's
 			 * exceeded, break and error off
 			 */
 		} while (good_cnt < 64 && jiffies < (time + 20));
-		if(good_cnt != 64) {
+		if (good_cnt != 64) {
 			ret_val = 13; /* ret_val is the same as mis-compare */
 			break;
 		}
-		if(jiffies >= (time + 2)) {
+		if (jiffies >= (time + 2)) {
 			ret_val = 14; /* error code for time out error */
 			break;
 		}
@@ -1549,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
 		*data = 1;
 	} else {
 		e1000_check_for_link(&adapter->hw);
-		if(adapter->hw.autoneg)  /* if auto_neg is set wait for it */
+		if (adapter->hw.autoneg)  /* if auto_neg is set wait for it */
 			msec_delay(4000);
 
-		if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
+		if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
 			*data = 1;
 		}
 	}
 	return *data;
 }
 
 static int
 e1000_diag_test_count(struct net_device *netdev)
 {
 	return E1000_TEST_LEN;
@@ -1572,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	boolean_t if_running = netif_running(netdev);
 
-	if(eth_test->flags == ETH_TEST_FL_OFFLINE) {
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
 		/* save speed, duplex, autoneg settings */
@@ -1582,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev,
 
 		/* Link test performed before hardware reset so autoneg doesn't
 		 * interfere with test result */
-		if(e1000_link_test(adapter, &data[4]))
+		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		if(if_running)
+		if (if_running)
 			e1000_down(adapter);
 		else
 			e1000_reset(adapter);
 
-		if(e1000_reg_test(adapter, &data[0]))
+		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
-		if(e1000_eeprom_test(adapter, &data[1]))
+		if (e1000_eeprom_test(adapter, &data[1]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
-		if(e1000_intr_test(adapter, &data[2]))
+		if (e1000_intr_test(adapter, &data[2]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
-		if(e1000_loopback_test(adapter, &data[3]))
+		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		/* restore speed, duplex, autoneg settings */
@@ -1611,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev,
 		adapter->hw.autoneg = autoneg;
 
 		e1000_reset(adapter);
-		if(if_running)
+		if (if_running)
 			e1000_up(adapter);
 	} else {
 		/* Online tests */
-		if(e1000_link_test(adapter, &data[4]))
+		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		/* Offline tests aren't run; pass by default */
@@ -1633,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	switch(adapter->hw.device_id) {
+	switch (adapter->hw.device_id) {
 	case E1000_DEV_ID_82542:
 	case E1000_DEV_ID_82543GC_FIBER:
 	case E1000_DEV_ID_82543GC_COPPER:
@@ -1649,7 +1648,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
-		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
+		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
 			wol->supported = 0;
 			wol->wolopts = 0;
 			return;
@@ -1661,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 				 WAKE_BCAST | WAKE_MAGIC;
 
 		wol->wolopts = 0;
-		if(adapter->wol & E1000_WUFC_EX)
+		if (adapter->wol & E1000_WUFC_EX)
 			wol->wolopts |= WAKE_UCAST;
-		if(adapter->wol & E1000_WUFC_MC)
+		if (adapter->wol & E1000_WUFC_MC)
 			wol->wolopts |= WAKE_MCAST;
-		if(adapter->wol & E1000_WUFC_BC)
+		if (adapter->wol & E1000_WUFC_BC)
 			wol->wolopts |= WAKE_BCAST;
-		if(adapter->wol & E1000_WUFC_MAG)
+		if (adapter->wol & E1000_WUFC_MAG)
 			wol->wolopts |= WAKE_MAGIC;
 		return;
 	}
@@ -1679,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	switch(adapter->hw.device_id) {
+	switch (adapter->hw.device_id) {
 	case E1000_DEV_ID_82542:
 	case E1000_DEV_ID_82543GC_FIBER:
 	case E1000_DEV_ID_82543GC_COPPER:
@@ -1693,23 +1692,23 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
-		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
 			return wol->wolopts ? -EOPNOTSUPP : 0;
 		/* Fall Through */
 
 	default:
-		if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
 			return -EOPNOTSUPP;
 
 		adapter->wol = 0;
 
-		if(wol->wolopts & WAKE_UCAST)
+		if (wol->wolopts & WAKE_UCAST)
 			adapter->wol |= E1000_WUFC_EX;
-		if(wol->wolopts & WAKE_MCAST)
+		if (wol->wolopts & WAKE_MCAST)
 			adapter->wol |= E1000_WUFC_MC;
-		if(wol->wolopts & WAKE_BCAST)
+		if (wol->wolopts & WAKE_BCAST)
 			adapter->wol |= E1000_WUFC_BC;
-		if(wol->wolopts & WAKE_MAGIC)
+		if (wol->wolopts & WAKE_MAGIC)
 			adapter->wol |= E1000_WUFC_MAG;
 	}
 
@@ -1727,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
 
-	if(test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+	if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
 		e1000_led_off(&adapter->hw);
 	else
 		e1000_led_on(&adapter->hw);
@@ -1740,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
+	if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
 		data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
 
-	if(adapter->hw.mac_type < e1000_82571) {
-		if(!adapter->blink_timer.function) {
+	if (adapter->hw.mac_type < e1000_82571) {
+		if (!adapter->blink_timer.function) {
 			init_timer(&adapter->blink_timer);
 			adapter->blink_timer.function = e1000_led_blink_callback;
 			adapter->blink_timer.data = (unsigned long) adapter;
@@ -1782,21 +1781,21 @@ static int
 e1000_nway_reset(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	if(netif_running(netdev)) {
+	if (netif_running(netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
 	}
 	return 0;
 }
 
 static int
 e1000_get_stats_count(struct net_device *netdev)
 {
 	return E1000_STATS_LEN;
 }
 
 static void
 e1000_get_ethtool_stats(struct net_device *netdev,
 		struct ethtool_stats *stats, uint64_t *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1830,7 +1829,7 @@ e1000_get_ethtool_stats(struct net_device *netdev,
 /* BUG_ON(i != E1000_STATS_LEN); */
 }
 
 static void
 e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 {
 #ifdef CONFIG_E1000_MQ
@@ -1839,9 +1838,9 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 	uint8_t *p = data;
 	int i;
 
-	switch(stringset) {
+	switch (stringset) {
 	case ETH_SS_TEST:
 		memcpy(data, *e1000_gstrings_test,
 			E1000_TEST_LEN*ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
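One detail worth pulling out of the loopback hunks in this file: the outer loop count is derived from the larger of the two descriptor rings, lc = ((count / 64) * 2) + 1, so that bursts of 64 send/receive pairs wrap that ring roughly twice. A standalone sketch of the arithmetic with illustrative ring sizes (the values and names are examples, not taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int txd_count = 256, rxd_count = 256; /* example ring sizes */
	unsigned int larger = (rxd_count <= txd_count) ? txd_count : rxd_count;
	unsigned int lc = ((larger / 64) * 2) + 1;

	/* 256-entry rings: lc = 9 bursts of 64 packets = 576 sends,
	 * about 2.25 trips around a 256-entry ring. */
	printf("loop count = %u\n", lc);
	return 0;
}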
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 2437d362ff63..beeec0fbbeac 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -1600,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
 	if(ret_val)
 		return ret_val;
 
 	/* Read the MII 1000Base-T Control Register (Address 9). */
 	ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
 	if(ret_val)
 		return ret_val;
 
 	/* Need to parse both autoneg_advertised and fc and set up
 	 * the appropriate PHY registers.  First we will parse for
@@ -3916,7 +3916,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
 		}
 	}
 
-	if(eeprom->use_eerd == TRUE) {
+	if (eeprom->use_eerd == TRUE) {
 		ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
 		if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
 		    (hw->mac_type != e1000_82573))
@@ -4423,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
 		return -E1000_ERR_EEPROM;
 	}
 
 	/* If STM opcode located in bits 15:8 of flop, reset firmware */
 	if ((flop & 0xFF00) == E1000_STM_OPCODE) {
 		E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
 	}
@@ -4431,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
 	/* Perform the flash update */
 	E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
 
 	for (i=0; i < attempts; i++) {
 		eecd = E1000_READ_REG(hw, EECD);
 		if ((eecd & E1000_EECD_FLUPD) == 0) {
 			break;
@@ -4504,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
 		hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
 		hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
 	}
+
 	switch (hw->mac_type) {
 	default:
 		break;
@@ -6840,7 +6841,8 @@ int32_t
 e1000_check_phy_reset_block(struct e1000_hw *hw)
 {
 	uint32_t manc = 0;
-	if(hw->mac_type > e1000_82547_rev_2)
+
+	if (hw->mac_type > e1000_82547_rev_2)
 		manc = E1000_READ_REG(hw, MANC);
 	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
 		E1000_BLK_PHY_RESET : E1000_SUCCESS;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 0b8f6f2b774b..f1219dd9dbac 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -377,6 +377,7 @@ int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
 void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
 
 /* Filters (multicast, vlan, receive) */
+void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
 uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
 void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
 void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -401,7 +402,9 @@ void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
 void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
 /* Port I/O is only supported on 82544 and newer */
 uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
+uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
 void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
+void e1000_enable_pciex_master(struct e1000_hw *hw);
 int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
 int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
 void e1000_release_software_semaphore(struct e1000_hw *hw);
@@ -899,14 +902,14 @@ struct e1000_ffvt_entry {
899#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 902#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
900#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 903#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
901#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 904#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
902#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ 905#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
903#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ 906#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
904#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ 907#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
905#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ 908#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
906#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ 909#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
907#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ 910#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
908#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ 911#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
909#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ 912#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
910#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 913#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
911#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 914#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
912#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ 915#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
@@ -1761,7 +1764,6 @@ struct e1000_hw {
1761#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1764#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
1762#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 1765#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
1763 still to be processed. */ 1766 still to be processed. */
1764
1765/* Transmit Configuration Word */ 1767/* Transmit Configuration Word */
1766#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ 1768#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
1767#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ 1769#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d0a5d1656c5f..31e332935e5a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -29,11 +29,71 @@
29#include "e1000.h" 29#include "e1000.h"
30 30
31/* Change Log 31/* Change Log
32 * 6.0.58 4/20/05 32 * 6.3.9 12/16/2005
33 * o Accepted ethtool cleanup patch from Stephen Hemminger 33 * o incorporate fix for recycled skbs from IBM LTC
34 * 6.0.44+ 2/15/05 34 * 6.3.7 11/18/2005
35 * o applied Anton's patch to resolve tx hang in hardware 35 * o Honor eeprom setting for enabling/disabling Wake On Lan
36 * o Applied Andrew Mortons patch - e1000 stops working after resume 36 * 6.3.5 11/17/2005
37 * o Fix memory leak in rx ring handling for PCI Express adapters
38 * 6.3.4 11/8/05
39 * o Patch from Jesper Juhl to remove redundant NULL checks for kfree
40 * 6.3.2 9/20/05
 41 * o Refactor logic that sets/resets DRV_LOAD into inline functions to
 42 * avoid code duplication. If f/w is AMT then set DRV_LOAD only when
43 * network interface is open.
44 * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
45 * o Adjust PBA partioning for Jumbo frames using MTU size and not
46 * rx_buffer_len
47 * 6.3.1 9/19/05
48 * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
 49 * (e1000_clean_tx_irq)
50 * o Support for 8086:10B5 device (Quad Port)
51 * 6.2.14 9/15/05
52 * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface
53 * open/close
54 * 6.2.13 9/14/05
55 * o Invoke e1000_check_mng_mode only for 8257x controllers since it
 56 * accesses the FWSM, which is not supported on other controllers
57 * 6.2.12 9/9/05
58 * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
59 * o set RCTL:SECRC only for controllers newer than 82543.
 60 * o When the n/w interface comes down, reset DRV_LOAD bit to notify f/w.
61 * This code was moved from e1000_remove to e1000_close
62 * 6.2.10 9/6/05
 63 * o Fix off-by-one error in updating RDT in e1000_alloc_rx_buffers[_ps].
64 * o Enable fc by default on 82573 controllers (do not read eeprom)
65 * o Fix rx_errors statistic not to include missed_packet_count
66 * o Fix rx_dropped statistic not to include missed_packet_count
 67 * (Padraig Brady)
68 * 6.2.9 8/30/05
 69 * o Remove call to update statistics from the controller in e1000_get_stats
70 * 6.2.8 8/30/05
71 * o Improved algorithm for rx buffer allocation/rdt update
72 * o Flow control watermarks relative to rx PBA size
73 * o Simplified 'Tx Hung' detect logic
74 * 6.2.7 8/17/05
75 * o Report rx buffer allocation failures and tx timeout counts in stats
76 * 6.2.6 8/16/05
77 * o Implement workaround for controller erratum -- linear non-tso packet
78 * following a TSO gets written back prematurely
79 * 6.2.5 8/15/05
80 * o Set netdev->tx_queue_len based on link speed/duplex settings.
81 * o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
82 * o Do not power off PHY if SoL/IDER session is active
83 * 6.2.4 8/10/05
84 * o Fix loopback test setup/cleanup for 82571/3 controllers
85 * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
86 * all packets as raw
87 * o Prevent operations that will cause the PHY to be reset if SoL/IDER
88 * sessions are active and log a message
89 * 6.2.2 7/21/05
 90 * o Used fixed size descriptors for all MTU sizes, reducing memory load
91 * 6.1.2 4/13/05
92 * o Fixed ethtool diagnostics
93 * o Enabled flow control to take default eeprom settings
94 * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
95 * calls, one from mii_ioctl and other from within update_stats while
96 * processing MIIREG ioctl.
37 */ 97 */
38 98
39char e1000_driver_name[] = "e1000"; 99char e1000_driver_name[] = "e1000";
@@ -295,7 +355,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
295static inline void 355static inline void
296e1000_irq_enable(struct e1000_adapter *adapter) 356e1000_irq_enable(struct e1000_adapter *adapter)
297{ 357{
298 if(likely(atomic_dec_and_test(&adapter->irq_sem))) { 358 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
299 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); 359 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
300 E1000_WRITE_FLUSH(&adapter->hw); 360 E1000_WRITE_FLUSH(&adapter->hw);
301 } 361 }
@@ -307,17 +367,17 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
307 struct net_device *netdev = adapter->netdev; 367 struct net_device *netdev = adapter->netdev;
308 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 368 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
309 uint16_t old_vid = adapter->mng_vlan_id; 369 uint16_t old_vid = adapter->mng_vlan_id;
310 if(adapter->vlgrp) { 370 if (adapter->vlgrp) {
311 if(!adapter->vlgrp->vlan_devices[vid]) { 371 if (!adapter->vlgrp->vlan_devices[vid]) {
312 if(adapter->hw.mng_cookie.status & 372 if (adapter->hw.mng_cookie.status &
313 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 373 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
314 e1000_vlan_rx_add_vid(netdev, vid); 374 e1000_vlan_rx_add_vid(netdev, vid);
315 adapter->mng_vlan_id = vid; 375 adapter->mng_vlan_id = vid;
316 } else 376 } else
317 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 377 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
318 378
319 if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 379 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
320 (vid != old_vid) && 380 (vid != old_vid) &&
321 !adapter->vlgrp->vlan_devices[old_vid]) 381 !adapter->vlgrp->vlan_devices[old_vid])
322 e1000_vlan_rx_kill_vid(netdev, old_vid); 382 e1000_vlan_rx_kill_vid(netdev, old_vid);
323 } 383 }
@@ -401,10 +461,10 @@ e1000_up(struct e1000_adapter *adapter)
401 /* hardware has been reset, we need to reload some things */ 461 /* hardware has been reset, we need to reload some things */
402 462
403 /* Reset the PHY if it was previously powered down */ 463 /* Reset the PHY if it was previously powered down */
404 if(adapter->hw.media_type == e1000_media_type_copper) { 464 if (adapter->hw.media_type == e1000_media_type_copper) {
405 uint16_t mii_reg; 465 uint16_t mii_reg;
406 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 466 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
407 if(mii_reg & MII_CR_POWER_DOWN) 467 if (mii_reg & MII_CR_POWER_DOWN)
408 e1000_phy_reset(&adapter->hw); 468 e1000_phy_reset(&adapter->hw);
409 } 469 }
410 470
@@ -425,16 +485,16 @@ e1000_up(struct e1000_adapter *adapter)
425 } 485 }
426 486
427#ifdef CONFIG_PCI_MSI 487#ifdef CONFIG_PCI_MSI
428 if(adapter->hw.mac_type > e1000_82547_rev_2) { 488 if (adapter->hw.mac_type > e1000_82547_rev_2) {
429 adapter->have_msi = TRUE; 489 adapter->have_msi = TRUE;
430 if((err = pci_enable_msi(adapter->pdev))) { 490 if ((err = pci_enable_msi(adapter->pdev))) {
431 DPRINTK(PROBE, ERR, 491 DPRINTK(PROBE, ERR,
432 "Unable to allocate MSI interrupt Error: %d\n", err); 492 "Unable to allocate MSI interrupt Error: %d\n", err);
433 adapter->have_msi = FALSE; 493 adapter->have_msi = FALSE;
434 } 494 }
435 } 495 }
436#endif 496#endif
437 if((err = request_irq(adapter->pdev->irq, &e1000_intr, 497 if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
438 SA_SHIRQ | SA_SAMPLE_RANDOM, 498 SA_SHIRQ | SA_SAMPLE_RANDOM,
439 netdev->name, netdev))) { 499 netdev->name, netdev))) {
440 DPRINTK(PROBE, ERR, 500 DPRINTK(PROBE, ERR,
@@ -471,7 +531,7 @@ e1000_down(struct e1000_adapter *adapter)
471#endif 531#endif
472 free_irq(adapter->pdev->irq, netdev); 532 free_irq(adapter->pdev->irq, netdev);
473#ifdef CONFIG_PCI_MSI 533#ifdef CONFIG_PCI_MSI
474 if(adapter->hw.mac_type > e1000_82547_rev_2 && 534 if (adapter->hw.mac_type > e1000_82547_rev_2 &&
475 adapter->have_msi == TRUE) 535 adapter->have_msi == TRUE)
476 pci_disable_msi(adapter->pdev); 536 pci_disable_msi(adapter->pdev);
477#endif 537#endif
@@ -537,12 +597,12 @@ e1000_reset(struct e1000_adapter *adapter)
537 break; 597 break;
538 } 598 }
539 599
540 if((adapter->hw.mac_type != e1000_82573) && 600 if ((adapter->hw.mac_type != e1000_82573) &&
541 (adapter->netdev->mtu > E1000_RXBUFFER_8192)) 601 (adapter->netdev->mtu > E1000_RXBUFFER_8192))
542 pba -= 8; /* allocate more FIFO for Tx */ 602 pba -= 8; /* allocate more FIFO for Tx */
543 603
544 604
545 if(adapter->hw.mac_type == e1000_82547) { 605 if (adapter->hw.mac_type == e1000_82547) {
546 adapter->tx_fifo_head = 0; 606 adapter->tx_fifo_head = 0;
547 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 607 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
548 adapter->tx_fifo_size = 608 adapter->tx_fifo_size =
@@ -565,9 +625,9 @@ e1000_reset(struct e1000_adapter *adapter)
565 625
566 /* Allow time for pending master requests to run */ 626 /* Allow time for pending master requests to run */
567 e1000_reset_hw(&adapter->hw); 627 e1000_reset_hw(&adapter->hw);
568 if(adapter->hw.mac_type >= e1000_82544) 628 if (adapter->hw.mac_type >= e1000_82544)
569 E1000_WRITE_REG(&adapter->hw, WUC, 0); 629 E1000_WRITE_REG(&adapter->hw, WUC, 0);
570 if(e1000_init_hw(&adapter->hw)) 630 if (e1000_init_hw(&adapter->hw))
571 DPRINTK(PROBE, ERR, "Hardware Error\n"); 631 DPRINTK(PROBE, ERR, "Hardware Error\n");
572 e1000_update_mng_vlan(adapter); 632 e1000_update_mng_vlan(adapter);
573 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 633 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -606,26 +666,26 @@ e1000_probe(struct pci_dev *pdev,
606 int i, err, pci_using_dac; 666 int i, err, pci_using_dac;
607 uint16_t eeprom_data; 667 uint16_t eeprom_data;
608 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 668 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
609 if((err = pci_enable_device(pdev))) 669 if ((err = pci_enable_device(pdev)))
610 return err; 670 return err;
611 671
612 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 672 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
613 pci_using_dac = 1; 673 pci_using_dac = 1;
614 } else { 674 } else {
615 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 675 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
616 E1000_ERR("No usable DMA configuration, aborting\n"); 676 E1000_ERR("No usable DMA configuration, aborting\n");
617 return err; 677 return err;
618 } 678 }
619 pci_using_dac = 0; 679 pci_using_dac = 0;
620 } 680 }
621 681
622 if((err = pci_request_regions(pdev, e1000_driver_name))) 682 if ((err = pci_request_regions(pdev, e1000_driver_name)))
623 return err; 683 return err;
624 684
625 pci_set_master(pdev); 685 pci_set_master(pdev);
626 686
627 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 687 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
628 if(!netdev) { 688 if (!netdev) {
629 err = -ENOMEM; 689 err = -ENOMEM;
630 goto err_alloc_etherdev; 690 goto err_alloc_etherdev;
631 } 691 }
@@ -644,15 +704,15 @@ e1000_probe(struct pci_dev *pdev,
644 mmio_len = pci_resource_len(pdev, BAR_0); 704 mmio_len = pci_resource_len(pdev, BAR_0);
645 705
646 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 706 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
647 if(!adapter->hw.hw_addr) { 707 if (!adapter->hw.hw_addr) {
648 err = -EIO; 708 err = -EIO;
649 goto err_ioremap; 709 goto err_ioremap;
650 } 710 }
651 711
652 for(i = BAR_1; i <= BAR_5; i++) { 712 for (i = BAR_1; i <= BAR_5; i++) {
653 if(pci_resource_len(pdev, i) == 0) 713 if (pci_resource_len(pdev, i) == 0)
654 continue; 714 continue;
655 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { 715 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
656 adapter->hw.io_base = pci_resource_start(pdev, i); 716 adapter->hw.io_base = pci_resource_start(pdev, i);
657 break; 717 break;
658 } 718 }
@@ -689,13 +749,13 @@ e1000_probe(struct pci_dev *pdev,
689 749
690 /* setup the private structure */ 750 /* setup the private structure */
691 751
692 if((err = e1000_sw_init(adapter))) 752 if ((err = e1000_sw_init(adapter)))
693 goto err_sw_init; 753 goto err_sw_init;
694 754
695 if((err = e1000_check_phy_reset_block(&adapter->hw))) 755 if ((err = e1000_check_phy_reset_block(&adapter->hw)))
696 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 756 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
697 757
698 if(adapter->hw.mac_type >= e1000_82543) { 758 if (adapter->hw.mac_type >= e1000_82543) {
699 netdev->features = NETIF_F_SG | 759 netdev->features = NETIF_F_SG |
700 NETIF_F_HW_CSUM | 760 NETIF_F_HW_CSUM |
701 NETIF_F_HW_VLAN_TX | 761 NETIF_F_HW_VLAN_TX |
@@ -704,16 +764,16 @@ e1000_probe(struct pci_dev *pdev,
704 } 764 }
705 765
706#ifdef NETIF_F_TSO 766#ifdef NETIF_F_TSO
707 if((adapter->hw.mac_type >= e1000_82544) && 767 if ((adapter->hw.mac_type >= e1000_82544) &&
708 (adapter->hw.mac_type != e1000_82547)) 768 (adapter->hw.mac_type != e1000_82547))
709 netdev->features |= NETIF_F_TSO; 769 netdev->features |= NETIF_F_TSO;
710 770
711#ifdef NETIF_F_TSO_IPV6 771#ifdef NETIF_F_TSO_IPV6
712 if(adapter->hw.mac_type > e1000_82547_rev_2) 772 if (adapter->hw.mac_type > e1000_82547_rev_2)
713 netdev->features |= NETIF_F_TSO_IPV6; 773 netdev->features |= NETIF_F_TSO_IPV6;
714#endif 774#endif
715#endif 775#endif
716 if(pci_using_dac) 776 if (pci_using_dac)
717 netdev->features |= NETIF_F_HIGHDMA; 777 netdev->features |= NETIF_F_HIGHDMA;
718 778
719 /* hard_start_xmit is safe against parallel locking */ 779 /* hard_start_xmit is safe against parallel locking */
@@ -721,14 +781,14 @@ e1000_probe(struct pci_dev *pdev,
721 781
722 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); 782 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
723 783
724 /* before reading the EEPROM, reset the controller to 784 /* before reading the EEPROM, reset the controller to
725 * put the device in a known good starting state */ 785 * put the device in a known good starting state */
726 786
727 e1000_reset_hw(&adapter->hw); 787 e1000_reset_hw(&adapter->hw);
728 788
729 /* make sure the EEPROM is good */ 789 /* make sure the EEPROM is good */
730 790
731 if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) { 791 if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
732 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 792 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
733 err = -EIO; 793 err = -EIO;
734 goto err_eeprom; 794 goto err_eeprom;
@@ -736,12 +796,12 @@ e1000_probe(struct pci_dev *pdev,
736 796
737 /* copy the MAC address out of the EEPROM */ 797 /* copy the MAC address out of the EEPROM */
738 798
739 if(e1000_read_mac_addr(&adapter->hw)) 799 if (e1000_read_mac_addr(&adapter->hw))
740 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 800 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
741 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 801 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
742 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 802 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
743 803
744 if(!is_valid_ether_addr(netdev->perm_addr)) { 804 if (!is_valid_ether_addr(netdev->perm_addr)) {
745 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 805 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
746 err = -EIO; 806 err = -EIO;
747 goto err_eeprom; 807 goto err_eeprom;
@@ -781,7 +841,7 @@ e1000_probe(struct pci_dev *pdev,
781 * enable the ACPI Magic Packet filter 841 * enable the ACPI Magic Packet filter
782 */ 842 */
783 843
784 switch(adapter->hw.mac_type) { 844 switch (adapter->hw.mac_type) {
785 case e1000_82542_rev2_0: 845 case e1000_82542_rev2_0:
786 case e1000_82542_rev2_1: 846 case e1000_82542_rev2_1:
787 case e1000_82543: 847 case e1000_82543:
@@ -794,7 +854,7 @@ e1000_probe(struct pci_dev *pdev,
794 case e1000_82546: 854 case e1000_82546:
795 case e1000_82546_rev_3: 855 case e1000_82546_rev_3:
796 case e1000_82571: 856 case e1000_82571:
797 if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ 857 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
798 e1000_read_eeprom(&adapter->hw, 858 e1000_read_eeprom(&adapter->hw,
799 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 859 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
800 break; 860 break;
@@ -805,7 +865,7 @@ e1000_probe(struct pci_dev *pdev,
805 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 865 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
806 break; 866 break;
807 } 867 }
808 if(eeprom_data & eeprom_apme_mask) 868 if (eeprom_data & eeprom_apme_mask)
809 adapter->wol |= E1000_WUFC_MAG; 869 adapter->wol |= E1000_WUFC_MAG;
810 870
811 /* print bus type/speed/width info */ 871 /* print bus type/speed/width info */
@@ -840,7 +900,7 @@ e1000_probe(struct pci_dev *pdev,
840 e1000_get_hw_control(adapter); 900 e1000_get_hw_control(adapter);
841 901
842 strcpy(netdev->name, "eth%d"); 902 strcpy(netdev->name, "eth%d");
843 if((err = register_netdev(netdev))) 903 if ((err = register_netdev(netdev)))
844 goto err_register; 904 goto err_register;
845 905
846 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); 906 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -881,10 +941,10 @@ e1000_remove(struct pci_dev *pdev)
881 941
882 flush_scheduled_work(); 942 flush_scheduled_work();
883 943
884 if(adapter->hw.mac_type >= e1000_82540 && 944 if (adapter->hw.mac_type >= e1000_82540 &&
885 adapter->hw.media_type == e1000_media_type_copper) { 945 adapter->hw.media_type == e1000_media_type_copper) {
886 manc = E1000_READ_REG(&adapter->hw, MANC); 946 manc = E1000_READ_REG(&adapter->hw, MANC);
887 if(manc & E1000_MANC_SMBUS_EN) { 947 if (manc & E1000_MANC_SMBUS_EN) {
888 manc |= E1000_MANC_ARP_EN; 948 manc |= E1000_MANC_ARP_EN;
889 E1000_WRITE_REG(&adapter->hw, MANC, manc); 949 E1000_WRITE_REG(&adapter->hw, MANC, manc);
890 } 950 }
@@ -900,7 +960,7 @@ e1000_remove(struct pci_dev *pdev)
900 __dev_put(&adapter->polling_netdev[i]); 960 __dev_put(&adapter->polling_netdev[i]);
901#endif 961#endif
902 962
903 if(!e1000_check_phy_reset_block(&adapter->hw)) 963 if (!e1000_check_phy_reset_block(&adapter->hw))
904 e1000_phy_hw_reset(&adapter->hw); 964 e1000_phy_hw_reset(&adapter->hw);
905 965
906 kfree(adapter->tx_ring); 966 kfree(adapter->tx_ring);
@@ -959,19 +1019,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
959 1019
960 /* identify the MAC */ 1020 /* identify the MAC */
961 1021
962 if(e1000_set_mac_type(hw)) { 1022 if (e1000_set_mac_type(hw)) {
963 DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); 1023 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
964 return -EIO; 1024 return -EIO;
965 } 1025 }
966 1026
967 /* initialize eeprom parameters */ 1027 /* initialize eeprom parameters */
968 1028
969 if(e1000_init_eeprom_params(hw)) { 1029 if (e1000_init_eeprom_params(hw)) {
970 E1000_ERR("EEPROM initialization failed\n"); 1030 E1000_ERR("EEPROM initialization failed\n");
971 return -EIO; 1031 return -EIO;
972 } 1032 }
973 1033
974 switch(hw->mac_type) { 1034 switch (hw->mac_type) {
975 default: 1035 default:
976 break; 1036 break;
977 case e1000_82541: 1037 case e1000_82541:
@@ -990,7 +1050,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
990 1050
991 /* Copper options */ 1051 /* Copper options */
992 1052
993 if(hw->media_type == e1000_media_type_copper) { 1053 if (hw->media_type == e1000_media_type_copper) {
994 hw->mdix = AUTO_ALL_MODES; 1054 hw->mdix = AUTO_ALL_MODES;
995 hw->disable_polarity_correction = FALSE; 1055 hw->disable_polarity_correction = FALSE;
996 hw->master_slave = E1000_MASTER_SLAVE; 1056 hw->master_slave = E1000_MASTER_SLAVE;
@@ -1166,10 +1226,10 @@ e1000_open(struct net_device *netdev)
1166 if ((err = e1000_setup_all_rx_resources(adapter))) 1226 if ((err = e1000_setup_all_rx_resources(adapter)))
1167 goto err_setup_rx; 1227 goto err_setup_rx;
1168 1228
1169 if((err = e1000_up(adapter))) 1229 if ((err = e1000_up(adapter)))
1170 goto err_up; 1230 goto err_up;
1171 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1231 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1172 if((adapter->hw.mng_cookie.status & 1232 if ((adapter->hw.mng_cookie.status &
1173 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1233 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1174 e1000_update_mng_vlan(adapter); 1234 e1000_update_mng_vlan(adapter);
1175 } 1235 }
@@ -1214,7 +1274,7 @@ e1000_close(struct net_device *netdev)
1214 e1000_free_all_tx_resources(adapter); 1274 e1000_free_all_tx_resources(adapter);
1215 e1000_free_all_rx_resources(adapter); 1275 e1000_free_all_rx_resources(adapter);
1216 1276
1217 if((adapter->hw.mng_cookie.status & 1277 if ((adapter->hw.mng_cookie.status &
1218 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1278 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1219 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1279 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1220 } 1280 }
@@ -1269,7 +1329,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1269 size = sizeof(struct e1000_buffer) * txdr->count; 1329 size = sizeof(struct e1000_buffer) * txdr->count;
1270 1330
1271 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); 1331 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1272 if(!txdr->buffer_info) { 1332 if (!txdr->buffer_info) {
1273 DPRINTK(PROBE, ERR, 1333 DPRINTK(PROBE, ERR,
1274 "Unable to allocate memory for the transmit descriptor ring\n"); 1334 "Unable to allocate memory for the transmit descriptor ring\n");
1275 return -ENOMEM; 1335 return -ENOMEM;
@@ -1282,7 +1342,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1282 E1000_ROUNDUP(txdr->size, 4096); 1342 E1000_ROUNDUP(txdr->size, 4096);
1283 1343
1284 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1344 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1285 if(!txdr->desc) { 1345 if (!txdr->desc) {
1286setup_tx_desc_die: 1346setup_tx_desc_die:
1287 vfree(txdr->buffer_info); 1347 vfree(txdr->buffer_info);
1288 DPRINTK(PROBE, ERR, 1348 DPRINTK(PROBE, ERR,
@@ -1298,8 +1358,8 @@ setup_tx_desc_die:
1298 "at %p\n", txdr->size, txdr->desc); 1358 "at %p\n", txdr->size, txdr->desc);
1299 /* Try again, without freeing the previous */ 1359 /* Try again, without freeing the previous */
1300 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1360 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1301 if(!txdr->desc) {
1302 /* Failed allocation, critical failure */ 1361 /* Failed allocation, critical failure */
1362 if (!txdr->desc) {
1303 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1363 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1304 goto setup_tx_desc_die; 1364 goto setup_tx_desc_die;
1305 } 1365 }
@@ -1499,7 +1559,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1499 1559
1500 size = sizeof(struct e1000_ps_page) * rxdr->count; 1560 size = sizeof(struct e1000_ps_page) * rxdr->count;
1501 rxdr->ps_page = kmalloc(size, GFP_KERNEL); 1561 rxdr->ps_page = kmalloc(size, GFP_KERNEL);
1502 if(!rxdr->ps_page) { 1562 if (!rxdr->ps_page) {
1503 vfree(rxdr->buffer_info); 1563 vfree(rxdr->buffer_info);
1504 DPRINTK(PROBE, ERR, 1564 DPRINTK(PROBE, ERR,
1505 "Unable to allocate memory for the receive descriptor ring\n"); 1565 "Unable to allocate memory for the receive descriptor ring\n");
@@ -1509,7 +1569,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1509 1569
1510 size = sizeof(struct e1000_ps_page_dma) * rxdr->count; 1570 size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1511 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); 1571 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
1512 if(!rxdr->ps_page_dma) { 1572 if (!rxdr->ps_page_dma) {
1513 vfree(rxdr->buffer_info); 1573 vfree(rxdr->buffer_info);
1514 kfree(rxdr->ps_page); 1574 kfree(rxdr->ps_page);
1515 DPRINTK(PROBE, ERR, 1575 DPRINTK(PROBE, ERR,
@@ -1518,7 +1578,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1518 } 1578 }
1519 memset(rxdr->ps_page_dma, 0, size); 1579 memset(rxdr->ps_page_dma, 0, size);
1520 1580
1521 if(adapter->hw.mac_type <= e1000_82547_rev_2) 1581 if (adapter->hw.mac_type <= e1000_82547_rev_2)
1522 desc_len = sizeof(struct e1000_rx_desc); 1582 desc_len = sizeof(struct e1000_rx_desc);
1523 else 1583 else
1524 desc_len = sizeof(union e1000_rx_desc_packet_split); 1584 desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1621,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1621{ 1681{
1622 uint32_t rctl, rfctl; 1682 uint32_t rctl, rfctl;
1623 uint32_t psrctl = 0; 1683 uint32_t psrctl = 0;
1624#ifdef CONFIG_E1000_PACKET_SPLIT 1684#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1625 uint32_t pages = 0; 1685 uint32_t pages = 0;
1626#endif 1686#endif
1627 1687
@@ -1647,32 +1707,17 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1647 rctl |= E1000_RCTL_LPE; 1707 rctl |= E1000_RCTL_LPE;
1648 1708
1649 /* Setup buffer sizes */ 1709 /* Setup buffer sizes */
1650 if(adapter->hw.mac_type >= e1000_82571) { 1710 if (adapter->hw.mac_type >= e1000_82571) {
1651 /* We can now specify buffers in 1K increments. 1711 /* We can now specify buffers in 1K increments.
1652 * BSIZE and BSEX are ignored in this case. */ 1712 * BSIZE and BSEX are ignored in this case. */
1653 rctl |= adapter->rx_buffer_len << 0x11; 1713 rctl |= adapter->rx_buffer_len << 0x11;
1654 } else { 1714 } else {
1655 rctl &= ~E1000_RCTL_SZ_4096; 1715 rctl &= ~E1000_RCTL_SZ_4096;
1656 rctl |= E1000_RCTL_BSEX; 1716 rctl &= ~E1000_RCTL_BSEX;
1657 switch (adapter->rx_buffer_len) { 1717 rctl |= E1000_RCTL_SZ_2048;
1658 case E1000_RXBUFFER_2048:
1659 default:
1660 rctl |= E1000_RCTL_SZ_2048;
1661 rctl &= ~E1000_RCTL_BSEX;
1662 break;
1663 case E1000_RXBUFFER_4096:
1664 rctl |= E1000_RCTL_SZ_4096;
1665 break;
1666 case E1000_RXBUFFER_8192:
1667 rctl |= E1000_RCTL_SZ_8192;
1668 break;
1669 case E1000_RXBUFFER_16384:
1670 rctl |= E1000_RCTL_SZ_16384;
1671 break;
1672 }
1673 } 1718 }
1674 1719
1675#ifdef CONFIG_E1000_PACKET_SPLIT 1720#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1676 /* 82571 and greater support packet-split where the protocol 1721 /* 82571 and greater support packet-split where the protocol
1677 * header is placed in skb->data and the packet data is 1722 * header is placed in skb->data and the packet data is
1678 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 1723 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
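The comment above describes the packet-split receive layout: the protocol header lands in a small skb->data buffer while payload spills into page buffers. A rough, compilable model of the sizing decision; the page size, header buffer size, and frame overheads are assumptions for illustration, not the driver's exact policy.

#include <stdio.h>

#define PAGE_SIZE_ASSUMED 4096
#define PS_BSIZE0         128    /* assumed header buffer size */

int main(void)
{
        int mtu = 9000;                         /* jumbo frame example */
        int max_frame = mtu + 14 + 4;           /* + eth header + FCS */
        int pages = (max_frame + PAGE_SIZE_ASSUMED - 1) / PAGE_SIZE_ASSUMED;

        printf("max_frame=%d -> %d page buffer(s) plus a %d-byte header buffer\n",
               max_frame, pages, PS_BSIZE0);
        return 0;
}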
@@ -1696,7 +1741,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1696 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); 1741 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1697 1742
1698 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; 1743 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
1699 1744
1700 psrctl |= adapter->rx_ps_bsize0 >> 1745 psrctl |= adapter->rx_ps_bsize0 >>
1701 E1000_PSRCTL_BSIZE0_SHIFT; 1746 E1000_PSRCTL_BSIZE0_SHIFT;
1702 1747
@@ -1758,7 +1803,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1758 1803
1759 if (hw->mac_type >= e1000_82540) { 1804 if (hw->mac_type >= e1000_82540) {
1760 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); 1805 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
1761 if(adapter->itr > 1) 1806 if (adapter->itr > 1)
1762 E1000_WRITE_REG(hw, ITR, 1807 E1000_WRITE_REG(hw, ITR,
1763 1000000000 / (adapter->itr * 256)); 1808 1000000000 / (adapter->itr * 256));
1764 } 1809 }
@@ -1847,13 +1892,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1847 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1892 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1848 if (hw->mac_type >= e1000_82543) { 1893 if (hw->mac_type >= e1000_82543) {
1849 rxcsum = E1000_READ_REG(hw, RXCSUM); 1894 rxcsum = E1000_READ_REG(hw, RXCSUM);
1850 if(adapter->rx_csum == TRUE) { 1895 if (adapter->rx_csum == TRUE) {
1851 rxcsum |= E1000_RXCSUM_TUOFL; 1896 rxcsum |= E1000_RXCSUM_TUOFL;
1852 1897
1853 /* Enable 82571 IPv4 payload checksum for UDP fragments 1898 /* Enable 82571 IPv4 payload checksum for UDP fragments
1854 * Must be used in conjunction with packet-split. */ 1899 * Must be used in conjunction with packet-split. */
1855 if ((hw->mac_type >= e1000_82571) && 1900 if ((hw->mac_type >= e1000_82571) &&
1856 (adapter->rx_ps_pages)) { 1901 (adapter->rx_ps_pages)) {
1857 rxcsum |= E1000_RXCSUM_IPPCSE; 1902 rxcsum |= E1000_RXCSUM_IPPCSE;
1858 } 1903 }
1859 } else { 1904 } else {
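A standalone model of the RXCSUM decision in the hunk above: TCP/UDP offload when rx_csum is on, plus IPv4 payload checksum on 82571-class parts when packet split is active. The bit values are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define RXCSUM_TUOFL  0x00000200u  /* TCP/UDP csum offload (assumed) */
#define RXCSUM_IPPCSE 0x00001000u  /* IPv4 payload csum (assumed) */

static uint32_t rxcsum_bits(int rx_csum, int is_82571_plus, int ps_pages)
{
        uint32_t rxcsum = 0;

        if (rx_csum) {
                rxcsum |= RXCSUM_TUOFL;
                /* payload checksum only makes sense with packet split */
                if (is_82571_plus && ps_pages)
                        rxcsum |= RXCSUM_IPPCSE;
        }
        return rxcsum;
}

int main(void)
{
        printf("0x%08x\n", rxcsum_bits(1, 1, 3));
        return 0;
}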
@@ -1915,7 +1960,7 @@ static inline void
1915e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1960e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1916 struct e1000_buffer *buffer_info) 1961 struct e1000_buffer *buffer_info)
1917{ 1962{
1918 if(buffer_info->dma) { 1963 if (buffer_info->dma) {
1919 pci_unmap_page(adapter->pdev, 1964 pci_unmap_page(adapter->pdev,
1920 buffer_info->dma, 1965 buffer_info->dma,
1921 buffer_info->length, 1966 buffer_info->length,
@@ -1942,7 +1987,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
1942 1987
1943 /* Free all the Tx ring sk_buffs */ 1988 /* Free all the Tx ring sk_buffs */
1944 1989
1945 for(i = 0; i < tx_ring->count; i++) { 1990 for (i = 0; i < tx_ring->count; i++) {
1946 buffer_info = &tx_ring->buffer_info[i]; 1991 buffer_info = &tx_ring->buffer_info[i];
1947 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1992 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1948 } 1993 }
@@ -2038,10 +2083,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
2038 unsigned int i, j; 2083 unsigned int i, j;
2039 2084
2040 /* Free all the Rx ring sk_buffs */ 2085 /* Free all the Rx ring sk_buffs */
2041 2086 for (i = 0; i < rx_ring->count; i++) {
2042 for(i = 0; i < rx_ring->count; i++) {
2043 buffer_info = &rx_ring->buffer_info[i]; 2087 buffer_info = &rx_ring->buffer_info[i];
2044 if(buffer_info->skb) { 2088 if (buffer_info->skb) {
2045 pci_unmap_single(pdev, 2089 pci_unmap_single(pdev,
2046 buffer_info->dma, 2090 buffer_info->dma,
2047 buffer_info->length, 2091 buffer_info->length,
@@ -2122,7 +2166,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
2122 E1000_WRITE_FLUSH(&adapter->hw); 2166 E1000_WRITE_FLUSH(&adapter->hw);
2123 mdelay(5); 2167 mdelay(5);
2124 2168
2125 if(netif_running(netdev)) 2169 if (netif_running(netdev))
2126 e1000_clean_all_rx_rings(adapter); 2170 e1000_clean_all_rx_rings(adapter);
2127} 2171}
2128 2172
@@ -2138,13 +2182,13 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
2138 E1000_WRITE_FLUSH(&adapter->hw); 2182 E1000_WRITE_FLUSH(&adapter->hw);
2139 mdelay(5); 2183 mdelay(5);
2140 2184
2141 if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) 2185 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2142 e1000_pci_set_mwi(&adapter->hw); 2186 e1000_pci_set_mwi(&adapter->hw);
2143 2187
2144 if(netif_running(netdev)) { 2188 if (netif_running(netdev)) {
2145 e1000_configure_rx(adapter);
2146 /* No need to loop, because 82542 supports only 1 queue */ 2189 /* No need to loop, because 82542 supports only 1 queue */
2147 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2190 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2191 e1000_configure_rx(adapter);
2148 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2192 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2149 } 2193 }
2150} 2194}
@@ -2163,12 +2207,12 @@ e1000_set_mac(struct net_device *netdev, void *p)
2163 struct e1000_adapter *adapter = netdev_priv(netdev); 2207 struct e1000_adapter *adapter = netdev_priv(netdev);
2164 struct sockaddr *addr = p; 2208 struct sockaddr *addr = p;
2165 2209
2166 if(!is_valid_ether_addr(addr->sa_data)) 2210 if (!is_valid_ether_addr(addr->sa_data))
2167 return -EADDRNOTAVAIL; 2211 return -EADDRNOTAVAIL;
2168 2212
2169 /* 82542 2.0 needs to be in reset to write receive address registers */ 2213 /* 82542 2.0 needs to be in reset to write receive address registers */
2170 2214
2171 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2215 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2172 e1000_enter_82542_rst(adapter); 2216 e1000_enter_82542_rst(adapter);
2173 2217
2174 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -2182,17 +2226,17 @@ e1000_set_mac(struct net_device *netdev, void *p)
2182 /* activate the work around */ 2226 /* activate the work around */
2183 adapter->hw.laa_is_present = 1; 2227 adapter->hw.laa_is_present = 1;
2184 2228
2185 /* Hold a copy of the LAA in RAR[14]. This is done so that 2229 /* Hold a copy of the LAA in RAR[14]. This is done so that
2186 * between the time RAR[0] gets clobbered and the time it 2230 * between the time RAR[0] gets clobbered and the time it
2187 * gets fixed (in e1000_watchdog), the actual LAA is in one 2231 * gets fixed (in e1000_watchdog), the actual LAA is in one
2188 * of the RARs and no incoming packets directed to this port 2232 * of the RARs and no incoming packets directed to this port
2189 * are dropped. Eventually the LAA will be in RAR[0] and 2233 * are dropped. Eventually the LAA will be in RAR[0] and
2190 * RAR[14] */ 2234 * RAR[14] */
2191 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 2235 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2192 E1000_RAR_ENTRIES - 1); 2236 E1000_RAR_ENTRIES - 1);
2193 } 2237 }
2194 2238
2195 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2239 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2196 e1000_leave_82542_rst(adapter); 2240 e1000_leave_82542_rst(adapter);
2197 2241
2198 return 0; 2242 return 0;
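A compilable sketch of the LAA workaround described above: the same locally administered address is mirrored into the last RAR slot so frames still match while RAR[0] is transiently clobbered. rar_set() here is a printf stand-in, not the driver's register writer.

#include <stdio.h>

#define E1000_RAR_ENTRIES 15

static void rar_set(int index, const unsigned char *addr)
{
        printf("RAR[%d] <- %02x:%02x:...\n", index, addr[0], addr[1]);
}

static void install_laa(const unsigned char *laa)
{
        rar_set(0, laa);                     /* primary slot */
        rar_set(E1000_RAR_ENTRIES - 1, laa); /* backup copy in RAR[14] */
}

int main(void)
{
        unsigned char laa[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        install_laa(laa);
        return 0;
}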
@@ -2226,9 +2270,9 @@ e1000_set_multi(struct net_device *netdev)
2226 2270
2227 rctl = E1000_READ_REG(hw, RCTL); 2271 rctl = E1000_READ_REG(hw, RCTL);
2228 2272
2229 if(netdev->flags & IFF_PROMISC) { 2273 if (netdev->flags & IFF_PROMISC) {
2230 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2274 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2231 } else if(netdev->flags & IFF_ALLMULTI) { 2275 } else if (netdev->flags & IFF_ALLMULTI) {
2232 rctl |= E1000_RCTL_MPE; 2276 rctl |= E1000_RCTL_MPE;
2233 rctl &= ~E1000_RCTL_UPE; 2277 rctl &= ~E1000_RCTL_UPE;
2234 } else { 2278 } else {
@@ -2239,7 +2283,7 @@ e1000_set_multi(struct net_device *netdev)
2239 2283
2240 /* 82542 2.0 needs to be in reset to write receive address registers */ 2284 /* 82542 2.0 needs to be in reset to write receive address registers */
2241 2285
2242 if(hw->mac_type == e1000_82542_rev2_0) 2286 if (hw->mac_type == e1000_82542_rev2_0)
2243 e1000_enter_82542_rst(adapter); 2287 e1000_enter_82542_rst(adapter);
2244 2288
2245 /* load the first 14 multicast address into the exact filters 1-14 2289 /* load the first 14 multicast address into the exact filters 1-14
@@ -2249,7 +2293,7 @@ e1000_set_multi(struct net_device *netdev)
2249 */ 2293 */
2250 mc_ptr = netdev->mc_list; 2294 mc_ptr = netdev->mc_list;
2251 2295
2252 for(i = 1; i < rar_entries; i++) { 2296 for (i = 1; i < rar_entries; i++) {
2253 if (mc_ptr) { 2297 if (mc_ptr) {
2254 e1000_rar_set(hw, mc_ptr->dmi_addr, i); 2298 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2255 mc_ptr = mc_ptr->next; 2299 mc_ptr = mc_ptr->next;
@@ -2261,17 +2305,17 @@ e1000_set_multi(struct net_device *netdev)
2261 2305
2262 /* clear the old settings from the multicast hash table */ 2306 /* clear the old settings from the multicast hash table */
2263 2307
2264 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) 2308 for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
2265 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 2309 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2266 2310
2267 /* load any remaining addresses into the hash table */ 2311 /* load any remaining addresses into the hash table */
2268 2312
2269 for(; mc_ptr; mc_ptr = mc_ptr->next) { 2313 for (; mc_ptr; mc_ptr = mc_ptr->next) {
2270 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); 2314 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2271 e1000_mta_set(hw, hash_value); 2315 e1000_mta_set(hw, hash_value);
2272 } 2316 }
2273 2317
2274 if(hw->mac_type == e1000_82542_rev2_0) 2318 if (hw->mac_type == e1000_82542_rev2_0)
2275 e1000_leave_82542_rst(adapter); 2319 e1000_leave_82542_rst(adapter);
2276} 2320}
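A model of the filter-fill strategy in e1000_set_multi above: the first addresses take exact-match RAR slots (entry 0 holds the station address), and the overflow is hashed into the multicast table. The hash() below is a placeholder, not the device's real polynomial.

#include <stdio.h>

#define RAR_ENTRIES 15
#define MTA_BITS    4096

static unsigned hash(const unsigned char *mc)
{
        /* placeholder hash for illustration only */
        return ((mc[4] << 4) | (mc[5] >> 4)) % MTA_BITS;
}

int main(void)
{
        unsigned char mc[20][6] = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } };
        int n = 20, i;

        for (i = 0; i < n; i++) {
                if (i < RAR_ENTRIES - 1)
                        printf("mc[%d] -> RAR[%d] (exact match)\n", i, i + 1);
                else
                        printf("mc[%d] -> MTA bit %u (hashed)\n", i, hash(mc[i]));
        }
        return 0;
}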
2277 2321
@@ -2297,8 +2341,8 @@ e1000_82547_tx_fifo_stall(unsigned long data)
2297 struct net_device *netdev = adapter->netdev; 2341 struct net_device *netdev = adapter->netdev;
2298 uint32_t tctl; 2342 uint32_t tctl;
2299 2343
2300 if(atomic_read(&adapter->tx_fifo_stall)) { 2344 if (atomic_read(&adapter->tx_fifo_stall)) {
2301 if((E1000_READ_REG(&adapter->hw, TDT) == 2345 if ((E1000_READ_REG(&adapter->hw, TDT) ==
2302 E1000_READ_REG(&adapter->hw, TDH)) && 2346 E1000_READ_REG(&adapter->hw, TDH)) &&
2303 (E1000_READ_REG(&adapter->hw, TDFT) == 2347 (E1000_READ_REG(&adapter->hw, TDFT) ==
2304 E1000_READ_REG(&adapter->hw, TDFH)) && 2348 E1000_READ_REG(&adapter->hw, TDFH)) &&
@@ -2350,18 +2394,18 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2350 e1000_check_for_link(&adapter->hw); 2394 e1000_check_for_link(&adapter->hw);
2351 if (adapter->hw.mac_type == e1000_82573) { 2395 if (adapter->hw.mac_type == e1000_82573) {
2352 e1000_enable_tx_pkt_filtering(&adapter->hw); 2396 e1000_enable_tx_pkt_filtering(&adapter->hw);
2353 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) 2397 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2354 e1000_update_mng_vlan(adapter); 2398 e1000_update_mng_vlan(adapter);
2355 } 2399 }
2356 2400
2357 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 2401 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2358 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) 2402 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2359 link = !adapter->hw.serdes_link_down; 2403 link = !adapter->hw.serdes_link_down;
2360 else 2404 else
2361 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; 2405 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2362 2406
2363 if(link) { 2407 if (link) {
2364 if(!netif_carrier_ok(netdev)) { 2408 if (!netif_carrier_ok(netdev)) {
2365 e1000_get_speed_and_duplex(&adapter->hw, 2409 e1000_get_speed_and_duplex(&adapter->hw,
2366 &adapter->link_speed, 2410 &adapter->link_speed,
2367 &adapter->link_duplex); 2411 &adapter->link_duplex);
@@ -2392,7 +2436,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2392 adapter->smartspeed = 0; 2436 adapter->smartspeed = 0;
2393 } 2437 }
2394 } else { 2438 } else {
2395 if(netif_carrier_ok(netdev)) { 2439 if (netif_carrier_ok(netdev)) {
2396 adapter->link_speed = 0; 2440 adapter->link_speed = 0;
2397 adapter->link_duplex = 0; 2441 adapter->link_duplex = 0;
2398 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 2442 DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -2432,12 +2476,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2432 } 2476 }
2433 2477
2434 /* Dynamic mode for Interrupt Throttle Rate (ITR) */ 2478 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
2435 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { 2479 if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
2436 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total 2480 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
2437 * asymmetrical Tx or Rx gets ITR=8000; everyone 2481 * asymmetrical Tx or Rx gets ITR=8000; everyone
2438 * else is between 2000-8000. */ 2482 * else is between 2000-8000. */
2439 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; 2483 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
2440 uint32_t dif = (adapter->gotcl > adapter->gorcl ? 2484 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
2441 adapter->gotcl - adapter->gorcl : 2485 adapter->gotcl - adapter->gorcl :
2442 adapter->gorcl - adapter->gotcl) / 10000; 2486 adapter->gorcl - adapter->gotcl) / 10000;
2443 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2487 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
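A worked example of the dynamic ITR heuristic above, using assumed octet counts: symmetric traffic drives dif toward 0 and ITR toward 2000, fully one-sided traffic drives dif toward goc and ITR toward 8000. (The driver then programs the ITR register from this value, scaling by 1000000000 / (itr * 256) as in the earlier hunk.)

#include <stdint.h>
#include <stdio.h>

static uint32_t dyn_itr(uint32_t gotcl, uint32_t gorcl)
{
        uint32_t goc = (gotcl + gorcl) / 10000;
        uint32_t dif = (gotcl > gorcl ? gotcl - gorcl
                                      : gorcl - gotcl) / 10000;
        return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
}

int main(void)
{
        printf("symmetric: %u\n", dyn_itr(500000, 500000)); /* -> 2000 */
        printf("one-sided: %u\n", dyn_itr(1000000, 0));     /* -> 8000 */
        printf("mixed:     %u\n", dyn_itr(750000, 250000)); /* -> 5000 */
        return 0;
}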
@@ -2450,7 +2494,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2450 /* Force detection of hung controller every watchdog period */ 2494 /* Force detection of hung controller every watchdog period */
2451 adapter->detect_tx_hung = TRUE; 2495 adapter->detect_tx_hung = TRUE;
2452 2496
2453 /* With 82571 controllers, LAA may be overwritten due to controller 2497 /* With 82571 controllers, LAA may be overwritten due to controller
2454 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2498 * reset from the other port. Set the appropriate LAA in RAR[0] */
2455 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) 2499 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2456 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); 2500 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
@@ -2479,7 +2523,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2479 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2523 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2480 int err; 2524 int err;
2481 2525
2482 if(skb_shinfo(skb)->tso_size) { 2526 if (skb_shinfo(skb)->tso_size) {
2483 if (skb_header_cloned(skb)) { 2527 if (skb_header_cloned(skb)) {
2484 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2528 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2485 if (err) 2529 if (err)
@@ -2488,7 +2532,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2488 2532
2489 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 2533 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2490 mss = skb_shinfo(skb)->tso_size; 2534 mss = skb_shinfo(skb)->tso_size;
2491 if(skb->protocol == ntohs(ETH_P_IP)) { 2535 if (skb->protocol == ntohs(ETH_P_IP)) {
2492 skb->nh.iph->tot_len = 0; 2536 skb->nh.iph->tot_len = 0;
2493 skb->nh.iph->check = 0; 2537 skb->nh.iph->check = 0;
2494 skb->h.th->check = 2538 skb->h.th->check =
@@ -2500,7 +2544,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2500 cmd_length = E1000_TXD_CMD_IP; 2544 cmd_length = E1000_TXD_CMD_IP;
2501 ipcse = skb->h.raw - skb->data - 1; 2545 ipcse = skb->h.raw - skb->data - 1;
2502#ifdef NETIF_F_TSO_IPV6 2546#ifdef NETIF_F_TSO_IPV6
2503 } else if(skb->protocol == ntohs(ETH_P_IPV6)) { 2547 } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
2504 skb->nh.ipv6h->payload_len = 0; 2548 skb->nh.ipv6h->payload_len = 0;
2505 skb->h.th->check = 2549 skb->h.th->check =
2506 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, 2550 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -2555,7 +2599,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2555 unsigned int i; 2599 unsigned int i;
2556 uint8_t css; 2600 uint8_t css;
2557 2601
2558 if(likely(skb->ip_summed == CHECKSUM_HW)) { 2602 if (likely(skb->ip_summed == CHECKSUM_HW)) {
2559 css = skb->h.raw - skb->data; 2603 css = skb->h.raw - skb->data;
2560 2604
2561 i = tx_ring->next_to_use; 2605 i = tx_ring->next_to_use;
@@ -2595,7 +2639,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2595 2639
2596 i = tx_ring->next_to_use; 2640 i = tx_ring->next_to_use;
2597 2641
2598 while(len) { 2642 while (len) {
2599 buffer_info = &tx_ring->buffer_info[i]; 2643 buffer_info = &tx_ring->buffer_info[i];
2600 size = min(len, max_per_txd); 2644 size = min(len, max_per_txd);
2601#ifdef NETIF_F_TSO 2645#ifdef NETIF_F_TSO
@@ -2611,7 +2655,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2611 2655
2612 /* Workaround for premature desc write-backs 2656 /* Workaround for premature desc write-backs
2613 * in TSO mode. Append 4-byte sentinel desc */ 2657 * in TSO mode. Append 4-byte sentinel desc */
2614 if(unlikely(mss && !nr_frags && size == len && size > 8)) 2658 if (unlikely(mss && !nr_frags && size == len && size > 8))
2615 size -= 4; 2659 size -= 4;
2616#endif 2660#endif
2617 /* work-around for errata 10 and it applies 2661 /* work-around for errata 10 and it applies
@@ -2619,13 +2663,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2619 * The fix is to make sure that the first descriptor of a 2663 * The fix is to make sure that the first descriptor of a
2620 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2664 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2621 */ 2665 */
2622 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2666 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2623 (size > 2015) && count == 0)) 2667 (size > 2015) && count == 0))
2624 size = 2015; 2668 size = 2015;
2625 2669
2626 /* Workaround for potential 82544 hang in PCI-X. Avoid 2670 /* Workaround for potential 82544 hang in PCI-X. Avoid
2627 * terminating buffers within evenly-aligned dwords. */ 2671 * terminating buffers within evenly-aligned dwords. */
2628 if(unlikely(adapter->pcix_82544 && 2672 if (unlikely(adapter->pcix_82544 &&
2629 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2673 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2630 size > 4)) 2674 size > 4))
2631 size -= 4; 2675 size -= 4;
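A sketch of the 82544 alignment trim above: if a buffer's last byte would land with bit 2 of its address clear (an "evenly-aligned" dword), the driver shaves 4 bytes so the split point moves and the tail goes into the next descriptor. Addresses below are assumed, for illustration.

#include <stdio.h>

static unsigned trim_for_82544(unsigned long base, unsigned offset,
                               unsigned size)
{
        if (!((base + offset + size - 1) & 4) && size > 4)
                size -= 4;
        return size;
}

int main(void)
{
        /* last byte 0x10ff, bit 2 set -> keep 256 */
        printf("%u\n", trim_for_82544(0x1000, 0, 256));
        /* last byte 0x1103, bit 2 clear -> trimmed to 256 */
        printf("%u\n", trim_for_82544(0x1000, 0, 260));
        return 0;
}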
@@ -2641,29 +2685,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2641 len -= size; 2685 len -= size;
2642 offset += size; 2686 offset += size;
2643 count++; 2687 count++;
2644 if(unlikely(++i == tx_ring->count)) i = 0; 2688 if (unlikely(++i == tx_ring->count)) i = 0;
2645 } 2689 }
2646 2690
2647 for(f = 0; f < nr_frags; f++) { 2691 for (f = 0; f < nr_frags; f++) {
2648 struct skb_frag_struct *frag; 2692 struct skb_frag_struct *frag;
2649 2693
2650 frag = &skb_shinfo(skb)->frags[f]; 2694 frag = &skb_shinfo(skb)->frags[f];
2651 len = frag->size; 2695 len = frag->size;
2652 offset = frag->page_offset; 2696 offset = frag->page_offset;
2653 2697
2654 while(len) { 2698 while (len) {
2655 buffer_info = &tx_ring->buffer_info[i]; 2699 buffer_info = &tx_ring->buffer_info[i];
2656 size = min(len, max_per_txd); 2700 size = min(len, max_per_txd);
2657#ifdef NETIF_F_TSO 2701#ifdef NETIF_F_TSO
2658 /* Workaround for premature desc write-backs 2702 /* Workaround for premature desc write-backs
2659 * in TSO mode. Append 4-byte sentinel desc */ 2703 * in TSO mode. Append 4-byte sentinel desc */
2660 if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2704 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2661 size -= 4; 2705 size -= 4;
2662#endif 2706#endif
2663 /* Workaround for potential 82544 hang in PCI-X. 2707 /* Workaround for potential 82544 hang in PCI-X.
2664 * Avoid terminating buffers within evenly-aligned 2708 * Avoid terminating buffers within evenly-aligned
2665 * dwords. */ 2709 * dwords. */
2666 if(unlikely(adapter->pcix_82544 && 2710 if (unlikely(adapter->pcix_82544 &&
2667 !((unsigned long)(frag->page+offset+size-1) & 4) && 2711 !((unsigned long)(frag->page+offset+size-1) & 4) &&
2668 size > 4)) 2712 size > 4))
2669 size -= 4; 2713 size -= 4;
@@ -2680,7 +2724,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2680 len -= size; 2724 len -= size;
2681 offset += size; 2725 offset += size;
2682 count++; 2726 count++;
2683 if(unlikely(++i == tx_ring->count)) i = 0; 2727 if (unlikely(++i == tx_ring->count)) i = 0;
2684 } 2728 }
2685 } 2729 }
2686 2730
@@ -2700,35 +2744,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2700 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2744 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2701 unsigned int i; 2745 unsigned int i;
2702 2746
2703 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2747 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2704 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2748 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2705 E1000_TXD_CMD_TSE; 2749 E1000_TXD_CMD_TSE;
2706 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2750 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2707 2751
2708 if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2752 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2709 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2753 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2710 } 2754 }
2711 2755
2712 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2756 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2713 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2757 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2714 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2758 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2715 } 2759 }
2716 2760
2717 if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2761 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2718 txd_lower |= E1000_TXD_CMD_VLE; 2762 txd_lower |= E1000_TXD_CMD_VLE;
2719 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2763 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2720 } 2764 }
2721 2765
2722 i = tx_ring->next_to_use; 2766 i = tx_ring->next_to_use;
2723 2767
2724 while(count--) { 2768 while (count--) {
2725 buffer_info = &tx_ring->buffer_info[i]; 2769 buffer_info = &tx_ring->buffer_info[i];
2726 tx_desc = E1000_TX_DESC(*tx_ring, i); 2770 tx_desc = E1000_TX_DESC(*tx_ring, i);
2727 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 2771 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2728 tx_desc->lower.data = 2772 tx_desc->lower.data =
2729 cpu_to_le32(txd_lower | buffer_info->length); 2773 cpu_to_le32(txd_lower | buffer_info->length);
2730 tx_desc->upper.data = cpu_to_le32(txd_upper); 2774 tx_desc->upper.data = cpu_to_le32(txd_upper);
2731 if(unlikely(++i == tx_ring->count)) i = 0; 2775 if (unlikely(++i == tx_ring->count)) i = 0;
2732 } 2776 }
2733 2777
2734 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 2778 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -2763,20 +2807,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
2763 2807
2764 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); 2808 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
2765 2809
2766 if(adapter->link_duplex != HALF_DUPLEX) 2810 if (adapter->link_duplex != HALF_DUPLEX)
2767 goto no_fifo_stall_required; 2811 goto no_fifo_stall_required;
2768 2812
2769 if(atomic_read(&adapter->tx_fifo_stall)) 2813 if (atomic_read(&adapter->tx_fifo_stall))
2770 return 1; 2814 return 1;
2771 2815
2772 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 2816 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
2773 atomic_set(&adapter->tx_fifo_stall, 1); 2817 atomic_set(&adapter->tx_fifo_stall, 1);
2774 return 1; 2818 return 1;
2775 } 2819 }
2776 2820
2777no_fifo_stall_required: 2821no_fifo_stall_required:
2778 adapter->tx_fifo_head += skb_fifo_len; 2822 adapter->tx_fifo_head += skb_fifo_len;
2779 if(adapter->tx_fifo_head >= adapter->tx_fifo_size) 2823 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
2780 adapter->tx_fifo_head -= adapter->tx_fifo_size; 2824 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2781 return 0; 2825 return 0;
2782} 2826}
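A model of the 82547 Tx FIFO head accounting in the hunk above: the head advances by the rounded-up frame length and wraps modulo the FIFO size. The 16-byte header rounding and FIFO size are assumptions for illustration.

#include <stdio.h>

#define FIFO_HDR 16u                   /* assumed per-frame rounding */
#define ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))

int main(void)
{
        unsigned fifo_size = 8192, head = 8000;
        unsigned skb_fifo_len = ROUNDUP(300 + FIFO_HDR, FIFO_HDR);

        head += skb_fifo_len;
        if (head >= fifo_size)
                head -= fifo_size;       /* wrap, as in the driver */
        printf("new head = %u\n", head); /* 8000 + 320 - 8192 = 128 */
        return 0;
}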
@@ -2787,27 +2831,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2787{ 2831{
2788 struct e1000_hw *hw = &adapter->hw; 2832 struct e1000_hw *hw = &adapter->hw;
2789 uint16_t length, offset; 2833 uint16_t length, offset;
2790 if(vlan_tx_tag_present(skb)) { 2834 if (vlan_tx_tag_present(skb)) {
2791 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 2835 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2792 ( adapter->hw.mng_cookie.status & 2836 ( adapter->hw.mng_cookie.status &
2793 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 2837 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2794 return 0; 2838 return 0;
2795 } 2839 }
2796 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { 2840 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2797 struct ethhdr *eth = (struct ethhdr *) skb->data; 2841 struct ethhdr *eth = (struct ethhdr *) skb->data;
2798 if((htons(ETH_P_IP) == eth->h_proto)) { 2842 if ((htons(ETH_P_IP) == eth->h_proto)) {
2799 const struct iphdr *ip = 2843 const struct iphdr *ip =
2800 (struct iphdr *)((uint8_t *)skb->data+14); 2844 (struct iphdr *)((uint8_t *)skb->data+14);
2801 if(IPPROTO_UDP == ip->protocol) { 2845 if (IPPROTO_UDP == ip->protocol) {
2802 struct udphdr *udp = 2846 struct udphdr *udp =
2803 (struct udphdr *)((uint8_t *)ip + 2847 (struct udphdr *)((uint8_t *)ip +
2804 (ip->ihl << 2)); 2848 (ip->ihl << 2));
2805 if(ntohs(udp->dest) == 67) { 2849 if (ntohs(udp->dest) == 67) {
2806 offset = (uint8_t *)udp + 8 - skb->data; 2850 offset = (uint8_t *)udp + 8 - skb->data;
2807 length = skb->len - offset; 2851 length = skb->len - offset;
2808 2852
2809 return e1000_mng_write_dhcp_info(hw, 2853 return e1000_mng_write_dhcp_info(hw,
2810 (uint8_t *)udp + 8, 2854 (uint8_t *)udp + 8,
2811 length); 2855 length);
2812 } 2856 }
2813 } 2857 }
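A compilable model of the header walk in e1000_transfer_dhcp_info above: Ethernet at offset 0, IPv4 at offset 14, UDP after ihl*4 bytes, and the DHCP payload 8 bytes into the UDP header when the destination is server port 67. The offsets mirror the driver's pointer casts; the frame contents are fabricated for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t frame[64] = { 0 };
        uint8_t ihl = 5;                 /* 20-byte IPv4 header */
        unsigned ip_off  = 14;           /* after Ethernet header */
        unsigned udp_off = ip_off + ihl * 4;
        unsigned payload = udp_off + 8;  /* skip UDP header */

        frame[udp_off + 2] = 0;          /* dest port 67, big endian */
        frame[udp_off + 3] = 67;

        uint16_t dest = (uint16_t)(frame[udp_off + 2] << 8 | frame[udp_off + 3]);
        if (dest == 67)
                printf("DHCP payload starts at offset %u\n", payload);
        return 0;
}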
@@ -2830,7 +2874,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2830 unsigned int nr_frags = 0; 2874 unsigned int nr_frags = 0;
2831 unsigned int mss = 0; 2875 unsigned int mss = 0;
2832 int count = 0; 2876 int count = 0;
2833 int tso; 2877 int tso;
2834 unsigned int f; 2878 unsigned int f;
2835 len -= skb->data_len; 2879 len -= skb->data_len;
2836 2880
@@ -2853,7 +2897,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2853 * 4 = ceil(buffer len/mss). To make sure we don't 2897 * 4 = ceil(buffer len/mss). To make sure we don't
2854 * overrun the FIFO, adjust the max buffer len if mss 2898 * overrun the FIFO, adjust the max buffer len if mss
2855 * drops. */ 2899 * drops. */
2856 if(mss) { 2900 if (mss) {
2857 uint8_t hdr_len; 2901 uint8_t hdr_len;
2858 max_per_txd = min(mss << 2, max_per_txd); 2902 max_per_txd = min(mss << 2, max_per_txd);
2859 max_txd_pwr = fls(max_per_txd) - 1; 2903 max_txd_pwr = fls(max_per_txd) - 1;
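A worked example of the TSO descriptor-budget math above: clamp the per-descriptor buffer to 4*mss, then recompute the power-of-two shift that TXD_USE_COUNT divides by. fls() is reimplemented here so the sketch stands alone.

#include <stdio.h>

static int fls_(unsigned x)
{
        int r = 0;
        while (x) { r++; x >>= 1; }
        return r;
}

int main(void)
{
        unsigned max_per_txd = 8192, mss = 1460;

        if (mss) {
                unsigned cap = mss << 2;            /* 4 * 1460 = 5840 */
                if (cap < max_per_txd)
                        max_per_txd = cap;
        }
        int max_txd_pwr = fls_(max_per_txd) - 1;    /* floor(log2(5840)) = 12 */
        printf("max_per_txd=%u, max_txd_pwr=%d\n", max_per_txd, max_txd_pwr);
        return 0;
}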
@@ -2876,12 +2920,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2876 } 2920 }
2877 } 2921 }
2878 2922
2879 if((mss) || (skb->ip_summed == CHECKSUM_HW))
2880 /* reserve a descriptor for the offload context */ 2923 /* reserve a descriptor for the offload context */
2924 if ((mss) || (skb->ip_summed == CHECKSUM_HW))
2881 count++; 2925 count++;
2882 count++; 2926 count++;
2883#else 2927#else
2884 if(skb->ip_summed == CHECKSUM_HW) 2928 if (skb->ip_summed == CHECKSUM_HW)
2885 count++; 2929 count++;
2886#endif 2930#endif
2887 2931
@@ -2894,24 +2938,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2894 2938
2895 count += TXD_USE_COUNT(len, max_txd_pwr); 2939 count += TXD_USE_COUNT(len, max_txd_pwr);
2896 2940
2897 if(adapter->pcix_82544) 2941 if (adapter->pcix_82544)
2898 count++; 2942 count++;
2899 2943
2900 /* work-around for errata 10 and it applies to all controllers 2944 /* work-around for errata 10 and it applies to all controllers
2901 * in PCI-X mode, so add one more descriptor to the count 2945 * in PCI-X mode, so add one more descriptor to the count
2902 */ 2946 */
2903 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2947 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2904 (len > 2015))) 2948 (len > 2015)))
2905 count++; 2949 count++;
2906 2950
2907 nr_frags = skb_shinfo(skb)->nr_frags; 2951 nr_frags = skb_shinfo(skb)->nr_frags;
2908 for(f = 0; f < nr_frags; f++) 2952 for (f = 0; f < nr_frags; f++)
2909 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 2953 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
2910 max_txd_pwr); 2954 max_txd_pwr);
2911 if(adapter->pcix_82544) 2955 if (adapter->pcix_82544)
2912 count += nr_frags; 2956 count += nr_frags;
2913 2957
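The count built up above is a worst-case descriptor reservation taken before the tx lock; TXD_USE_COUNT presumably ceil-divides a length by the 2^max_txd_pwr chunk size. A sketch of that accounting (the macro shape is an assumption, not quoted from the driver headers):

/* Assumed shape: descriptors needed for len bytes at 2^pwr bytes each. */
#define TXD_USE_COUNT_SKETCH(len, pwr) (((len) >> (pwr)) + 1)

/* e.g. a 6000-byte linear area with 4096-byte chunks (pwr = 12) books
 * TXD_USE_COUNT_SKETCH(6000, 12) = 2 descriptors; the PCI-X errata-10
 * path above then adds one more whenever len > 2015. */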
2914 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2958 if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2915 e1000_transfer_dhcp_info(adapter, skb); 2959 e1000_transfer_dhcp_info(adapter, skb);
2916 2960
2917 local_irq_save(flags); 2961 local_irq_save(flags);
@@ -2929,8 +2973,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2929 return NETDEV_TX_BUSY; 2973 return NETDEV_TX_BUSY;
2930 } 2974 }
2931 2975
2932 if(unlikely(adapter->hw.mac_type == e1000_82547)) { 2976 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
2933 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 2977 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2934 netif_stop_queue(netdev); 2978 netif_stop_queue(netdev);
2935 mod_timer(&adapter->tx_fifo_stall_timer, jiffies); 2979 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2936 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 2980 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -2938,13 +2982,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2938 } 2982 }
2939 } 2983 }
2940 2984
2941 if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 2985 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2942 tx_flags |= E1000_TX_FLAGS_VLAN; 2986 tx_flags |= E1000_TX_FLAGS_VLAN;
2943 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 2987 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2944 } 2988 }
2945 2989
2946 first = tx_ring->next_to_use; 2990 first = tx_ring->next_to_use;
2947 2991
2948 tso = e1000_tso(adapter, tx_ring, skb); 2992 tso = e1000_tso(adapter, tx_ring, skb);
2949 if (tso < 0) { 2993 if (tso < 0) {
2950 dev_kfree_skb_any(skb); 2994 dev_kfree_skb_any(skb);
@@ -3033,9 +3077,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3033 struct e1000_adapter *adapter = netdev_priv(netdev); 3077 struct e1000_adapter *adapter = netdev_priv(netdev);
3034 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3078 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3035 3079
3036 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3080 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3037 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3081 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3038 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3082 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
3039 return -EINVAL; 3083 return -EINVAL;
3040 } 3084 }
3041 3085
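The bound checked above is on the on-wire frame, not the MTU itself: the MTU excludes the Ethernet header and FCS that ENET_HEADER_SIZE and ETHERNET_FCS_SIZE add back. A small sketch of the arithmetic, with the conventional sizes assumed rather than read from the driver headers:

/* Sketch: 14-byte Ethernet header plus 4-byte FCS. */
enum { ETH_HDR_BYTES = 14, ETH_FCS_BYTES = 4 };

static int mtu_to_max_frame(int mtu)
{
        /* default MTU of 1500 -> 1518-byte maximum frame */
        return mtu + ETH_HDR_BYTES + ETH_FCS_BYTES;
}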
@@ -3083,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3083 3127
3084 netdev->mtu = new_mtu; 3128 netdev->mtu = new_mtu;
3085 3129
3086 if(netif_running(netdev)) { 3130 if (netif_running(netdev)) {
3087 e1000_down(adapter); 3131 e1000_down(adapter);
3088 e1000_up(adapter); 3132 e1000_up(adapter);
3089 } 3133 }
@@ -3170,7 +3214,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3170 hw->collision_delta = E1000_READ_REG(hw, COLC); 3214 hw->collision_delta = E1000_READ_REG(hw, COLC);
3171 adapter->stats.colc += hw->collision_delta; 3215 adapter->stats.colc += hw->collision_delta;
3172 3216
3173 if(hw->mac_type >= e1000_82543) { 3217 if (hw->mac_type >= e1000_82543) {
3174 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); 3218 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3175 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); 3219 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3176 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); 3220 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
@@ -3178,7 +3222,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3178 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 3222 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
3179 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 3223 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
3180 } 3224 }
3181 if(hw->mac_type > e1000_82547_rev_2) { 3225 if (hw->mac_type > e1000_82547_rev_2) {
3182 adapter->stats.iac += E1000_READ_REG(hw, IAC); 3226 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3183 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); 3227 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
3184 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); 3228 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
@@ -3222,14 +3266,14 @@ e1000_update_stats(struct e1000_adapter *adapter)
3222 3266
3223 /* Phy Stats */ 3267 /* Phy Stats */
3224 3268
3225 if(hw->media_type == e1000_media_type_copper) { 3269 if (hw->media_type == e1000_media_type_copper) {
3226 if((adapter->link_speed == SPEED_1000) && 3270 if ((adapter->link_speed == SPEED_1000) &&
3227 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3271 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3228 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3272 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3229 adapter->phy_stats.idle_errors += phy_tmp; 3273 adapter->phy_stats.idle_errors += phy_tmp;
3230 } 3274 }
3231 3275
3232 if((hw->mac_type <= e1000_82546) && 3276 if ((hw->mac_type <= e1000_82546) &&
3233 (hw->phy_type == e1000_phy_m88) && 3277 (hw->phy_type == e1000_phy_m88) &&
3234 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3278 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3235 adapter->phy_stats.receive_errors += phy_tmp; 3279 adapter->phy_stats.receive_errors += phy_tmp;
@@ -3294,7 +3338,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3294 return IRQ_NONE; /* Not our interrupt */ 3338 return IRQ_NONE; /* Not our interrupt */
3295 } 3339 }
3296 3340
3297 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3341 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3298 hw->get_link_status = 1; 3342 hw->get_link_status = 1;
3299 mod_timer(&adapter->watchdog_timer, jiffies); 3343 mod_timer(&adapter->watchdog_timer, jiffies);
3300 } 3344 }
@@ -3326,26 +3370,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3326 3370
3327#else /* if !CONFIG_E1000_NAPI */ 3371#else /* if !CONFIG_E1000_NAPI */
3328 /* Writing IMC and IMS is needed for 82547. 3372 /* Writing IMC and IMS is needed for 82547.
3329 Due to Hub Link bus being occupied, an interrupt 3373 * Due to Hub Link bus being occupied, an interrupt
3330 de-assertion message is not able to be sent. 3374 * de-assertion message is not able to be sent.
3331 When an interrupt assertion message is generated later, 3375 * When an interrupt assertion message is generated later,
3332 two messages are re-ordered and sent out. 3376 * two messages are re-ordered and sent out.
3333 That causes APIC to think 82547 is in de-assertion 3377 * That causes APIC to think 82547 is in de-assertion
3334 state, while 82547 is in assertion state, resulting 3378 * state, while 82547 is in assertion state, resulting
3335 in dead lock. Writing IMC forces 82547 into 3379 * in dead lock. Writing IMC forces 82547 into
3336 de-assertion state. 3380 * de-assertion state.
3337 */ 3381 */
3338 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ 3382 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
3339 atomic_inc(&adapter->irq_sem); 3383 atomic_inc(&adapter->irq_sem);
3340 E1000_WRITE_REG(hw, IMC, ~0); 3384 E1000_WRITE_REG(hw, IMC, ~0);
3341 } 3385 }
3342 3386
3343 for(i = 0; i < E1000_MAX_INTR; i++) 3387 for (i = 0; i < E1000_MAX_INTR; i++)
3344 if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3388 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3345 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3389 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3346 break; 3390 break;
3347 3391
3348 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) 3392 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3349 e1000_irq_enable(adapter); 3393 e1000_irq_enable(adapter);
3350 3394
3351#endif /* CONFIG_E1000_NAPI */ 3395#endif /* CONFIG_E1000_NAPI */
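The reflowed comment above gives the why; the shape of the workaround is a classic mask/drain/unmask interrupt pattern. A generic sketch under hypothetical names (nic_mask_irqs and friends do not exist in the driver; they stand in for the IMC write, clean_rx/clean_tx, and e1000_irq_enable()):

struct nic;  /* opaque, hypothetical */

void nic_mask_irqs(struct nic *nic);
void nic_unmask_irqs(struct nic *nic);
int  nic_clean_rx(struct nic *nic);   /* nonzero while work remains */
int  nic_clean_tx(struct nic *nic);

/* Mask at the device, drain a bounded amount of work, unmask.  The
 * bound keeps a flood of traffic from pinning the CPU in hard-irq
 * context, mirroring the E1000_MAX_INTR loop above. */
static void isr_bounded_drain(struct nic *nic, int max_passes)
{
        int i;

        nic_mask_irqs(nic);                     /* like IMC = ~0 */
        for (i = 0; i < max_passes; i++)
                if (!nic_clean_rx(nic) && !nic_clean_tx(nic))
                        break;                  /* no completed work left */
        nic_unmask_irqs(nic);                   /* like e1000_irq_enable() */
}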
@@ -3397,9 +3441,9 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3397 3441
3398 *budget -= work_done; 3442 *budget -= work_done;
3399 poll_dev->quota -= work_done; 3443 poll_dev->quota -= work_done;
3400 3444
3401 /* If no Tx and not enough Rx work done, exit the polling mode */ 3445 /* If no Tx and not enough Rx work done, exit the polling mode */
3402 if((!tx_cleaned && (work_done == 0)) || 3446 if ((!tx_cleaned && (work_done == 0)) ||
3403 !netif_running(adapter->netdev)) { 3447 !netif_running(adapter->netdev)) {
3404quit_polling: 3448quit_polling:
3405 netif_rx_complete(poll_dev); 3449 netif_rx_complete(poll_dev);
@@ -3431,7 +3475,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3431 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3475 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3432 3476
3433 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 3477 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3434 for(cleaned = FALSE; !cleaned; ) { 3478 for (cleaned = FALSE; !cleaned; ) {
3435 tx_desc = E1000_TX_DESC(*tx_ring, i); 3479 tx_desc = E1000_TX_DESC(*tx_ring, i);
3436 buffer_info = &tx_ring->buffer_info[i]; 3480 buffer_info = &tx_ring->buffer_info[i];
3437 cleaned = (i == eop); 3481 cleaned = (i == eop);
@@ -3442,7 +3486,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3442 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3486 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3443 memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); 3487 memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
3444 3488
3445 if(unlikely(++i == tx_ring->count)) i = 0; 3489 if (unlikely(++i == tx_ring->count)) i = 0;
3446 } 3490 }
3447 3491
3448#ifdef CONFIG_E1000_MQ 3492#ifdef CONFIG_E1000_MQ
@@ -3457,7 +3501,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3457 3501
3458 spin_lock(&tx_ring->tx_lock); 3502 spin_lock(&tx_ring->tx_lock);
3459 3503
3460 if(unlikely(cleaned && netif_queue_stopped(netdev) && 3504 if (unlikely(cleaned && netif_queue_stopped(netdev) &&
3461 netif_carrier_ok(netdev))) 3505 netif_carrier_ok(netdev)))
3462 netif_wake_queue(netdev); 3506 netif_wake_queue(netdev);
3463 3507
@@ -3519,21 +3563,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
3519 skb->ip_summed = CHECKSUM_NONE; 3563 skb->ip_summed = CHECKSUM_NONE;
3520 3564
3521 /* 82543 or newer only */ 3565 /* 82543 or newer only */
3522 if(unlikely(adapter->hw.mac_type < e1000_82543)) return; 3566 if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
3523 /* Ignore Checksum bit is set */ 3567 /* Ignore Checksum bit is set */
3524 if(unlikely(status & E1000_RXD_STAT_IXSM)) return; 3568 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3525 /* TCP/UDP checksum error bit is set */ 3569 /* TCP/UDP checksum error bit is set */
3526 if(unlikely(errors & E1000_RXD_ERR_TCPE)) { 3570 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3527 /* let the stack verify checksum errors */ 3571 /* let the stack verify checksum errors */
3528 adapter->hw_csum_err++; 3572 adapter->hw_csum_err++;
3529 return; 3573 return;
3530 } 3574 }
3531 /* TCP/UDP Checksum has not been calculated */ 3575 /* TCP/UDP Checksum has not been calculated */
3532 if(adapter->hw.mac_type <= e1000_82547_rev_2) { 3576 if (adapter->hw.mac_type <= e1000_82547_rev_2) {
3533 if(!(status & E1000_RXD_STAT_TCPCS)) 3577 if (!(status & E1000_RXD_STAT_TCPCS))
3534 return; 3578 return;
3535 } else { 3579 } else {
3536 if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) 3580 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
3537 return; 3581 return;
3538 } 3582 }
3539 /* It must be a TCP or UDP packet with a valid checksum */ 3583 /* It must be a TCP or UDP packet with a valid checksum */
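Restated, the ladder above only lets the stack skip verification when hardware both attempted and passed the check. A sketch of the same decision as a predicate; the E1000_RXD_* flags are the driver's (from e1000_hw.h), while the helper itself is hypothetical:

#include <stdint.h>

/* Returns nonzero when the stack may skip software verification. */
static int rx_csum_usable(int mac_newer_than_82547_rev_2,
                          uint8_t status, uint8_t errors)
{
        if (status & E1000_RXD_STAT_IXSM)
                return 0;               /* hw says: ignore my result  */
        if (errors & E1000_RXD_ERR_TCPE)
                return 0;               /* hw saw a checksum error    */
        if (!mac_newer_than_82547_rev_2)
                return status & E1000_RXD_STAT_TCPCS;   /* TCP only   */
        return status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS);
}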
@@ -3569,9 +3613,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3569{ 3613{
3570 struct net_device *netdev = adapter->netdev; 3614 struct net_device *netdev = adapter->netdev;
3571 struct pci_dev *pdev = adapter->pdev; 3615 struct pci_dev *pdev = adapter->pdev;
3572 struct e1000_rx_desc *rx_desc; 3616 struct e1000_rx_desc *rx_desc, *next_rxd;
3573 struct e1000_buffer *buffer_info; 3617 struct e1000_buffer *buffer_info, *next_buffer;
3574 struct sk_buff *skb;
3575 unsigned long flags; 3618 unsigned long flags;
3576 uint32_t length; 3619 uint32_t length;
3577 uint8_t last_byte; 3620 uint8_t last_byte;
@@ -3581,16 +3624,25 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3581 3624
3582 i = rx_ring->next_to_clean; 3625 i = rx_ring->next_to_clean;
3583 rx_desc = E1000_RX_DESC(*rx_ring, i); 3626 rx_desc = E1000_RX_DESC(*rx_ring, i);
3627 buffer_info = &rx_ring->buffer_info[i];
3584 3628
3585 while(rx_desc->status & E1000_RXD_STAT_DD) { 3629 while (rx_desc->status & E1000_RXD_STAT_DD) {
3586 buffer_info = &rx_ring->buffer_info[i]; 3630 struct sk_buff *skb, *next_skb;
3587 u8 status; 3631 u8 status;
3588#ifdef CONFIG_E1000_NAPI 3632#ifdef CONFIG_E1000_NAPI
3589 if(*work_done >= work_to_do) 3633 if (*work_done >= work_to_do)
3590 break; 3634 break;
3591 (*work_done)++; 3635 (*work_done)++;
3592#endif 3636#endif
3593 status = rx_desc->status; 3637 status = rx_desc->status;
3638 skb = buffer_info->skb;
3639 buffer_info->skb = NULL;
3640
3641 if (++i == rx_ring->count) i = 0;
3642 next_rxd = E1000_RX_DESC(*rx_ring, i);
3643 next_buffer = &rx_ring->buffer_info[i];
3644 next_skb = next_buffer->skb;
3645
3594 cleaned = TRUE; 3646 cleaned = TRUE;
3595 cleaned_count++; 3647 cleaned_count++;
3596 pci_unmap_single(pdev, 3648 pci_unmap_single(pdev,
@@ -3598,20 +3650,50 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3598 buffer_info->length, 3650 buffer_info->length,
3599 PCI_DMA_FROMDEVICE); 3651 PCI_DMA_FROMDEVICE);
3600 3652
3601 skb = buffer_info->skb;
3602 length = le16_to_cpu(rx_desc->length); 3653 length = le16_to_cpu(rx_desc->length);
3603 3654
3604 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { 3655 skb_put(skb, length);
3605 /* All receives must fit into a single buffer */ 3656
3606 E1000_DBG("%s: Receive packet consumed multiple" 3657 if (!(status & E1000_RXD_STAT_EOP)) {
3607 " buffers\n", netdev->name); 3658 if (!rx_ring->rx_skb_top) {
3608 dev_kfree_skb_irq(skb); 3659 rx_ring->rx_skb_top = skb;
3660 rx_ring->rx_skb_top->len = length;
3661 rx_ring->rx_skb_prev = skb;
3662 } else {
3663 if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
3664 rx_ring->rx_skb_prev->next = skb;
3665 skb->prev = rx_ring->rx_skb_prev;
3666 } else {
3667 skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
3668 }
3669 rx_ring->rx_skb_prev = skb;
3670 rx_ring->rx_skb_top->data_len += length;
3671 }
3609 goto next_desc; 3672 goto next_desc;
3673 } else {
3674 if (rx_ring->rx_skb_top) {
3675 if (skb_shinfo(rx_ring->rx_skb_top)
3676 ->frag_list) {
3677 rx_ring->rx_skb_prev->next = skb;
3678 skb->prev = rx_ring->rx_skb_prev;
3679 } else
3680 skb_shinfo(rx_ring->rx_skb_top)
3681 ->frag_list = skb;
3682
3683 rx_ring->rx_skb_top->data_len += length;
3684 rx_ring->rx_skb_top->len +=
3685 rx_ring->rx_skb_top->data_len;
3686
3687 skb = rx_ring->rx_skb_top;
3688 multi_descriptor = TRUE;
3689 rx_ring->rx_skb_top = NULL;
3690 rx_ring->rx_skb_prev = NULL;
3691 }
3610 } 3692 }
3611 3693
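The new multi-descriptor path above stitches continuation buffers into one logical skb: the first continuation hangs off skb_shinfo(top)->frag_list, later ones are appended through ->next, and byte accounting is split between top->len (head buffer) and top->data_len (chained data). A condensed sketch of the chaining step, assuming those same field conventions:

/* Append one continuation buffer to the packet headed by "top".
 * "prev" tracks the tail of the frag_list chain, as rx_skb_prev does. */
static void chain_rx_continuation(struct sk_buff *top,
                                  struct sk_buff **prev,
                                  struct sk_buff *skb,
                                  unsigned int length)
{
        if (!skb_shinfo(top)->frag_list)
                skb_shinfo(top)->frag_list = skb;   /* first continuation */
        else
                (*prev)->next = skb;                /* append to the chain */
        *prev = skb;
        top->data_len += length;   /* top->len is finalized at EOP */
}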
3612 if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 3694 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
3613 last_byte = *(skb->data + length - 1); 3695 last_byte = *(skb->data + length - 1);
3614 if(TBI_ACCEPT(&adapter->hw, rx_desc->status, 3696 if (TBI_ACCEPT(&adapter->hw, status,
3615 rx_desc->errors, length, last_byte)) { 3697 rx_desc->errors, length, last_byte)) {
3616 spin_lock_irqsave(&adapter->stats_lock, flags); 3698 spin_lock_irqsave(&adapter->stats_lock, flags);
3617 e1000_tbi_adjust_stats(&adapter->hw, 3699 e1000_tbi_adjust_stats(&adapter->hw,
@@ -3656,9 +3738,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3656 (uint32_t)(status) | 3738 (uint32_t)(status) |
3657 ((uint32_t)(rx_desc->errors) << 24), 3739 ((uint32_t)(rx_desc->errors) << 24),
3658 rx_desc->csum, skb); 3740 rx_desc->csum, skb);
3741
3659 skb->protocol = eth_type_trans(skb, netdev); 3742 skb->protocol = eth_type_trans(skb, netdev);
3660#ifdef CONFIG_E1000_NAPI 3743#ifdef CONFIG_E1000_NAPI
3661 if(unlikely(adapter->vlgrp && 3744 if (unlikely(adapter->vlgrp &&
3662 (status & E1000_RXD_STAT_VP))) { 3745 (status & E1000_RXD_STAT_VP))) {
3663 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3746 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3664 le16_to_cpu(rx_desc->special) & 3747 le16_to_cpu(rx_desc->special) &
@@ -3667,8 +3750,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3667 netif_receive_skb(skb); 3750 netif_receive_skb(skb);
3668 } 3751 }
3669#else /* CONFIG_E1000_NAPI */ 3752#else /* CONFIG_E1000_NAPI */
3670 if(unlikely(adapter->vlgrp && 3753 if (unlikely(adapter->vlgrp &&
3671 (rx_desc->status & E1000_RXD_STAT_VP))) { 3754 (status & E1000_RXD_STAT_VP))) {
3672 vlan_hwaccel_rx(skb, adapter->vlgrp, 3755 vlan_hwaccel_rx(skb, adapter->vlgrp,
3673 le16_to_cpu(rx_desc->special) & 3756 le16_to_cpu(rx_desc->special) &
3674 E1000_RXD_SPC_VLAN_MASK); 3757 E1000_RXD_SPC_VLAN_MASK);
@@ -3691,6 +3774,8 @@ next_desc:
3691 cleaned_count = 0; 3774 cleaned_count = 0;
3692 } 3775 }
3693 3776
3777 rx_desc = next_rxd;
3778 buffer_info = next_buffer;
3694 } 3779 }
3695 rx_ring->next_to_clean = i; 3780 rx_ring->next_to_clean = i;
3696 3781
@@ -3716,13 +3801,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3716 struct e1000_rx_ring *rx_ring) 3801 struct e1000_rx_ring *rx_ring)
3717#endif 3802#endif
3718{ 3803{
3719 union e1000_rx_desc_packet_split *rx_desc; 3804 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
3720 struct net_device *netdev = adapter->netdev; 3805 struct net_device *netdev = adapter->netdev;
3721 struct pci_dev *pdev = adapter->pdev; 3806 struct pci_dev *pdev = adapter->pdev;
3722 struct e1000_buffer *buffer_info; 3807 struct e1000_buffer *buffer_info, *next_buffer;
3723 struct e1000_ps_page *ps_page; 3808 struct e1000_ps_page *ps_page;
3724 struct e1000_ps_page_dma *ps_page_dma; 3809 struct e1000_ps_page_dma *ps_page_dma;
3725 struct sk_buff *skb; 3810 struct sk_buff *skb, *next_skb;
3726 unsigned int i, j; 3811 unsigned int i, j;
3727 uint32_t length, staterr; 3812 uint32_t length, staterr;
3728 int cleaned_count = 0; 3813 int cleaned_count = 0;
@@ -3731,39 +3816,44 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3731 i = rx_ring->next_to_clean; 3816 i = rx_ring->next_to_clean;
3732 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3817 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3733 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3818 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3819 buffer_info = &rx_ring->buffer_info[i];
3734 3820
3735 while(staterr & E1000_RXD_STAT_DD) { 3821 while (staterr & E1000_RXD_STAT_DD) {
3736 buffer_info = &rx_ring->buffer_info[i];
3737 ps_page = &rx_ring->ps_page[i]; 3822 ps_page = &rx_ring->ps_page[i];
3738 ps_page_dma = &rx_ring->ps_page_dma[i]; 3823 ps_page_dma = &rx_ring->ps_page_dma[i];
3739#ifdef CONFIG_E1000_NAPI 3824#ifdef CONFIG_E1000_NAPI
3740 if(unlikely(*work_done >= work_to_do)) 3825 if (unlikely(*work_done >= work_to_do))
3741 break; 3826 break;
3742 (*work_done)++; 3827 (*work_done)++;
3743#endif 3828#endif
3829 skb = buffer_info->skb;
3830
3831 if (++i == rx_ring->count) i = 0;
3832 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
3833 next_buffer = &rx_ring->buffer_info[i];
3834 next_skb = next_buffer->skb;
3835
3744 cleaned = TRUE; 3836 cleaned = TRUE;
3745 cleaned_count++; 3837 cleaned_count++;
3746 pci_unmap_single(pdev, buffer_info->dma, 3838 pci_unmap_single(pdev, buffer_info->dma,
3747 buffer_info->length, 3839 buffer_info->length,
3748 PCI_DMA_FROMDEVICE); 3840 PCI_DMA_FROMDEVICE);
3749 3841
3750 skb = buffer_info->skb; 3842 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3751
3752 if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3753 E1000_DBG("%s: Packet Split buffers didn't pick up" 3843 E1000_DBG("%s: Packet Split buffers didn't pick up"
3754 " the full packet\n", netdev->name); 3844 " the full packet\n", netdev->name);
3755 dev_kfree_skb_irq(skb); 3845 dev_kfree_skb_irq(skb);
3756 goto next_desc; 3846 goto next_desc;
3757 } 3847 }
3758 3848
3759 if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3849 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3760 dev_kfree_skb_irq(skb); 3850 dev_kfree_skb_irq(skb);
3761 goto next_desc; 3851 goto next_desc;
3762 } 3852 }
3763 3853
3764 length = le16_to_cpu(rx_desc->wb.middle.length0); 3854 length = le16_to_cpu(rx_desc->wb.middle.length0);
3765 3855
3766 if(unlikely(!length)) { 3856 if (unlikely(!length)) {
3767 E1000_DBG("%s: Last part of the packet spanning" 3857 E1000_DBG("%s: Last part of the packet spanning"
3768 " multiple descriptors\n", netdev->name); 3858 " multiple descriptors\n", netdev->name);
3769 dev_kfree_skb_irq(skb); 3859 dev_kfree_skb_irq(skb);
@@ -3773,8 +3863,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3773 /* Good Receive */ 3863 /* Good Receive */
3774 skb_put(skb, length); 3864 skb_put(skb, length);
3775 3865
3776 for(j = 0; j < adapter->rx_ps_pages; j++) { 3866 for (j = 0; j < adapter->rx_ps_pages; j++) {
3777 if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) 3867 if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
3778 break; 3868 break;
3779 3869
3780 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], 3870 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
@@ -3794,15 +3884,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3794 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 3884 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
3795 skb->protocol = eth_type_trans(skb, netdev); 3885 skb->protocol = eth_type_trans(skb, netdev);
3796 3886
3797 if(likely(rx_desc->wb.upper.header_status & 3887 if (likely(rx_desc->wb.upper.header_status &
3798 E1000_RXDPS_HDRSTAT_HDRSP)) { 3888 E1000_RXDPS_HDRSTAT_HDRSP))
3799 adapter->rx_hdr_split++; 3889 adapter->rx_hdr_split++;
3800#ifdef HAVE_RX_ZERO_COPY
3801 skb_shinfo(skb)->zero_copy = TRUE;
3802#endif
3803 }
3804#ifdef CONFIG_E1000_NAPI 3890#ifdef CONFIG_E1000_NAPI
3805 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3891 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3806 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3892 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3807 le16_to_cpu(rx_desc->wb.middle.vlan) & 3893 le16_to_cpu(rx_desc->wb.middle.vlan) &
3808 E1000_RXD_SPC_VLAN_MASK); 3894 E1000_RXD_SPC_VLAN_MASK);
@@ -3810,7 +3896,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3810 netif_receive_skb(skb); 3896 netif_receive_skb(skb);
3811 } 3897 }
3812#else /* CONFIG_E1000_NAPI */ 3898#else /* CONFIG_E1000_NAPI */
3813 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3899 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3814 vlan_hwaccel_rx(skb, adapter->vlgrp, 3900 vlan_hwaccel_rx(skb, adapter->vlgrp,
3815 le16_to_cpu(rx_desc->wb.middle.vlan) & 3901 le16_to_cpu(rx_desc->wb.middle.vlan) &
3816 E1000_RXD_SPC_VLAN_MASK); 3902 E1000_RXD_SPC_VLAN_MASK);
@@ -3834,6 +3920,9 @@ next_desc:
3834 cleaned_count = 0; 3920 cleaned_count = 0;
3835 } 3921 }
3836 3922
3923 rx_desc = next_rxd;
3924 buffer_info = next_buffer;
3925
3837 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3926 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3838 } 3927 }
3839 rx_ring->next_to_clean = i; 3928 rx_ring->next_to_clean = i;
@@ -3875,7 +3964,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3875 } 3964 }
3876 3965
3877 3966
3878 if(unlikely(!skb)) { 3967 if (unlikely(!skb)) {
3879 /* Better luck next round */ 3968 /* Better luck next round */
3880 adapter->alloc_rx_buff_failed++; 3969 adapter->alloc_rx_buff_failed++;
3881 break; 3970 break;
@@ -3940,20 +4029,23 @@ map_skb:
3940 rx_desc = E1000_RX_DESC(*rx_ring, i); 4029 rx_desc = E1000_RX_DESC(*rx_ring, i);
3941 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4030 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3942 4031
3943 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 4032 if (unlikely(++i == rx_ring->count))
3944 /* Force memory writes to complete before letting h/w 4033 i = 0;
3945 * know there are new descriptors to fetch. (Only
3946 * applicable for weak-ordered memory model archs,
3947 * such as IA-64). */
3948 wmb();
3949 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
3950 }
3951
3952 if(unlikely(++i == rx_ring->count)) i = 0;
3953 buffer_info = &rx_ring->buffer_info[i]; 4034 buffer_info = &rx_ring->buffer_info[i];
3954 } 4035 }
3955 4036
3956 rx_ring->next_to_use = i; 4037 if (likely(rx_ring->next_to_use != i)) {
4038 rx_ring->next_to_use = i;
4039 if (unlikely(i-- == 0))
4040 i = (rx_ring->count - 1);
4041
4042 /* Force memory writes to complete before letting h/w
4043 * know there are new descriptors to fetch. (Only
4044 * applicable for weak-ordered memory model archs,
4045 * such as IA-64). */
4046 wmb();
4047 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4048 }
3957} 4049}
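The refill loop no longer pokes RDT every E1000_RX_BUFFER_WRITE descriptors; it publishes once per pass, pointing tail at the last descriptor actually written (hence the wrap-aware i--) and fencing with wmb() so descriptor contents are globally visible before the doorbell. The pattern, as a standalone sketch with hypothetical ring fields:

struct rx_ring_sketch {
        unsigned int next_to_use;
        unsigned int count;
        void __iomem *tail_reg;     /* maps to hw_addr + rdt above */
};

static void publish_rx_tail(struct rx_ring_sketch *r, unsigned int i)
{
        unsigned int tail;

        if (r->next_to_use == i)
                return;                        /* nothing new this pass  */
        r->next_to_use = i;
        tail = i ? i - 1 : r->count - 1;       /* last written descriptor */
        wmb();                                 /* data before doorbell    */
        writel(tail, r->tail_reg);
}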
3958 4050
3959/** 4051/**
@@ -3983,13 +4075,15 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3983 while (cleaned_count--) { 4075 while (cleaned_count--) {
3984 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 4076 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3985 4077
3986 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 4078 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
3987 if (j < adapter->rx_ps_pages) { 4079 if (j < adapter->rx_ps_pages) {
3988 if (likely(!ps_page->ps_page[j])) { 4080 if (likely(!ps_page->ps_page[j])) {
3989 ps_page->ps_page[j] = 4081 ps_page->ps_page[j] =
3990 alloc_page(GFP_ATOMIC); 4082 alloc_page(GFP_ATOMIC);
3991 if (unlikely(!ps_page->ps_page[j])) 4083 if (unlikely(!ps_page->ps_page[j])) {
4084 adapter->alloc_rx_buff_failed++;
3992 goto no_buffers; 4085 goto no_buffers;
4086 }
3993 ps_page_dma->ps_page_dma[j] = 4087 ps_page_dma->ps_page_dma[j] =
3994 pci_map_page(pdev, 4088 pci_map_page(pdev,
3995 ps_page->ps_page[j], 4089 ps_page->ps_page[j],
@@ -3997,7 +4091,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3997 PCI_DMA_FROMDEVICE); 4091 PCI_DMA_FROMDEVICE);
3998 } 4092 }
3999 /* Refresh the desc even if buffer_addrs didn't 4093 /* Refresh the desc even if buffer_addrs didn't
4000 * change because each write-back erases 4094 * change because each write-back erases
4001 * this info. 4095 * this info.
4002 */ 4096 */
4003 rx_desc->read.buffer_addr[j+1] = 4097 rx_desc->read.buffer_addr[j+1] =
@@ -4008,8 +4102,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4008 4102
4009 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4103 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
4010 4104
4011 if(unlikely(!skb)) 4105 if (unlikely(!skb)) {
4106 adapter->alloc_rx_buff_failed++;
4012 break; 4107 break;
4108 }
4013 4109
4014 /* Make buffer alignment 2 beyond a 16 byte boundary 4110 /* Make buffer alignment 2 beyond a 16 byte boundary
4015 * this will result in a 16 byte aligned IP header after 4111 * this will result in a 16 byte aligned IP header after
@@ -4027,27 +4123,28 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4027 4123
4028 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 4124 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
4029 4125
4030 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 4126 if (unlikely(++i == rx_ring->count)) i = 0;
4031 /* Force memory writes to complete before letting h/w
4032 * know there are new descriptors to fetch. (Only
4033 * applicable for weak-ordered memory model archs,
4034 * such as IA-64). */
4035 wmb();
4036 /* Hardware increments by 16 bytes, but packet split
4037 * descriptors are 32 bytes...so we increment tail
4038 * twice as much.
4039 */
4040 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
4041 }
4042
4043 if(unlikely(++i == rx_ring->count)) i = 0;
4044 buffer_info = &rx_ring->buffer_info[i]; 4127 buffer_info = &rx_ring->buffer_info[i];
4045 ps_page = &rx_ring->ps_page[i]; 4128 ps_page = &rx_ring->ps_page[i];
4046 ps_page_dma = &rx_ring->ps_page_dma[i]; 4129 ps_page_dma = &rx_ring->ps_page_dma[i];
4047 } 4130 }
4048 4131
4049no_buffers: 4132no_buffers:
4050 rx_ring->next_to_use = i; 4133 if (likely(rx_ring->next_to_use != i)) {
4134 rx_ring->next_to_use = i;
4135 if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
4136
4137 /* Force memory writes to complete before letting h/w
4138 * know there are new descriptors to fetch. (Only
4139 * applicable for weak-ordered memory model archs,
4140 * such as IA-64). */
4141 wmb();
4142 /* Hardware increments by 16 bytes, but packet split
4143 * descriptors are 32 bytes...so we increment tail
4144 * twice as much.
4145 */
4146 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
4147 }
4051} 4148}
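Same batched-publish pattern as the legacy ring, with one twist called out in the comment: RDT steps in 16-byte descriptor units while packet-split descriptors are 32 bytes, so the index is doubled before the write. A one-line sketch of that mapping:

/* Packet-split RDT value for ring index i: descriptors are 32 bytes
 * but the register counts 16-byte units, so i maps to i << 1
 * (e.g. descriptor 5 -> register value 10). */
static inline uint32_t ps_rdt_value(uint32_t i)
{
        return i << 1;
}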
4052 4149
4053/** 4150/**
@@ -4061,24 +4158,24 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4061 uint16_t phy_status; 4158 uint16_t phy_status;
4062 uint16_t phy_ctrl; 4159 uint16_t phy_ctrl;
4063 4160
4064 if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || 4161 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
4065 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) 4162 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
4066 return; 4163 return;
4067 4164
4068 if(adapter->smartspeed == 0) { 4165 if (adapter->smartspeed == 0) {
4069 /* If Master/Slave config fault is asserted twice, 4166 /* If Master/Slave config fault is asserted twice,
4070 * we assume back-to-back */ 4167 * we assume back-to-back */
4071 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4168 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4072 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4169 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4073 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4170 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4074 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4171 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4075 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4172 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4076 if(phy_ctrl & CR_1000T_MS_ENABLE) { 4173 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4077 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4174 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4078 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, 4175 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
4079 phy_ctrl); 4176 phy_ctrl);
4080 adapter->smartspeed++; 4177 adapter->smartspeed++;
4081 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4178 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4082 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, 4179 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
4083 &phy_ctrl)) { 4180 &phy_ctrl)) {
4084 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4181 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
@@ -4088,12 +4185,12 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4088 } 4185 }
4089 } 4186 }
4090 return; 4187 return;
4091 } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4188 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4092 /* If still no link, perhaps using 2/3 pair cable */ 4189 /* If still no link, perhaps using 2/3 pair cable */
4093 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4190 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4094 phy_ctrl |= CR_1000T_MS_ENABLE; 4191 phy_ctrl |= CR_1000T_MS_ENABLE;
4095 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); 4192 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
4096 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4193 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4097 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { 4194 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
4098 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4195 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4099 MII_CR_RESTART_AUTO_NEG); 4196 MII_CR_RESTART_AUTO_NEG);
@@ -4101,7 +4198,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4101 } 4198 }
4102 } 4199 }
4103 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4200 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4104 if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4201 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4105 adapter->smartspeed = 0; 4202 adapter->smartspeed = 0;
4106} 4203}
4107 4204
@@ -4142,7 +4239,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4142 uint16_t spddplx; 4239 uint16_t spddplx;
4143 unsigned long flags; 4240 unsigned long flags;
4144 4241
4145 if(adapter->hw.media_type != e1000_media_type_copper) 4242 if (adapter->hw.media_type != e1000_media_type_copper)
4146 return -EOPNOTSUPP; 4243 return -EOPNOTSUPP;
4147 4244
4148 switch (cmd) { 4245 switch (cmd) {
@@ -4150,10 +4247,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4150 data->phy_id = adapter->hw.phy_addr; 4247 data->phy_id = adapter->hw.phy_addr;
4151 break; 4248 break;
4152 case SIOCGMIIREG: 4249 case SIOCGMIIREG:
4153 if(!capable(CAP_NET_ADMIN)) 4250 if (!capable(CAP_NET_ADMIN))
4154 return -EPERM; 4251 return -EPERM;
4155 spin_lock_irqsave(&adapter->stats_lock, flags); 4252 spin_lock_irqsave(&adapter->stats_lock, flags);
4156 if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, 4253 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4157 &data->val_out)) { 4254 &data->val_out)) {
4158 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4255 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4159 return -EIO; 4256 return -EIO;
@@ -4161,23 +4258,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4161 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4258 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4162 break; 4259 break;
4163 case SIOCSMIIREG: 4260 case SIOCSMIIREG:
4164 if(!capable(CAP_NET_ADMIN)) 4261 if (!capable(CAP_NET_ADMIN))
4165 return -EPERM; 4262 return -EPERM;
4166 if(data->reg_num & ~(0x1F)) 4263 if (data->reg_num & ~(0x1F))
4167 return -EFAULT; 4264 return -EFAULT;
4168 mii_reg = data->val_in; 4265 mii_reg = data->val_in;
4169 spin_lock_irqsave(&adapter->stats_lock, flags); 4266 spin_lock_irqsave(&adapter->stats_lock, flags);
4170 if(e1000_write_phy_reg(&adapter->hw, data->reg_num, 4267 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
4171 mii_reg)) { 4268 mii_reg)) {
4172 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4269 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4173 return -EIO; 4270 return -EIO;
4174 } 4271 }
4175 if(adapter->hw.phy_type == e1000_phy_m88) { 4272 if (adapter->hw.phy_type == e1000_phy_m88) {
4176 switch (data->reg_num) { 4273 switch (data->reg_num) {
4177 case PHY_CTRL: 4274 case PHY_CTRL:
4178 if(mii_reg & MII_CR_POWER_DOWN) 4275 if (mii_reg & MII_CR_POWER_DOWN)
4179 break; 4276 break;
4180 if(mii_reg & MII_CR_AUTO_NEG_EN) { 4277 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4181 adapter->hw.autoneg = 1; 4278 adapter->hw.autoneg = 1;
4182 adapter->hw.autoneg_advertised = 0x2F; 4279 adapter->hw.autoneg_advertised = 0x2F;
4183 } else { 4280 } else {
@@ -4192,14 +4289,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4192 HALF_DUPLEX; 4289 HALF_DUPLEX;
4193 retval = e1000_set_spd_dplx(adapter, 4290 retval = e1000_set_spd_dplx(adapter,
4194 spddplx); 4291 spddplx);
4195 if(retval) { 4292 if (retval) {
4196 spin_unlock_irqrestore( 4293 spin_unlock_irqrestore(
4197 &adapter->stats_lock, 4294 &adapter->stats_lock,
4198 flags); 4295 flags);
4199 return retval; 4296 return retval;
4200 } 4297 }
4201 } 4298 }
4202 if(netif_running(adapter->netdev)) { 4299 if (netif_running(adapter->netdev)) {
4203 e1000_down(adapter); 4300 e1000_down(adapter);
4204 e1000_up(adapter); 4301 e1000_up(adapter);
4205 } else 4302 } else
@@ -4207,7 +4304,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4207 break; 4304 break;
4208 case M88E1000_PHY_SPEC_CTRL: 4305 case M88E1000_PHY_SPEC_CTRL:
4209 case M88E1000_EXT_PHY_SPEC_CTRL: 4306 case M88E1000_EXT_PHY_SPEC_CTRL:
4210 if(e1000_phy_reset(&adapter->hw)) { 4307 if (e1000_phy_reset(&adapter->hw)) {
4211 spin_unlock_irqrestore( 4308 spin_unlock_irqrestore(
4212 &adapter->stats_lock, flags); 4309 &adapter->stats_lock, flags);
4213 return -EIO; 4310 return -EIO;
@@ -4217,9 +4314,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4217 } else { 4314 } else {
4218 switch (data->reg_num) { 4315 switch (data->reg_num) {
4219 case PHY_CTRL: 4316 case PHY_CTRL:
4220 if(mii_reg & MII_CR_POWER_DOWN) 4317 if (mii_reg & MII_CR_POWER_DOWN)
4221 break; 4318 break;
4222 if(netif_running(adapter->netdev)) { 4319 if (netif_running(adapter->netdev)) {
4223 e1000_down(adapter); 4320 e1000_down(adapter);
4224 e1000_up(adapter); 4321 e1000_up(adapter);
4225 } else 4322 } else
@@ -4241,7 +4338,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
4241 struct e1000_adapter *adapter = hw->back; 4338 struct e1000_adapter *adapter = hw->back;
4242 int ret_val = pci_set_mwi(adapter->pdev); 4339 int ret_val = pci_set_mwi(adapter->pdev);
4243 4340
4244 if(ret_val) 4341 if (ret_val)
4245 DPRINTK(PROBE, ERR, "Error in setting MWI\n"); 4342 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
4246} 4343}
4247 4344
@@ -4290,7 +4387,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4290 e1000_irq_disable(adapter); 4387 e1000_irq_disable(adapter);
4291 adapter->vlgrp = grp; 4388 adapter->vlgrp = grp;
4292 4389
4293 if(grp) { 4390 if (grp) {
4294 /* enable VLAN tag insert/strip */ 4391 /* enable VLAN tag insert/strip */
4295 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4392 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4296 ctrl |= E1000_CTRL_VME; 4393 ctrl |= E1000_CTRL_VME;
@@ -4312,7 +4409,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4312 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4409 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4313 rctl &= ~E1000_RCTL_VFE; 4410 rctl &= ~E1000_RCTL_VFE;
4314 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4411 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4315 if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { 4412 if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
4316 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4413 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4317 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4414 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4318 } 4415 }
@@ -4326,9 +4423,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
4326{ 4423{
4327 struct e1000_adapter *adapter = netdev_priv(netdev); 4424 struct e1000_adapter *adapter = netdev_priv(netdev);
4328 uint32_t vfta, index; 4425 uint32_t vfta, index;
4329 if((adapter->hw.mng_cookie.status & 4426
4330 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4427 if ((adapter->hw.mng_cookie.status &
4331 (vid == adapter->mng_vlan_id)) 4428 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4429 (vid == adapter->mng_vlan_id))
4332 return; 4430 return;
4333 /* add VID to filter table */ 4431 /* add VID to filter table */
4334 index = (vid >> 5) & 0x7F; 4432 index = (vid >> 5) & 0x7F;
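The index math above addresses the 4096-entry VLAN filter table as 128 32-bit registers: vid >> 5 picks the dword and vid & 0x1F the bit within it (the kill path presumably clears the same bit). A sketch of both halves:

#include <stdint.h>

/* vid 100 -> dword 3, bit 4; vid 4095 -> dword 127, bit 31. */
static void vfta_locate(uint16_t vid, uint32_t *index, uint32_t *mask)
{
        *index = (vid >> 5) & 0x7F;    /* which of 128 dwords  */
        *mask  = 1u << (vid & 0x1F);   /* which bit inside it  */
}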
@@ -4345,13 +4443,13 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4345 4443
4346 e1000_irq_disable(adapter); 4444 e1000_irq_disable(adapter);
4347 4445
4348 if(adapter->vlgrp) 4446 if (adapter->vlgrp)
4349 adapter->vlgrp->vlan_devices[vid] = NULL; 4447 adapter->vlgrp->vlan_devices[vid] = NULL;
4350 4448
4351 e1000_irq_enable(adapter); 4449 e1000_irq_enable(adapter);
4352 4450
4353 if((adapter->hw.mng_cookie.status & 4451 if ((adapter->hw.mng_cookie.status &
4354 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4452 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4355 (vid == adapter->mng_vlan_id)) { 4453 (vid == adapter->mng_vlan_id)) {
4356 /* release control to f/w */ 4454 /* release control to f/w */
4357 e1000_release_hw_control(adapter); 4455 e1000_release_hw_control(adapter);
@@ -4370,10 +4468,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
4370{ 4468{
4371 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); 4469 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4372 4470
4373 if(adapter->vlgrp) { 4471 if (adapter->vlgrp) {
4374 uint16_t vid; 4472 uint16_t vid;
4375 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 4473 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4376 if(!adapter->vlgrp->vlan_devices[vid]) 4474 if (!adapter->vlgrp->vlan_devices[vid])
4377 continue; 4475 continue;
4378 e1000_vlan_rx_add_vid(adapter->netdev, vid); 4476 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4379 } 4477 }
@@ -4386,13 +4484,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4386 adapter->hw.autoneg = 0; 4484 adapter->hw.autoneg = 0;
4387 4485
4388 /* Fiber NICs only allow 1000 Mbps full duplex */ 4486 /* Fiber NICs only allow 1000 Mbps full duplex */
4389 if((adapter->hw.media_type == e1000_media_type_fiber) && 4487 if ((adapter->hw.media_type == e1000_media_type_fiber) &&
4390 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 4488 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4391 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 4489 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
4392 return -EINVAL; 4490 return -EINVAL;
4393 } 4491 }
4394 4492
4395 switch(spddplx) { 4493 switch (spddplx) {
4396 case SPEED_10 + DUPLEX_HALF: 4494 case SPEED_10 + DUPLEX_HALF:
4397 adapter->hw.forced_speed_duplex = e1000_10_half; 4495 adapter->hw.forced_speed_duplex = e1000_10_half;
4398 break; 4496 break;
@@ -4418,6 +4516,54 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4418} 4516}
4419 4517
4420#ifdef CONFIG_PM 4518#ifdef CONFIG_PM
4519/* these functions save and restore 16 or 64 dwords (64-256 bytes) of config
4520 * space versus the 64 bytes that pci_[save|restore]_state handle
4521 */
4522#define PCIE_CONFIG_SPACE_LEN 256
4523#define PCI_CONFIG_SPACE_LEN 64
4524static int
4525e1000_pci_save_state(struct e1000_adapter *adapter)
4526{
4527 struct pci_dev *dev = adapter->pdev;
4528 int size;
4529 int i;
4530 if (adapter->hw.mac_type >= e1000_82571)
4531 size = PCIE_CONFIG_SPACE_LEN;
4532 else
4533 size = PCI_CONFIG_SPACE_LEN;
4534
4535 WARN_ON(adapter->config_space != NULL);
4536
4537 adapter->config_space = kmalloc(size, GFP_KERNEL);
4538 if (!adapter->config_space) {
4539 DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
4540 return -ENOMEM;
4541 }
4542 for (i = 0; i < (size / 4); i++)
4543 pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
4544 return 0;
4545}
4546
4547static void
4548e1000_pci_restore_state(struct e1000_adapter *adapter)
4549{
4550 struct pci_dev *dev = adapter->pdev;
4551 int size;
4552 int i;
4553 if (adapter->config_space == NULL)
4554 return;
4555 if (adapter->hw.mac_type >= e1000_82571)
4556 size = PCIE_CONFIG_SPACE_LEN;
4557 else
4558 size = PCI_CONFIG_SPACE_LEN;
4559 for (i = 0; i < (size / 4); i++)
4560 pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
4561 kfree(adapter->config_space);
4562 adapter->config_space = NULL;
4563 return;
4564}
4565#endif /* CONFIG_PM */
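Two design points in the helpers above are worth noting: the buffer is sized by mac_type rather than probed (82571 and later are the PCI Express parts with the larger space this patch cares about), and the WARN_ON catches a suspend that runs twice without an intervening resume, which would otherwise leak the previously saved buffer. The sizing rule, as a sketch:

/* 64 bytes (16 dwords) for conventional PCI parts, 256 bytes
 * (64 dwords) for PCI Express ones, matching the constants above. */
static int config_space_dwords(int is_pcie)
{
        return (is_pcie ? 256 : 64) / 4;
}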
4566
4421static int 4567static int
4422e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4568e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4423{ 4569{
@@ -4429,25 +4575,33 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4429 4575
4430 netif_device_detach(netdev); 4576 netif_device_detach(netdev);
4431 4577
4432 if(netif_running(netdev)) 4578 if (netif_running(netdev))
4433 e1000_down(adapter); 4579 e1000_down(adapter);
4434 4580
4581#ifdef CONFIG_PM
4582 /* implement our own version of pci_save_state(pdev) because pci
4583 * express adapters have larger 256 byte config spaces */
4584 retval = e1000_pci_save_state(adapter);
4585 if (retval)
4586 return retval;
4587#endif
4588
4435 status = E1000_READ_REG(&adapter->hw, STATUS); 4589 status = E1000_READ_REG(&adapter->hw, STATUS);
4436 if(status & E1000_STATUS_LU) 4590 if (status & E1000_STATUS_LU)
4437 wufc &= ~E1000_WUFC_LNKC; 4591 wufc &= ~E1000_WUFC_LNKC;
4438 4592
4439 if(wufc) { 4593 if (wufc) {
4440 e1000_setup_rctl(adapter); 4594 e1000_setup_rctl(adapter);
4441 e1000_set_multi(netdev); 4595 e1000_set_multi(netdev);
4442 4596
4443 /* turn on all-multi mode if wake on multicast is enabled */ 4597 /* turn on all-multi mode if wake on multicast is enabled */
4444 if(adapter->wol & E1000_WUFC_MC) { 4598 if (adapter->wol & E1000_WUFC_MC) {
4445 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4599 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4446 rctl |= E1000_RCTL_MPE; 4600 rctl |= E1000_RCTL_MPE;
4447 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4601 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4448 } 4602 }
4449 4603
4450 if(adapter->hw.mac_type >= e1000_82540) { 4604 if (adapter->hw.mac_type >= e1000_82540) {
4451 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4605 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4452 /* advertise wake from D3Cold */ 4606 /* advertise wake from D3Cold */
4453 #define E1000_CTRL_ADVD3WUC 0x00100000 4607 #define E1000_CTRL_ADVD3WUC 0x00100000
@@ -4458,7 +4612,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4458 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); 4612 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4459 } 4613 }
4460 4614
4461 if(adapter->hw.media_type == e1000_media_type_fiber || 4615 if (adapter->hw.media_type == e1000_media_type_fiber ||
4462 adapter->hw.media_type == e1000_media_type_internal_serdes) { 4616 adapter->hw.media_type == e1000_media_type_internal_serdes) {
4463 /* keep the laser running in D3 */ 4617 /* keep the laser running in D3 */
4464 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 4618 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
@@ -4488,12 +4642,10 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4488 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); 4642 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4489 } 4643 }
4490 4644
4491 pci_save_state(pdev); 4645 if (adapter->hw.mac_type >= e1000_82540 &&
4492
4493 if(adapter->hw.mac_type >= e1000_82540 &&
4494 adapter->hw.media_type == e1000_media_type_copper) { 4646 adapter->hw.media_type == e1000_media_type_copper) {
4495 manc = E1000_READ_REG(&adapter->hw, MANC); 4647 manc = E1000_READ_REG(&adapter->hw, MANC);
4496 if(manc & E1000_MANC_SMBUS_EN) { 4648 if (manc & E1000_MANC_SMBUS_EN) {
4497 manc |= E1000_MANC_ARP_EN; 4649 manc |= E1000_MANC_ARP_EN;
4498 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4650 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4499 retval = pci_enable_wake(pdev, PCI_D3hot, 1); 4651 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
@@ -4518,6 +4670,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4518 return 0; 4670 return 0;
4519} 4671}
4520 4672
4673#ifdef CONFIG_PM
4521static int 4674static int
4522e1000_resume(struct pci_dev *pdev) 4675e1000_resume(struct pci_dev *pdev)
4523{ 4676{
@@ -4529,6 +4682,7 @@ e1000_resume(struct pci_dev *pdev)
4529 retval = pci_set_power_state(pdev, PCI_D0); 4682 retval = pci_set_power_state(pdev, PCI_D0);
4530 if (retval) 4683 if (retval)
4531 DPRINTK(PROBE, ERR, "Error in setting power state\n"); 4684 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4685 e1000_pci_restore_state(adapter);
4532 ret_val = pci_enable_device(pdev); 4686 ret_val = pci_enable_device(pdev);
4533 pci_set_master(pdev); 4687 pci_set_master(pdev);
4534 4688
@@ -4542,12 +4696,12 @@ e1000_resume(struct pci_dev *pdev)
4542 e1000_reset(adapter); 4696 e1000_reset(adapter);
4543 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 4697 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
4544 4698
4545 if(netif_running(netdev)) 4699 if (netif_running(netdev))
4546 e1000_up(adapter); 4700 e1000_up(adapter);
4547 4701
4548 netif_device_attach(netdev); 4702 netif_device_attach(netdev);
4549 4703
4550 if(adapter->hw.mac_type >= e1000_82540 && 4704 if (adapter->hw.mac_type >= e1000_82540 &&
4551 adapter->hw.media_type == e1000_media_type_copper) { 4705 adapter->hw.media_type == e1000_media_type_copper) {
4552 manc = E1000_READ_REG(&adapter->hw, MANC); 4706 manc = E1000_READ_REG(&adapter->hw, MANC);
4553 manc &= ~(E1000_MANC_ARP_EN); 4707 manc &= ~(E1000_MANC_ARP_EN);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index aac64de61437..9790db974dc1 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -47,7 +47,7 @@
47 BUG(); \ 47 BUG(); \
48 } else { \ 48 } else { \
49 msleep(x); \ 49 msleep(x); \
50 } } while(0) 50 } } while (0)
51 51
52/* Some workarounds require millisecond delays and are run during interrupt 52/* Some workarounds require millisecond delays and are run during interrupt
53 * context. Most notably, when establishing link, the phy may need tweaking 53 * context. Most notably, when establishing link, the phy may need tweaking
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 0a7918c62557..3768d83cd577 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -227,7 +227,7 @@ static int __devinit
227e1000_validate_option(int *value, struct e1000_option *opt, 227e1000_validate_option(int *value, struct e1000_option *opt,
228 struct e1000_adapter *adapter) 228 struct e1000_adapter *adapter)
229{ 229{
230 if(*value == OPTION_UNSET) { 230 if (*value == OPTION_UNSET) {
231 *value = opt->def; 231 *value = opt->def;
232 return 0; 232 return 0;
233 } 233 }
@@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
244 } 244 }
245 break; 245 break;
246 case range_option: 246 case range_option:
247 if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 247 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
248 DPRINTK(PROBE, INFO, 248 DPRINTK(PROBE, INFO,
249 "%s set to %i\n", opt->name, *value); 249 "%s set to %i\n", opt->name, *value);
250 return 0; 250 return 0;
@@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt,
254 int i; 254 int i;
255 struct e1000_opt_list *ent; 255 struct e1000_opt_list *ent;
256 256
257 for(i = 0; i < opt->arg.l.nr; i++) { 257 for (i = 0; i < opt->arg.l.nr; i++) {
258 ent = &opt->arg.l.p[i]; 258 ent = &opt->arg.l.p[i];
259 if(*value == ent->i) { 259 if (*value == ent->i) {
260 if(ent->str[0] != '\0') 260 if (ent->str[0] != '\0')
261 DPRINTK(PROBE, INFO, "%s\n", ent->str); 261 DPRINTK(PROBE, INFO, "%s\n", ent->str);
262 return 0; 262 return 0;
263 } 263 }
@@ -291,7 +291,7 @@ void __devinit
291e1000_check_options(struct e1000_adapter *adapter) 291e1000_check_options(struct e1000_adapter *adapter)
292{ 292{
293 int bd = adapter->bd_number; 293 int bd = adapter->bd_number;
294 if(bd >= E1000_MAX_NIC) { 294 if (bd >= E1000_MAX_NIC) {
295 DPRINTK(PROBE, NOTICE, 295 DPRINTK(PROBE, NOTICE,
296 "Warning: no configuration for board #%i\n", bd); 296 "Warning: no configuration for board #%i\n", bd);
297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); 297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
@@ -315,7 +315,7 @@ e1000_check_options(struct e1000_adapter *adapter)
315 if (num_TxDescriptors > bd) { 315 if (num_TxDescriptors > bd) {
316 tx_ring->count = TxDescriptors[bd]; 316 tx_ring->count = TxDescriptors[bd];
317 e1000_validate_option(&tx_ring->count, &opt, adapter); 317 e1000_validate_option(&tx_ring->count, &opt, adapter);
318 E1000_ROUNDUP(tx_ring->count, 318 E1000_ROUNDUP(tx_ring->count,
319 REQ_TX_DESCRIPTOR_MULTIPLE); 319 REQ_TX_DESCRIPTOR_MULTIPLE);
320 } else { 320 } else {
321 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
@@ -341,7 +341,7 @@ e1000_check_options(struct e1000_adapter *adapter)
341 if (num_RxDescriptors > bd) { 341 if (num_RxDescriptors > bd) {
342 rx_ring->count = RxDescriptors[bd]; 342 rx_ring->count = RxDescriptors[bd];
343 e1000_validate_option(&rx_ring->count, &opt, adapter); 343 e1000_validate_option(&rx_ring->count, &opt, adapter);
344 E1000_ROUNDUP(rx_ring->count, 344 E1000_ROUNDUP(rx_ring->count,
345 REQ_RX_DESCRIPTOR_MULTIPLE); 345 REQ_RX_DESCRIPTOR_MULTIPLE);
346 } else { 346 } else {
347 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
@@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter)
403 403
404 if (num_TxIntDelay > bd) { 404 if (num_TxIntDelay > bd) {
405 adapter->tx_int_delay = TxIntDelay[bd]; 405 adapter->tx_int_delay = TxIntDelay[bd];
406 e1000_validate_option(&adapter->tx_int_delay, &opt, 406 e1000_validate_option(&adapter->tx_int_delay, &opt,
407 adapter); 407 adapter);
408 } else { 408 } else {
409 adapter->tx_int_delay = opt.def; 409 adapter->tx_int_delay = opt.def;
@@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter)
421 421
422 if (num_TxAbsIntDelay > bd) { 422 if (num_TxAbsIntDelay > bd) {
423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; 423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
425 adapter); 425 adapter);
426 } else { 426 } else {
427 adapter->tx_abs_int_delay = opt.def; 427 adapter->tx_abs_int_delay = opt.def;
@@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter)
439 439
440 if (num_RxIntDelay > bd) { 440 if (num_RxIntDelay > bd) {
441 adapter->rx_int_delay = RxIntDelay[bd]; 441 adapter->rx_int_delay = RxIntDelay[bd];
442 e1000_validate_option(&adapter->rx_int_delay, &opt, 442 e1000_validate_option(&adapter->rx_int_delay, &opt,
443 adapter); 443 adapter);
444 } else { 444 } else {
445 adapter->rx_int_delay = opt.def; 445 adapter->rx_int_delay = opt.def;
@@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter)
457 457
458 if (num_RxAbsIntDelay > bd) { 458 if (num_RxAbsIntDelay > bd) {
459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; 459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
461 adapter); 461 adapter);
462 } else { 462 } else {
463 adapter->rx_abs_int_delay = opt.def; 463 adapter->rx_abs_int_delay = opt.def;
@@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter)
475 475
476 if (num_InterruptThrottleRate > bd) { 476 if (num_InterruptThrottleRate > bd) {
477 adapter->itr = InterruptThrottleRate[bd]; 477 adapter->itr = InterruptThrottleRate[bd];
478 switch(adapter->itr) { 478 switch (adapter->itr) {
479 case 0: 479 case 0:
480 DPRINTK(PROBE, INFO, "%s turned off\n", 480 DPRINTK(PROBE, INFO, "%s turned off\n",
481 opt.name); 481 opt.name);
482 break; 482 break;
483 case 1: 483 case 1:
484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
485 opt.name); 485 opt.name);
486 break; 486 break;
487 default: 487 default:
488 e1000_validate_option(&adapter->itr, &opt, 488 e1000_validate_option(&adapter->itr, &opt,
489 adapter); 489 adapter);
490 break; 490 break;
491 } 491 }
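Values 0 and 1 are special-cased above (off and dynamic); anything else is validated as an interrupts-per-second rate. How such a rate presumably becomes hardware state, as a hedged sketch: on e1000 hardware the ITR register is understood to hold the minimum inter-interrupt gap in 256 ns units, an assumption this helper encodes.

#include <stdint.h>

/* Assumption: ITR interval is in 256 ns units, so a target of
 * 8000 ints/s programs 1000000000 / (8000 * 256) = 488. */
static uint32_t itr_reg_from_rate(uint32_t ints_per_sec)
{
        return 1000000000u / (ints_per_sec * 256u);
}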
@@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter)
494 } 494 }
495 } 495 }
496 496
497 switch(adapter->hw.media_type) { 497 switch (adapter->hw.media_type) {
498 case e1000_media_type_fiber: 498 case e1000_media_type_fiber:
499 case e1000_media_type_internal_serdes: 499 case e1000_media_type_internal_serdes:
500 e1000_check_fiber_options(adapter); 500 e1000_check_fiber_options(adapter);
@@ -518,17 +518,17 @@ static void __devinit
518e1000_check_fiber_options(struct e1000_adapter *adapter) 518e1000_check_fiber_options(struct e1000_adapter *adapter)
519{ 519{
520 int bd = adapter->bd_number; 520 int bd = adapter->bd_number;
521 if(num_Speed > bd) { 521 if (num_Speed > bd) {
522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " 522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
523 "parameter ignored\n"); 523 "parameter ignored\n");
524 } 524 }
525 525
526 if(num_Duplex > bd) { 526 if (num_Duplex > bd) {
527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " 527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
528 "parameter ignored\n"); 528 "parameter ignored\n");
529 } 529 }
530 530
531 if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { 531 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " 532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
533 "not valid for fiber adapters, " 533 "not valid for fiber adapters, "
534 "parameter ignored\n"); 534 "parameter ignored\n");
@@ -598,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
598 } 598 }
599 } 599 }
600 600
601 if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { 601 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
602 DPRINTK(PROBE, INFO, 602 DPRINTK(PROBE, INFO,
603 "AutoNeg specified along with Speed or Duplex, " 603 "AutoNeg specified along with Speed or Duplex, "
604 "parameter ignored\n"); 604 "parameter ignored\n");
@@ -659,7 +659,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
659 switch (speed + dplx) { 659 switch (speed + dplx) {
660 case 0: 660 case 0:
661 adapter->hw.autoneg = adapter->fc_autoneg = 1; 661 adapter->hw.autoneg = adapter->fc_autoneg = 1;
662 if((num_Speed > bd) && (speed != 0 || dplx != 0)) 662 if ((num_Speed > bd) && (speed != 0 || dplx != 0))
663 DPRINTK(PROBE, INFO, 663 DPRINTK(PROBE, INFO,
664 "Speed and duplex autonegotiation enabled\n"); 664 "Speed and duplex autonegotiation enabled\n");
665 break; 665 break;