-rw-r--r--  drivers/net/e1000/e1000_ethtool.c  307
-rw-r--r--  drivers/net/e1000/e1000_hw.c        18
-rw-r--r--  drivers/net/e1000/e1000_hw.h        17
-rw-r--r--  drivers/net/e1000/e1000_main.c     496
-rw-r--r--  drivers/net/e1000/e1000_osdep.h      2
-rw-r--r--  drivers/net/e1000/e1000_param.c     44
6 files changed, 443 insertions, 441 deletions
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d252297e4db0..5cedc81786e3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -121,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	if(hw->media_type == e1000_media_type_copper) {
+	if (hw->media_type == e1000_media_type_copper) {
 
 		ecmd->supported = (SUPPORTED_10baseT_Half |
 		                   SUPPORTED_10baseT_Full |
@@ -133,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 		ecmd->advertising = ADVERTISED_TP;
 
-		if(hw->autoneg == 1) {
+		if (hw->autoneg == 1) {
 			ecmd->advertising |= ADVERTISED_Autoneg;
 
 			/* the e1000 autoneg seems to match ethtool nicely */
@@ -144,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		ecmd->port = PORT_TP;
 		ecmd->phy_address = hw->phy_addr;
 
-		if(hw->mac_type == e1000_82543)
+		if (hw->mac_type == e1000_82543)
 			ecmd->transceiver = XCVR_EXTERNAL;
 		else
 			ecmd->transceiver = XCVR_INTERNAL;
@@ -160,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 		ecmd->port = PORT_FIBRE;
 
-		if(hw->mac_type >= e1000_82545)
+		if (hw->mac_type >= e1000_82545)
 			ecmd->transceiver = XCVR_INTERNAL;
 		else
 			ecmd->transceiver = XCVR_EXTERNAL;
 	}
 
-	if(netif_carrier_ok(adapter->netdev)) {
+	if (netif_carrier_ok(adapter->netdev)) {
 
 		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
 		                           &adapter->link_duplex);
@@ -175,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		/* unfortunatly FULL_DUPLEX != DUPLEX_FULL
 		 * and HALF_DUPLEX != DUPLEX_HALF */
 
-		if(adapter->link_duplex == FULL_DUPLEX)
+		if (adapter->link_duplex == FULL_DUPLEX)
 			ecmd->duplex = DUPLEX_FULL;
 		else
 			ecmd->duplex = DUPLEX_HALF;
@@ -205,11 +205,11 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->autoneg = 1;
-		if(hw->media_type == e1000_media_type_fiber)
+		if (hw->media_type == e1000_media_type_fiber)
 			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
 			                         ADVERTISED_FIBRE |
 			                         ADVERTISED_Autoneg;
-		else
+		else
 			hw->autoneg_advertised = ADVERTISED_10baseT_Half |
 			                          ADVERTISED_10baseT_Full |
 			                          ADVERTISED_100baseT_Half |
@@ -219,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 			                          ADVERTISED_TP;
 		ecmd->advertising = hw->autoneg_advertised;
 	} else
-		if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
+		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
 			return -EINVAL;
 
 	/* reset the link */
 
-	if(netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev)) {
 		e1000_down(adapter);
 		e1000_reset(adapter);
 		e1000_up(adapter);
@@ -241,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	pause->autoneg =
+	pause->autoneg =
 		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
 
-	if(hw->fc == e1000_fc_rx_pause)
+	if (hw->fc == e1000_fc_rx_pause)
 		pause->rx_pause = 1;
-	else if(hw->fc == e1000_fc_tx_pause)
+	else if (hw->fc == e1000_fc_tx_pause)
 		pause->tx_pause = 1;
-	else if(hw->fc == e1000_fc_full) {
+	else if (hw->fc == e1000_fc_full) {
 		pause->rx_pause = 1;
 		pause->tx_pause = 1;
 	}
@@ -260,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-
+
 	adapter->fc_autoneg = pause->autoneg;
 
-	if(pause->rx_pause && pause->tx_pause)
+	if (pause->rx_pause && pause->tx_pause)
 		hw->fc = e1000_fc_full;
-	else if(pause->rx_pause && !pause->tx_pause)
+	else if (pause->rx_pause && !pause->tx_pause)
 		hw->fc = e1000_fc_rx_pause;
-	else if(!pause->rx_pause && pause->tx_pause)
+	else if (!pause->rx_pause && pause->tx_pause)
 		hw->fc = e1000_fc_tx_pause;
-	else if(!pause->rx_pause && !pause->tx_pause)
+	else if (!pause->rx_pause && !pause->tx_pause)
 		hw->fc = e1000_fc_none;
 
 	hw->original_fc = hw->fc;
 
-	if(adapter->fc_autoneg == AUTONEG_ENABLE) {
-		if(netif_running(adapter->netdev)) {
+	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+		if (netif_running(adapter->netdev)) {
 			e1000_down(adapter);
 			e1000_up(adapter);
 		} else
 			e1000_reset(adapter);
-	}
-	else
+	} else
 		return ((hw->media_type == e1000_media_type_fiber) ?
 		        e1000_setup_link(hw) : e1000_force_mac_fc(hw));
-
+
 	return 0;
 }
 
@@ -301,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	adapter->rx_csum = data;
 
-	if(netif_running(netdev)) {
+	if (netif_running(netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
 	} else
 		e1000_reset(adapter);
 	return 0;
 }
-
+
 static uint32_t
 e1000_get_tx_csum(struct net_device *netdev)
 {
@@ -320,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if(adapter->hw.mac_type < e1000_82543) {
+	if (adapter->hw.mac_type < e1000_82543) {
 		if (!data)
 			return -EINVAL;
 		return 0;
@@ -339,8 +338,8 @@ static int
 e1000_set_tso(struct net_device *netdev, uint32_t data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	if((adapter->hw.mac_type < e1000_82544) ||
-	   (adapter->hw.mac_type == e1000_82547))
+	if ((adapter->hw.mac_type < e1000_82544) ||
+	    (adapter->hw.mac_type == e1000_82547))
 		return data ? -EINVAL : 0;
 
 	if (data)
@@ -348,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
 	else
 		netdev->features &= ~NETIF_F_TSO;
 	return 0;
-}
+}
 #endif /* NETIF_F_TSO */
 
 static uint32_t
@@ -365,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data)
 	adapter->msg_enable = data;
 }
 
-static int
+static int
 e1000_get_regs_len(struct net_device *netdev)
 {
 #define E1000_REGS_LEN 32
@@ -401,7 +400,7 @@ e1000_get_regs(struct net_device *netdev,
 	regs_buff[11] = E1000_READ_REG(hw, TIDV);
 
 	regs_buff[12] = adapter->hw.phy_type;  /* PHY type (IGP=1, M88=0) */
-	if(hw->phy_type == e1000_phy_igp) {
+	if (hw->phy_type == e1000_phy_igp) {
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
 		                    IGP01E1000_PHY_AGC_A);
 		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
@@ -455,7 +454,7 @@ e1000_get_regs(struct net_device *netdev,
 	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
 	regs_buff[24] = (uint32_t)phy_data;  /* phy local receiver status */
 	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
-	if(hw->mac_type >= e1000_82540 &&
+	if (hw->mac_type >= e1000_82540 &&
 	   hw->media_type == e1000_media_type_copper) {
 		regs_buff[26] = E1000_READ_REG(hw, MANC);
 	}
@@ -479,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev,
 	int ret_val = 0;
 	uint16_t i;
 
-	if(eeprom->len == 0)
+	if (eeprom->len == 0)
 		return -EINVAL;
 
 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -489,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev,
 
 	eeprom_buff = kmalloc(sizeof(uint16_t) *
 			(last_word - first_word + 1), GFP_KERNEL);
-	if(!eeprom_buff)
+	if (!eeprom_buff)
 		return -ENOMEM;
 
-	if(hw->eeprom.type == e1000_eeprom_spi)
+	if (hw->eeprom.type == e1000_eeprom_spi)
 		ret_val = e1000_read_eeprom(hw, first_word,
 		                            last_word - first_word + 1,
 		                            eeprom_buff);
 	else {
 		for (i = 0; i < last_word - first_word + 1; i++)
-			if((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+			if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
 			                                &eeprom_buff[i])))
 				break;
 	}
@@ -525,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev,
 	int max_len, first_word, last_word, ret_val = 0;
 	uint16_t i;
 
-	if(eeprom->len == 0)
+	if (eeprom->len == 0)
 		return -EOPNOTSUPP;
 
-	if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
 		return -EFAULT;
 
 	max_len = hw->eeprom.word_size * 2;
@@ -536,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev,
 	first_word = eeprom->offset >> 1;
 	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
 	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
-	if(!eeprom_buff)
+	if (!eeprom_buff)
 		return -ENOMEM;
 
 	ptr = (void *)eeprom_buff;
 
-	if(eeprom->offset & 1) {
+	if (eeprom->offset & 1) {
 		/* need read/modify/write of first changed EEPROM word */
 		/* only the second byte of the word is being modified */
 		ret_val = e1000_read_eeprom(hw, first_word, 1,
 		                            &eeprom_buff[0]);
 		ptr++;
 	}
-	if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
 		/* need read/modify/write of last changed EEPROM word */
 		/* only the first byte of the word is being modified */
 		ret_val = e1000_read_eeprom(hw, last_word, 1,
@@ -567,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev,
 	ret_val = e1000_write_eeprom(hw, first_word,
 	                             last_word - first_word + 1, eeprom_buff);
 
-	/* Update the checksum over the first part of the EEPROM if needed
-	 * and flush shadow RAM for 82573 conrollers */
-	if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
+	/* Update the checksum over the first part of the EEPROM if needed
+	 * and flush shadow RAM for 82573 conrollers */
+	if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
 	    (hw->mac_type == e1000_82573)))
 		e1000_update_eeprom_checksum(hw);
 
@@ -633,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev,
 	ring->rx_jumbo_pending = 0;
 }
 
-static int
+static int
 e1000_set_ringparam(struct net_device *netdev,
                     struct ethtool_ringparam *ring)
 {
@@ -670,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev,
 	txdr = adapter->tx_ring;
 	rxdr = adapter->rx_ring;
 
-	if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
 	rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
 	rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
 		E1000_MAX_RXD : E1000_MAX_82544_RXD));
 	E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
 	txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
 	txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
 		E1000_MAX_TXD : E1000_MAX_82544_TXD));
 	E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		txdr[i].count = txdr->count;
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		rxdr[i].count = rxdr->count;
 
-	if(netif_running(adapter->netdev)) {
+	if (netif_running(adapter->netdev)) {
 		/* Try to get new resources before deleting old */
 		if ((err = e1000_setup_all_rx_resources(adapter)))
 			goto err_setup_rx;
@@ -708,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev,
 		kfree(rx_old);
 		adapter->rx_ring = rx_new;
 		adapter->tx_ring = tx_new;
-		if((err = e1000_up(adapter)))
+		if ((err = e1000_up(adapter)))
 			return err;
 	}
 
@@ -727,10 +726,10 @@ err_setup_rx:
 	uint32_t pat, value; \
 	uint32_t test[] = \
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
-	for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
+	for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
 		E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \
 		value = E1000_READ_REG(&adapter->hw, R); \
-		if(value != (test[pat] & W & M)) { \
+		if (value != (test[pat] & W & M)) { \
 			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
 				"0x%08X expected 0x%08X\n", \
 				E1000_##R, value, (test[pat] & W & M)); \
@@ -746,7 +745,7 @@ err_setup_rx:
 	uint32_t value; \
 	E1000_WRITE_REG(&adapter->hw, R, W & M); \
 	value = E1000_READ_REG(&adapter->hw, R); \
-	if((W & M) != (value & M)) { \
+	if ((W & M) != (value & M)) { \
 		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
 			"expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
 		*data = (adapter->hw.mac_type < e1000_82543) ? \
@@ -782,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 	value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
 	E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
 	after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
-	if(value != after) {
+	if (value != after) {
 		DPRINTK(DRV, ERR, "failed STATUS register test got: "
 		        "0x%08X expected: 0x%08X\n", after, value);
 		*data = 1;
@@ -810,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 	REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
 	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
 
-	if(adapter->hw.mac_type >= e1000_82543) {
+	if (adapter->hw.mac_type >= e1000_82543) {
 
 		REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
 		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
@@ -818,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
 		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
 
-		for(i = 0; i < E1000_RAR_ENTRIES; i++) {
+		for (i = 0; i < E1000_RAR_ENTRIES; i++) {
 			REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
 			                 0xFFFFFFFF);
 			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -834,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 
 	}
 
-	for(i = 0; i < E1000_MC_TBL_SIZE; i++)
+	for (i = 0; i < E1000_MC_TBL_SIZE; i++)
 		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
 
 	*data = 0;
@@ -850,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
-	for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
-		if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
+	for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+		if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
 			*data = 1;
 			break;
 		}
@@ -859,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 	}
 
 	/* If Checksum is not Correct return error else test passed */
-	if((checksum != (uint16_t) EEPROM_SUM) && !(*data))
+	if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
 		*data = 2;
 
 	return *data;
@@ -888,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
-	if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+	if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
 		shared_int = FALSE;
-	} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
+	} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
 	          netdev->name, netdev)){
 		*data = 1;
 		return -1;
@@ -901,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	msec_delay(10);
 
 	/* Test each interrupt */
-	for(; i < 10; i++) {
+	for (; i < 10; i++) {
 
 		/* Interrupt to test */
 		mask = 1 << i;
 
-		if(!shared_int) {
+		if (!shared_int) {
 			/* Disable the interrupt to be reported in
 			 * the cause register and then force the same
 			 * interrupt and see if one gets posted.  If
@@ -917,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 			E1000_WRITE_REG(&adapter->hw, IMC, mask);
 			E1000_WRITE_REG(&adapter->hw, ICS, mask);
 			msec_delay(10);
-
-			if(adapter->test_icr & mask) {
+
+			if (adapter->test_icr & mask) {
 				*data = 3;
 				break;
 			}
@@ -935,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 		E1000_WRITE_REG(&adapter->hw, ICS, mask);
 		msec_delay(10);
 
-		if(!(adapter->test_icr & mask)) {
+		if (!(adapter->test_icr & mask)) {
 			*data = 4;
 			break;
 		}
 
-		if(!shared_int) {
+		if (!shared_int) {
 			/* Disable the other interrupts to be reported in
 			 * the cause register and then force the other
 			 * interrupts and see if any get posted.  If
@@ -952,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 			E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
 			msec_delay(10);
 
-			if(adapter->test_icr) {
+			if (adapter->test_icr) {
 				*data = 5;
 				break;
 			}
@@ -977,24 +976,24 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int i;
 
-	if(txdr->desc && txdr->buffer_info) {
-		for(i = 0; i < txdr->count; i++) {
-			if(txdr->buffer_info[i].dma)
+	if (txdr->desc && txdr->buffer_info) {
+		for (i = 0; i < txdr->count; i++) {
+			if (txdr->buffer_info[i].dma)
 				pci_unmap_single(pdev, txdr->buffer_info[i].dma,
 				                 txdr->buffer_info[i].length,
 				                 PCI_DMA_TODEVICE);
-			if(txdr->buffer_info[i].skb)
+			if (txdr->buffer_info[i].skb)
 				dev_kfree_skb(txdr->buffer_info[i].skb);
 		}
 	}
 
-	if(rxdr->desc && rxdr->buffer_info) {
-		for(i = 0; i < rxdr->count; i++) {
-			if(rxdr->buffer_info[i].dma)
+	if (rxdr->desc && rxdr->buffer_info) {
+		for (i = 0; i < rxdr->count; i++) {
+			if (rxdr->buffer_info[i].dma)
 				pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
 				                 rxdr->buffer_info[i].length,
 				                 PCI_DMA_FROMDEVICE);
-			if(rxdr->buffer_info[i].skb)
+			if (rxdr->buffer_info[i].skb)
 				dev_kfree_skb(rxdr->buffer_info[i].skb);
 		}
 	}
@@ -1027,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	/* Setup Tx descriptor ring and Tx buffers */
 
-	if(!txdr->count)
+	if (!txdr->count)
 		txdr->count = E1000_DEFAULT_TXD;
 
 	size = txdr->count * sizeof(struct e1000_buffer);
-	if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+	if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
 		ret_val = 1;
 		goto err_nomem;
 	}
@@ -1039,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 	E1000_ROUNDUP(txdr->size, 4096);
-	if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
+	if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
 		ret_val = 2;
 		goto err_nomem;
 	}
@@ -1058,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	                   E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
 	                   E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
 
-	for(i = 0; i < txdr->count; i++) {
+	for (i = 0; i < txdr->count; i++) {
 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
 		struct sk_buff *skb;
 		unsigned int size = 1024;
 
-		if(!(skb = alloc_skb(size, GFP_KERNEL))) {
+		if (!(skb = alloc_skb(size, GFP_KERNEL))) {
 			ret_val = 3;
 			goto err_nomem;
 		}
@@ -1083,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	/* Setup Rx descriptor ring and Rx buffers */
 
-	if(!rxdr->count)
+	if (!rxdr->count)
 		rxdr->count = E1000_DEFAULT_RXD;
 
 	size = rxdr->count * sizeof(struct e1000_buffer);
-	if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+	if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
 		ret_val = 4;
 		goto err_nomem;
 	}
 	memset(rxdr->buffer_info, 0, size);
 
 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
-	if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
+	if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
 		ret_val = 5;
 		goto err_nomem;
 	}
@@ -1114,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	        (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
 	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
 
-	for(i = 0; i < rxdr->count; i++) {
+	for (i = 0; i < rxdr->count; i++) {
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
 		struct sk_buff *skb;
 
-		if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+		if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
 		                     GFP_KERNEL))) {
 			ret_val = 6;
 			goto err_nomem;
@@ -1227,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 
 	/* Check Phy Configuration */
 	e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
-	if(phy_reg != 0x4100)
+	if (phy_reg != 0x4100)
 		return 9;
 
 	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
-	if(phy_reg != 0x0070)
+	if (phy_reg != 0x0070)
 		return 10;
 
 	e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
-	if(phy_reg != 0x001A)
+	if (phy_reg != 0x001A)
 		return 11;
 
 	return 0;
@@ -1249,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 
 	adapter->hw.autoneg = FALSE;
 
-	if(adapter->hw.phy_type == e1000_phy_m88) {
+	if (adapter->hw.phy_type == e1000_phy_m88) {
 		/* Auto-MDI/MDIX Off */
 		e1000_write_phy_reg(&adapter->hw,
 		                    M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1269,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 	            E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
 	            E1000_CTRL_FD);      /* Force Duplex to FULL */
 
-	if(adapter->hw.media_type == e1000_media_type_copper &&
+	if (adapter->hw.media_type == e1000_media_type_copper &&
 	   adapter->hw.phy_type == e1000_phy_m88) {
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
 	} else {
 		/* Set the ILOS bit on the fiber Nic is half
 		 * duplex link is detected. */
 		stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
-		if((stat_reg & E1000_STATUS_FD) == 0)
+		if ((stat_reg & E1000_STATUS_FD) == 0)
 			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
 	}
 
@@ -1285,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 	/* Disable the receiver on the PHY so when a cable is plugged in, the
 	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
 	 */
-	if(adapter->hw.phy_type == e1000_phy_m88)
+	if (adapter->hw.phy_type == e1000_phy_m88)
 		e1000_phy_disable_receiver(adapter);
 
 	udelay(500);
@@ -1301,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
 
 	switch (adapter->hw.mac_type) {
 	case e1000_82543:
-		if(adapter->hw.media_type == e1000_media_type_copper) {
+		if (adapter->hw.media_type == e1000_media_type_copper) {
 			/* Attempt to setup Loopback mode on Non-integrated PHY.
 			 * Some PHY registers get corrupted at random, so
 			 * attempt this 10 times.
 			 */
-			while(e1000_nonintegrated_phy_loopback(adapter) &&
+			while (e1000_nonintegrated_phy_loopback(adapter) &&
 			      count++ < 10);
-			if(count < 11)
+			if (count < 11)
 				return 0;
 		}
 		break;
@@ -1430,8 +1429,8 @@ static int
 e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
 	frame_size &= ~1;
-	if(*(skb->data + 3) == 0xFF) {
-		if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
 		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
 			return 0;
 		}
@@ -1450,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 
 	E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
 
 	/* Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
 	 * send/receive pairs during each loop
 	 */
 
-	if(rxdr->count <= txdr->count)
+	if (rxdr->count <= txdr->count)
 		lc = ((txdr->count / 64) * 2) + 1;
 	else
 		lc = ((rxdr->count / 64) * 2) + 1;
 
 	k = l = 0;
-	for(j = 0; j <= lc; j++) { /* loop count loop */
-		for(i = 0; i < 64; i++) { /* send the packets */
+	for (j = 0; j <= lc; j++) { /* loop count loop */
+		for (i = 0; i < 64; i++) { /* send the packets */
 			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
 			                          1024);
 			pci_dma_sync_single_for_device(pdev,
 					txdr->buffer_info[k].dma,
 					txdr->buffer_info[k].length,
 					PCI_DMA_TODEVICE);
-			if(unlikely(++k == txdr->count)) k = 0;
+			if (unlikely(++k == txdr->count)) k = 0;
 		}
 		E1000_WRITE_REG(&adapter->hw, TDT, k);
 		msec_delay(200);
 		time = jiffies; /* set the start time for the receive */
 		good_cnt = 0;
 		do { /* receive the sent packets */
 			pci_dma_sync_single_for_cpu(pdev,
 					rxdr->buffer_info[l].dma,
 					rxdr->buffer_info[l].length,
 					PCI_DMA_FROMDEVICE);
 
 			ret_val = e1000_check_lbtest_frame(
 					rxdr->buffer_info[l].skb,
 					1024);
-			if(!ret_val)
+			if (!ret_val)
 				good_cnt++;
-			if(unlikely(++l == rxdr->count)) l = 0;
+			if (unlikely(++l == rxdr->count)) l = 0;
 			/* time + 20 msecs (200 msecs on 2.4) is more than
 			 * enough time to complete the receives, if it's
 			 * exceeded, break and error off
 			 */
 		} while (good_cnt < 64 && jiffies < (time + 20));
-		if(good_cnt != 64) {
+		if (good_cnt != 64) {
 			ret_val = 13; /* ret_val is the same as mis-compare */
 			break;
 		}
-		if(jiffies >= (time + 2)) {
+		if (jiffies >= (time + 2)) {
 			ret_val = 14; /* error code for time out error */
 			break;
 		}
@@ -1549,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
 		*data = 1;
 	} else {
 		e1000_check_for_link(&adapter->hw);
-		if(adapter->hw.autoneg) /* if auto_neg is set wait for it */
+		if (adapter->hw.autoneg) /* if auto_neg is set wait for it */
 			msec_delay(4000);
 
-		if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
+		if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
 			*data = 1;
 		}
 	}
 	return *data;
 }
 
-static int
+static int
 e1000_diag_test_count(struct net_device *netdev)
 {
 	return E1000_TEST_LEN;
@@ -1572,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	boolean_t if_running = netif_running(netdev);
 
-	if(eth_test->flags == ETH_TEST_FL_OFFLINE) {
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
 		/* save speed, duplex, autoneg settings */
@@ -1582,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev,
 
 		/* Link test performed before hardware reset so autoneg doesn't
 		 * interfere with test result */
-		if(e1000_link_test(adapter, &data[4]))
+		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		if(if_running)
+		if (if_running)
 			e1000_down(adapter);
 		else
 			e1000_reset(adapter);
 
-		if(e1000_reg_test(adapter, &data[0]))
+		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
-		if(e1000_eeprom_test(adapter, &data[1]))
+		if (e1000_eeprom_test(adapter, &data[1]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
-		if(e1000_intr_test(adapter, &data[2]))
+		if (e1000_intr_test(adapter, &data[2]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
-		if(e1000_loopback_test(adapter, &data[3]))
+		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		/* restore speed, duplex, autoneg settings */
@@ -1611,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev,
 		adapter->hw.autoneg = autoneg;
 
 		e1000_reset(adapter);
-		if(if_running)
+		if (if_running)
 			e1000_up(adapter);
 	} else {
 		/* Online tests */
-		if(e1000_link_test(adapter, &data[4]))
+		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		/* Offline tests aren't run; pass by default */
@@ -1633,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	switch(adapter->hw.device_id) {
+	switch (adapter->hw.device_id) {
 	case E1000_DEV_ID_82542:
 	case E1000_DEV_ID_82543GC_FIBER:
 	case E1000_DEV_ID_82543GC_COPPER:
@@ -1649,7 +1648,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
-		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
+		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
 			wol->supported = 0;
 			wol->wolopts = 0;
 			return;
@@ -1661,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 		                 WAKE_BCAST | WAKE_MAGIC;
 
 		wol->wolopts = 0;
-		if(adapter->wol & E1000_WUFC_EX)
+		if (adapter->wol & E1000_WUFC_EX)
 			wol->wolopts |= WAKE_UCAST;
-		if(adapter->wol & E1000_WUFC_MC)
+		if (adapter->wol & E1000_WUFC_MC)
 			wol->wolopts |= WAKE_MCAST;
-		if(adapter->wol & E1000_WUFC_BC)
+		if (adapter->wol & E1000_WUFC_BC)
 			wol->wolopts |= WAKE_BCAST;
-		if(adapter->wol & E1000_WUFC_MAG)
+		if (adapter->wol & E1000_WUFC_MAG)
 			wol->wolopts |= WAKE_MAGIC;
 		return;
 	}
@@ -1679,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	switch(adapter->hw.device_id) {
+	switch (adapter->hw.device_id) {
 	case E1000_DEV_ID_82542:
 	case E1000_DEV_ID_82543GC_FIBER:
 	case E1000_DEV_ID_82543GC_COPPER:
@@ -1693,23 +1692,23 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
-		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
 			return wol->wolopts ? -EOPNOTSUPP : 0;
 		/* Fall Through */
 
 	default:
-		if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
 			return -EOPNOTSUPP;
 
 		adapter->wol = 0;
 
-		if(wol->wolopts & WAKE_UCAST)
+		if (wol->wolopts & WAKE_UCAST)
 			adapter->wol |= E1000_WUFC_EX;
-		if(wol->wolopts & WAKE_MCAST)
+		if (wol->wolopts & WAKE_MCAST)
 			adapter->wol |= E1000_WUFC_MC;
-		if(wol->wolopts & WAKE_BCAST)
+		if (wol->wolopts & WAKE_BCAST)
 			adapter->wol |= E1000_WUFC_BC;
-		if(wol->wolopts & WAKE_MAGIC)
+		if (wol->wolopts & WAKE_MAGIC)
 			adapter->wol |= E1000_WUFC_MAG;
 	}
 
@@ -1727,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
 
-	if(test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+	if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
 		e1000_led_off(&adapter->hw);
 	else
 		e1000_led_on(&adapter->hw);
@@ -1740,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
+	if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
 		data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
 
-	if(adapter->hw.mac_type < e1000_82571) {
-		if(!adapter->blink_timer.function) {
+	if (adapter->hw.mac_type < e1000_82571) {
+		if (!adapter->blink_timer.function) {
 			init_timer(&adapter->blink_timer);
 			adapter->blink_timer.function = e1000_led_blink_callback;
 			adapter->blink_timer.data = (unsigned long) adapter;
@@ -1782,21 +1781,21 @@ static int
 e1000_nway_reset(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	if(netif_running(netdev)) {
+	if (netif_running(netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
 	}
 	return 0;
 }
 
-static int
+static int
 e1000_get_stats_count(struct net_device *netdev)
 {
 	return E1000_STATS_LEN;
 }
 
 static void
-e1000_get_ethtool_stats(struct net_device *netdev,
+e1000_get_ethtool_stats(struct net_device *netdev,
 		struct ethtool_stats *stats, uint64_t *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1830,7 +1829,7 @@ e1000_get_ethtool_stats,
 /*	BUG_ON(i != E1000_STATS_LEN); */
 }
 
-static void
+static void
 e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 {
 #ifdef CONFIG_E1000_MQ
@@ -1839,9 +1838,9 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 	uint8_t *p = data;
 	int i;
 
-	switch(stringset) {
+	switch (stringset) {
 	case ETH_SS_TEST:
-		memcpy(data, *e1000_gstrings_test,
+		memcpy(data, *e1000_gstrings_test,
 		       E1000_TEST_LEN*ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 2437d362ff63..beeec0fbbeac 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -1600,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
     if(ret_val)
         return ret_val;
 
-    /* Read the MII 1000Base-T Control Register (Address 9). */
-    ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
-    if(ret_val)
-        return ret_val;
+    /* Read the MII 1000Base-T Control Register (Address 9). */
+    ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+    if(ret_val)
+        return ret_val;
 
     /* Need to parse both autoneg_advertised and fc and set up
      * the appropriate PHY registers.  First we will parse for
@@ -3916,7 +3916,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
         }
     }
 
-    if(eeprom->use_eerd == TRUE) {
+    if (eeprom->use_eerd == TRUE) {
         ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
         if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
             (hw->mac_type != e1000_82573))
@@ -4423,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
         return -E1000_ERR_EEPROM;
     }
 
-    /* If STM opcode located in bits 15:8 of flop, reset firmware */
+    /* If STM opcode located in bits 15:8 of flop, reset firmware */
     if ((flop & 0xFF00) == E1000_STM_OPCODE) {
         E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
     }
@@ -4431,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
     /* Perform the flash update */
     E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
 
-    for (i=0; i < attempts; i++) {
+    for (i=0; i < attempts; i++) {
         eecd = E1000_READ_REG(hw, EECD);
         if ((eecd & E1000_EECD_FLUPD) == 0) {
             break;
@@ -4504,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
         hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
         hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
     }
+
     switch (hw->mac_type) {
     default:
         break;
@@ -6840,7 +6841,8 @@ int32_t
 e1000_check_phy_reset_block(struct e1000_hw *hw)
 {
     uint32_t manc = 0;
-    if(hw->mac_type > e1000_82547_rev_2)
+
+    if (hw->mac_type > e1000_82547_rev_2)
         manc = E1000_READ_REG(hw, MANC);
     return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
         E1000_BLK_PHY_RESET : E1000_SUCCESS;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 09a7c80dd508..f1219dd9dbac 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -902,14 +902,14 @@ struct e1000_ffvt_entry {
 #define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
 #define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
 #define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
-#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
+#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
 #define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
 #define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
 #define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
 #define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
 #define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
 #define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
-#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
+#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
 #define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
 #define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
 #define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
@@ -1764,7 +1764,6 @@ struct e1000_hw {
 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
 #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
                                               still to be processed. */
-
 /* Transmit Configuration Word */
 #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
 #define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 501f5108254e..44149f902868 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -295,7 +295,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
295static inline void 295static inline void
296e1000_irq_enable(struct e1000_adapter *adapter) 296e1000_irq_enable(struct e1000_adapter *adapter)
297{ 297{
298 if(likely(atomic_dec_and_test(&adapter->irq_sem))) { 298 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
299 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); 299 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
300 E1000_WRITE_FLUSH(&adapter->hw); 300 E1000_WRITE_FLUSH(&adapter->hw);
301 } 301 }
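
Note: the enable path above only unmasks the hardware when every nested disable has been released, since atomic_dec_and_test() returns true only for the outermost caller. A minimal sketch of the counted mask/unmask pair it belongs to; the disable half is not part of this hunk, so its body is a reconstruction of the usual pattern, not a quote of the driver:

	/* Nested IRQ masking via an atomic depth counter.  Every disable
	 * bumps irq_sem and masks all causes; only the matching
	 * outermost enable rewrites the mask register. */
	static void sketch_irq_disable(struct e1000_adapter *adapter)
	{
		atomic_inc(&adapter->irq_sem);          /* one more masking level */
		E1000_WRITE_REG(&adapter->hw, IMC, ~0); /* mask all interrupt causes */
		E1000_WRITE_FLUSH(&adapter->hw);        /* post the write to the NIC */
		synchronize_irq(adapter->pdev->irq);    /* let a running handler finish */
	}

	static void sketch_irq_enable(struct e1000_adapter *adapter)
	{
		if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
			E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
			E1000_WRITE_FLUSH(&adapter->hw);
		}
	}
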
@@ -307,17 +307,17 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
307 struct net_device *netdev = adapter->netdev; 307 struct net_device *netdev = adapter->netdev;
308 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 308 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
309 uint16_t old_vid = adapter->mng_vlan_id; 309 uint16_t old_vid = adapter->mng_vlan_id;
310 if(adapter->vlgrp) { 310 if (adapter->vlgrp) {
311 if(!adapter->vlgrp->vlan_devices[vid]) { 311 if (!adapter->vlgrp->vlan_devices[vid]) {
312 if(adapter->hw.mng_cookie.status & 312 if (adapter->hw.mng_cookie.status &
313 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 313 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
314 e1000_vlan_rx_add_vid(netdev, vid); 314 e1000_vlan_rx_add_vid(netdev, vid);
315 adapter->mng_vlan_id = vid; 315 adapter->mng_vlan_id = vid;
316 } else 316 } else
317 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 317 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
318 318
319 if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 319 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
320 (vid != old_vid) && 320 (vid != old_vid) &&
321 !adapter->vlgrp->vlan_devices[old_vid]) 321 !adapter->vlgrp->vlan_devices[old_vid])
322 e1000_vlan_rx_kill_vid(netdev, old_vid); 322 e1000_vlan_rx_kill_vid(netdev, old_vid);
323 } 323 }
@@ -401,10 +401,10 @@ e1000_up(struct e1000_adapter *adapter)
401 /* hardware has been reset, we need to reload some things */ 401 /* hardware has been reset, we need to reload some things */
402 402
403 /* Reset the PHY if it was previously powered down */ 403 /* Reset the PHY if it was previously powered down */
404 if(adapter->hw.media_type == e1000_media_type_copper) { 404 if (adapter->hw.media_type == e1000_media_type_copper) {
405 uint16_t mii_reg; 405 uint16_t mii_reg;
406 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 406 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
407 if(mii_reg & MII_CR_POWER_DOWN) 407 if (mii_reg & MII_CR_POWER_DOWN)
408 e1000_phy_reset(&adapter->hw); 408 e1000_phy_reset(&adapter->hw);
409 } 409 }
410 410
@@ -425,16 +425,16 @@ e1000_up(struct e1000_adapter *adapter)
425 } 425 }
426 426
427#ifdef CONFIG_PCI_MSI 427#ifdef CONFIG_PCI_MSI
428 if(adapter->hw.mac_type > e1000_82547_rev_2) { 428 if (adapter->hw.mac_type > e1000_82547_rev_2) {
429 adapter->have_msi = TRUE; 429 adapter->have_msi = TRUE;
430 if((err = pci_enable_msi(adapter->pdev))) { 430 if ((err = pci_enable_msi(adapter->pdev))) {
431 DPRINTK(PROBE, ERR, 431 DPRINTK(PROBE, ERR,
432 "Unable to allocate MSI interrupt Error: %d\n", err); 432 "Unable to allocate MSI interrupt Error: %d\n", err);
433 adapter->have_msi = FALSE; 433 adapter->have_msi = FALSE;
434 } 434 }
435 } 435 }
436#endif 436#endif
437 if((err = request_irq(adapter->pdev->irq, &e1000_intr, 437 if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
438 SA_SHIRQ | SA_SAMPLE_RANDOM, 438 SA_SHIRQ | SA_SAMPLE_RANDOM,
439 netdev->name, netdev))) { 439 netdev->name, netdev))) {
440 DPRINTK(PROBE, ERR, 440 DPRINTK(PROBE, ERR,
@@ -471,7 +471,7 @@ e1000_down(struct e1000_adapter *adapter)
471#endif 471#endif
472 free_irq(adapter->pdev->irq, netdev); 472 free_irq(adapter->pdev->irq, netdev);
473#ifdef CONFIG_PCI_MSI 473#ifdef CONFIG_PCI_MSI
474 if(adapter->hw.mac_type > e1000_82547_rev_2 && 474 if (adapter->hw.mac_type > e1000_82547_rev_2 &&
475 adapter->have_msi == TRUE) 475 adapter->have_msi == TRUE)
476 pci_disable_msi(adapter->pdev); 476 pci_disable_msi(adapter->pdev);
477#endif 477#endif
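
Note: the e1000_up() and e1000_down() hunks above form one pattern: MACs newer than 82547 try MSI and quietly fall back to legacy INTx when pci_enable_msi() fails, and teardown undoes only what was actually enabled, keyed off adapter->have_msi. A condensed sketch of the round trip under 2.6-era APIs (SA_SHIRQ and SA_SAMPLE_RANDOM were later renamed to IRQF_* equivalents):

	int err;

	adapter->have_msi = FALSE;
	#ifdef CONFIG_PCI_MSI
	if (adapter->hw.mac_type > e1000_82547_rev_2 &&
	    !pci_enable_msi(adapter->pdev))
		adapter->have_msi = TRUE;   /* pdev->irq now names the MSI vector */
	#endif

	/* One request_irq() covers both cases; the shared flag only
	 * matters on the legacy INTx path. */
	err = request_irq(adapter->pdev->irq, &e1000_intr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, netdev->name, netdev);

	/* ...and teardown mirrors it... */
	free_irq(adapter->pdev->irq, netdev);
	#ifdef CONFIG_PCI_MSI
	if (adapter->have_msi == TRUE)
		pci_disable_msi(adapter->pdev);
	#endif
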
@@ -537,12 +537,12 @@ e1000_reset(struct e1000_adapter *adapter)
537 break; 537 break;
538 } 538 }
539 539
540 if((adapter->hw.mac_type != e1000_82573) && 540 if ((adapter->hw.mac_type != e1000_82573) &&
541 (adapter->netdev->mtu > E1000_RXBUFFER_8192)) 541 (adapter->netdev->mtu > E1000_RXBUFFER_8192))
542 pba -= 8; /* allocate more FIFO for Tx */ 542 pba -= 8; /* allocate more FIFO for Tx */
543 543
544 544
545 if(adapter->hw.mac_type == e1000_82547) { 545 if (adapter->hw.mac_type == e1000_82547) {
546 adapter->tx_fifo_head = 0; 546 adapter->tx_fifo_head = 0;
547 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 547 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
548 adapter->tx_fifo_size = 548 adapter->tx_fifo_size =
@@ -565,9 +565,9 @@ e1000_reset(struct e1000_adapter *adapter)
565 565
566 /* Allow time for pending master requests to run */ 566 /* Allow time for pending master requests to run */
567 e1000_reset_hw(&adapter->hw); 567 e1000_reset_hw(&adapter->hw);
568 if(adapter->hw.mac_type >= e1000_82544) 568 if (adapter->hw.mac_type >= e1000_82544)
569 E1000_WRITE_REG(&adapter->hw, WUC, 0); 569 E1000_WRITE_REG(&adapter->hw, WUC, 0);
570 if(e1000_init_hw(&adapter->hw)) 570 if (e1000_init_hw(&adapter->hw))
571 DPRINTK(PROBE, ERR, "Hardware Error\n"); 571 DPRINTK(PROBE, ERR, "Hardware Error\n");
572 e1000_update_mng_vlan(adapter); 572 e1000_update_mng_vlan(adapter);
573 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 573 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -606,26 +606,26 @@ e1000_probe(struct pci_dev *pdev,
606 int i, err, pci_using_dac; 606 int i, err, pci_using_dac;
607 uint16_t eeprom_data; 607 uint16_t eeprom_data;
608 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 608 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
609 if((err = pci_enable_device(pdev))) 609 if ((err = pci_enable_device(pdev)))
610 return err; 610 return err;
611 611
612 if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 612 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
613 pci_using_dac = 1; 613 pci_using_dac = 1;
614 } else { 614 } else {
615 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 615 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
616 E1000_ERR("No usable DMA configuration, aborting\n"); 616 E1000_ERR("No usable DMA configuration, aborting\n");
617 return err; 617 return err;
618 } 618 }
619 pci_using_dac = 0; 619 pci_using_dac = 0;
620 } 620 }
621 621
622 if((err = pci_request_regions(pdev, e1000_driver_name))) 622 if ((err = pci_request_regions(pdev, e1000_driver_name)))
623 return err; 623 return err;
624 624
625 pci_set_master(pdev); 625 pci_set_master(pdev);
626 626
627 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 627 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
628 if(!netdev) { 628 if (!netdev) {
629 err = -ENOMEM; 629 err = -ENOMEM;
630 goto err_alloc_etherdev; 630 goto err_alloc_etherdev;
631 } 631 }
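
Note: the probe opens with the standard DMA-mask ladder: ask for a 64-bit mask first so receive buffers may sit anywhere in memory, fall back to 32-bit, and abort only if neither sticks. pci_using_dac is remembered because it later gates NETIF_F_HIGHDMA. The same ladder in isolation (DMA_64BIT_MASK and DMA_32BIT_MASK are the era's spellings of today's DMA_BIT_MASK(64) and DMA_BIT_MASK(32)):

	static int sketch_dma_setup(struct pci_dev *pdev, int *pci_using_dac)
	{
		int err;

		if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
			*pci_using_dac = 1;     /* device can DMA above 4GB */
			return 0;
		}
		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)))
			return err;             /* no usable DMA configuration */
		*pci_using_dac = 0;             /* highmem pages must be avoided */
		return 0;
	}
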
@@ -644,15 +644,15 @@ e1000_probe(struct pci_dev *pdev,
644 mmio_len = pci_resource_len(pdev, BAR_0); 644 mmio_len = pci_resource_len(pdev, BAR_0);
645 645
646 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 646 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
647 if(!adapter->hw.hw_addr) { 647 if (!adapter->hw.hw_addr) {
648 err = -EIO; 648 err = -EIO;
649 goto err_ioremap; 649 goto err_ioremap;
650 } 650 }
651 651
652 for(i = BAR_1; i <= BAR_5; i++) { 652 for (i = BAR_1; i <= BAR_5; i++) {
653 if(pci_resource_len(pdev, i) == 0) 653 if (pci_resource_len(pdev, i) == 0)
654 continue; 654 continue;
655 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { 655 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
656 adapter->hw.io_base = pci_resource_start(pdev, i); 656 adapter->hw.io_base = pci_resource_start(pdev, i);
657 break; 657 break;
658 } 658 }
@@ -689,13 +689,13 @@ e1000_probe(struct pci_dev *pdev,
689 689
690 /* setup the private structure */ 690 /* setup the private structure */
691 691
692 if((err = e1000_sw_init(adapter))) 692 if ((err = e1000_sw_init(adapter)))
693 goto err_sw_init; 693 goto err_sw_init;
694 694
695 if((err = e1000_check_phy_reset_block(&adapter->hw))) 695 if ((err = e1000_check_phy_reset_block(&adapter->hw)))
696 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 696 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
697 697
698 if(adapter->hw.mac_type >= e1000_82543) { 698 if (adapter->hw.mac_type >= e1000_82543) {
699 netdev->features = NETIF_F_SG | 699 netdev->features = NETIF_F_SG |
700 NETIF_F_HW_CSUM | 700 NETIF_F_HW_CSUM |
701 NETIF_F_HW_VLAN_TX | 701 NETIF_F_HW_VLAN_TX |
@@ -704,16 +704,16 @@ e1000_probe(struct pci_dev *pdev,
704 } 704 }
705 705
706#ifdef NETIF_F_TSO 706#ifdef NETIF_F_TSO
707 if((adapter->hw.mac_type >= e1000_82544) && 707 if ((adapter->hw.mac_type >= e1000_82544) &&
708 (adapter->hw.mac_type != e1000_82547)) 708 (adapter->hw.mac_type != e1000_82547))
709 netdev->features |= NETIF_F_TSO; 709 netdev->features |= NETIF_F_TSO;
710 710
711#ifdef NETIF_F_TSO_IPV6 711#ifdef NETIF_F_TSO_IPV6
712 if(adapter->hw.mac_type > e1000_82547_rev_2) 712 if (adapter->hw.mac_type > e1000_82547_rev_2)
713 netdev->features |= NETIF_F_TSO_IPV6; 713 netdev->features |= NETIF_F_TSO_IPV6;
714#endif 714#endif
715#endif 715#endif
716 if(pci_using_dac) 716 if (pci_using_dac)
717 netdev->features |= NETIF_F_HIGHDMA; 717 netdev->features |= NETIF_F_HIGHDMA;
718 718
719 /* hard_start_xmit is safe against parallel locking */ 719 /* hard_start_xmit is safe against parallel locking */
@@ -721,14 +721,14 @@ e1000_probe(struct pci_dev *pdev,
721 721
722 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); 722 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
723 723
724 /* before reading the EEPROM, reset the controller to 724 /* before reading the EEPROM, reset the controller to
725 * put the device in a known good starting state */ 725 * put the device in a known good starting state */
726 726
727 e1000_reset_hw(&adapter->hw); 727 e1000_reset_hw(&adapter->hw);
728 728
729 /* make sure the EEPROM is good */ 729 /* make sure the EEPROM is good */
730 730
731 if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) { 731 if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
732 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 732 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
733 err = -EIO; 733 err = -EIO;
734 goto err_eeprom; 734 goto err_eeprom;
@@ -736,12 +736,12 @@ e1000_probe(struct pci_dev *pdev,
736 736
737 /* copy the MAC address out of the EEPROM */ 737 /* copy the MAC address out of the EEPROM */
738 738
739 if(e1000_read_mac_addr(&adapter->hw)) 739 if (e1000_read_mac_addr(&adapter->hw))
740 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 740 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
741 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 741 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
742 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); 742 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
743 743
744 if(!is_valid_ether_addr(netdev->perm_addr)) { 744 if (!is_valid_ether_addr(netdev->perm_addr)) {
745 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 745 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
746 err = -EIO; 746 err = -EIO;
747 goto err_eeprom; 747 goto err_eeprom;
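
Note: e1000_validate_eeprom_checksum() enforces a simple invariant: the 16-bit sum of EEPROM words 0x00 through 0x3F, the last of which is the vendor-programmed checksum word, must come out to exactly 0xBABA. A standalone sketch of that rule, with the constants as defined in e1000_hw.h:

	#include <stdint.h>

	#define EEPROM_CHECKSUM_REG 0x3F
	#define EEPROM_SUM          0xBABA

	/* The sum wraps mod 2^16; word 0x3F is chosen at manufacturing
	 * time so the total lands exactly on EEPROM_SUM. */
	static int eeprom_checksum_ok(const uint16_t eeprom[64])
	{
		uint16_t sum = 0;
		int i;

		for (i = 0; i <= EEPROM_CHECKSUM_REG; i++)
			sum += eeprom[i];
		return sum == EEPROM_SUM;
	}
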
@@ -781,7 +781,7 @@ e1000_probe(struct pci_dev *pdev,
781 * enable the ACPI Magic Packet filter 781 * enable the ACPI Magic Packet filter
782 */ 782 */
783 783
784 switch(adapter->hw.mac_type) { 784 switch (adapter->hw.mac_type) {
785 case e1000_82542_rev2_0: 785 case e1000_82542_rev2_0:
786 case e1000_82542_rev2_1: 786 case e1000_82542_rev2_1:
787 case e1000_82543: 787 case e1000_82543:
@@ -794,7 +794,7 @@ e1000_probe(struct pci_dev *pdev,
794 case e1000_82546: 794 case e1000_82546:
795 case e1000_82546_rev_3: 795 case e1000_82546_rev_3:
796 case e1000_82571: 796 case e1000_82571:
797 if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ 797 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
798 e1000_read_eeprom(&adapter->hw, 798 e1000_read_eeprom(&adapter->hw,
799 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 799 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
800 break; 800 break;
@@ -805,7 +805,7 @@ e1000_probe(struct pci_dev *pdev,
805 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 805 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
806 break; 806 break;
807 } 807 }
808 if(eeprom_data & eeprom_apme_mask) 808 if (eeprom_data & eeprom_apme_mask)
809 adapter->wol |= E1000_WUFC_MAG; 809 adapter->wol |= E1000_WUFC_MAG;
810 810
811 /* print bus type/speed/width info */ 811 /* print bus type/speed/width info */
@@ -840,7 +840,7 @@ e1000_probe(struct pci_dev *pdev,
840 e1000_get_hw_control(adapter); 840 e1000_get_hw_control(adapter);
841 841
842 strcpy(netdev->name, "eth%d"); 842 strcpy(netdev->name, "eth%d");
843 if((err = register_netdev(netdev))) 843 if ((err = register_netdev(netdev)))
844 goto err_register; 844 goto err_register;
845 845
846 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); 846 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -881,10 +881,10 @@ e1000_remove(struct pci_dev *pdev)
881 881
882 flush_scheduled_work(); 882 flush_scheduled_work();
883 883
884 if(adapter->hw.mac_type >= e1000_82540 && 884 if (adapter->hw.mac_type >= e1000_82540 &&
885 adapter->hw.media_type == e1000_media_type_copper) { 885 adapter->hw.media_type == e1000_media_type_copper) {
886 manc = E1000_READ_REG(&adapter->hw, MANC); 886 manc = E1000_READ_REG(&adapter->hw, MANC);
887 if(manc & E1000_MANC_SMBUS_EN) { 887 if (manc & E1000_MANC_SMBUS_EN) {
888 manc |= E1000_MANC_ARP_EN; 888 manc |= E1000_MANC_ARP_EN;
889 E1000_WRITE_REG(&adapter->hw, MANC, manc); 889 E1000_WRITE_REG(&adapter->hw, MANC, manc);
890 } 890 }
@@ -900,7 +900,7 @@ e1000_remove(struct pci_dev *pdev)
900 __dev_put(&adapter->polling_netdev[i]); 900 __dev_put(&adapter->polling_netdev[i]);
901#endif 901#endif
902 902
903 if(!e1000_check_phy_reset_block(&adapter->hw)) 903 if (!e1000_check_phy_reset_block(&adapter->hw))
904 e1000_phy_hw_reset(&adapter->hw); 904 e1000_phy_hw_reset(&adapter->hw);
905 905
906 kfree(adapter->tx_ring); 906 kfree(adapter->tx_ring);
@@ -959,19 +959,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
959 959
960 /* identify the MAC */ 960 /* identify the MAC */
961 961
962 if(e1000_set_mac_type(hw)) { 962 if (e1000_set_mac_type(hw)) {
963 DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); 963 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
964 return -EIO; 964 return -EIO;
965 } 965 }
966 966
967 /* initialize eeprom parameters */ 967 /* initialize eeprom parameters */
968 968
969 if(e1000_init_eeprom_params(hw)) { 969 if (e1000_init_eeprom_params(hw)) {
970 E1000_ERR("EEPROM initialization failed\n"); 970 E1000_ERR("EEPROM initialization failed\n");
971 return -EIO; 971 return -EIO;
972 } 972 }
973 973
974 switch(hw->mac_type) { 974 switch (hw->mac_type) {
975 default: 975 default:
976 break; 976 break;
977 case e1000_82541: 977 case e1000_82541:
@@ -990,7 +990,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
990 990
991 /* Copper options */ 991 /* Copper options */
992 992
993 if(hw->media_type == e1000_media_type_copper) { 993 if (hw->media_type == e1000_media_type_copper) {
994 hw->mdix = AUTO_ALL_MODES; 994 hw->mdix = AUTO_ALL_MODES;
995 hw->disable_polarity_correction = FALSE; 995 hw->disable_polarity_correction = FALSE;
996 hw->master_slave = E1000_MASTER_SLAVE; 996 hw->master_slave = E1000_MASTER_SLAVE;
@@ -1166,10 +1166,10 @@ e1000_open(struct net_device *netdev)
1166 if ((err = e1000_setup_all_rx_resources(adapter))) 1166 if ((err = e1000_setup_all_rx_resources(adapter)))
1167 goto err_setup_rx; 1167 goto err_setup_rx;
1168 1168
1169 if((err = e1000_up(adapter))) 1169 if ((err = e1000_up(adapter)))
1170 goto err_up; 1170 goto err_up;
1171 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 1171 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1172 if((adapter->hw.mng_cookie.status & 1172 if ((adapter->hw.mng_cookie.status &
1173 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1173 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1174 e1000_update_mng_vlan(adapter); 1174 e1000_update_mng_vlan(adapter);
1175 } 1175 }
@@ -1214,7 +1214,7 @@ e1000_close(struct net_device *netdev)
1214 e1000_free_all_tx_resources(adapter); 1214 e1000_free_all_tx_resources(adapter);
1215 e1000_free_all_rx_resources(adapter); 1215 e1000_free_all_rx_resources(adapter);
1216 1216
1217 if((adapter->hw.mng_cookie.status & 1217 if ((adapter->hw.mng_cookie.status &
1218 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1218 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1219 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1219 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1220 } 1220 }
@@ -1269,7 +1269,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1269 size = sizeof(struct e1000_buffer) * txdr->count; 1269 size = sizeof(struct e1000_buffer) * txdr->count;
1270 1270
1271 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); 1271 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1272 if(!txdr->buffer_info) { 1272 if (!txdr->buffer_info) {
1273 DPRINTK(PROBE, ERR, 1273 DPRINTK(PROBE, ERR,
1274 "Unable to allocate memory for the transmit descriptor ring\n"); 1274 "Unable to allocate memory for the transmit descriptor ring\n");
1275 return -ENOMEM; 1275 return -ENOMEM;
@@ -1282,7 +1282,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1282 E1000_ROUNDUP(txdr->size, 4096); 1282 E1000_ROUNDUP(txdr->size, 4096);
1283 1283
1284 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1284 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1285 if(!txdr->desc) { 1285 if (!txdr->desc) {
1286setup_tx_desc_die: 1286setup_tx_desc_die:
1287 vfree(txdr->buffer_info); 1287 vfree(txdr->buffer_info);
1288 DPRINTK(PROBE, ERR, 1288 DPRINTK(PROBE, ERR,
@@ -1298,8 +1298,8 @@ setup_tx_desc_die:
1298 "at %p\n", txdr->size, txdr->desc); 1298 "at %p\n", txdr->size, txdr->desc);
1299 /* Try again, without freeing the previous */ 1299 /* Try again, without freeing the previous */
1300 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1300 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1301 if(!txdr->desc) {
1302 /* Failed allocation, critical failure */ 1301 /* Failed allocation, critical failure */
1302 if (!txdr->desc) {
1303 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1303 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1304 goto setup_tx_desc_die; 1304 goto setup_tx_desc_die;
1305 } 1305 }
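
Note: the retry above is deliberate. The first ring came back badly placed (the driver's 64KB-boundary check, which sits outside this hunk), so it is kept alive in olddesc/olddma while a second allocation is requested; holding the first block guarantees the allocator cannot hand back the same memory. Only after the retry is judged does anything get freed. In outline, with check_placement() as a hypothetical stand-in for the real boundary test and the second-failure error path elided:

	/* attempt #1 failed the placement check; keep it pinned */
	void *olddesc = txdr->desc;
	dma_addr_t olddma = txdr->dma;

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
		/* return the pinned block before bailing out */
		pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		goto setup_tx_desc_die;
	}
	if (check_placement(txdr->desc, txdr->size))    /* hypothetical */
		pci_free_consistent(pdev, txdr->size, olddesc, olddma);
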
@@ -1499,7 +1499,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1499 1499
1500 size = sizeof(struct e1000_ps_page) * rxdr->count; 1500 size = sizeof(struct e1000_ps_page) * rxdr->count;
1501 rxdr->ps_page = kmalloc(size, GFP_KERNEL); 1501 rxdr->ps_page = kmalloc(size, GFP_KERNEL);
1502 if(!rxdr->ps_page) { 1502 if (!rxdr->ps_page) {
1503 vfree(rxdr->buffer_info); 1503 vfree(rxdr->buffer_info);
1504 DPRINTK(PROBE, ERR, 1504 DPRINTK(PROBE, ERR,
1505 "Unable to allocate memory for the receive descriptor ring\n"); 1505 "Unable to allocate memory for the receive descriptor ring\n");
@@ -1509,7 +1509,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1509 1509
1510 size = sizeof(struct e1000_ps_page_dma) * rxdr->count; 1510 size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1511 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); 1511 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
1512 if(!rxdr->ps_page_dma) { 1512 if (!rxdr->ps_page_dma) {
1513 vfree(rxdr->buffer_info); 1513 vfree(rxdr->buffer_info);
1514 kfree(rxdr->ps_page); 1514 kfree(rxdr->ps_page);
1515 DPRINTK(PROBE, ERR, 1515 DPRINTK(PROBE, ERR,
@@ -1518,7 +1518,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1518 } 1518 }
1519 memset(rxdr->ps_page_dma, 0, size); 1519 memset(rxdr->ps_page_dma, 0, size);
1520 1520
1521 if(adapter->hw.mac_type <= e1000_82547_rev_2) 1521 if (adapter->hw.mac_type <= e1000_82547_rev_2)
1522 desc_len = sizeof(struct e1000_rx_desc); 1522 desc_len = sizeof(struct e1000_rx_desc);
1523 else 1523 else
1524 desc_len = sizeof(union e1000_rx_desc_packet_split); 1524 desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1647,7 +1647,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1647 rctl |= E1000_RCTL_LPE; 1647 rctl |= E1000_RCTL_LPE;
1648 1648
1649 /* Setup buffer sizes */ 1649 /* Setup buffer sizes */
1650 if(adapter->hw.mac_type >= e1000_82571) { 1650 if (adapter->hw.mac_type >= e1000_82571) {
1651 /* We can now specify buffers in 1K increments. 1651 /* We can now specify buffers in 1K increments.
1652 * BSIZE and BSEX are ignored in this case. */ 1652 * BSIZE and BSEX are ignored in this case. */
1653 rctl |= adapter->rx_buffer_len << 0x11; 1653 rctl |= adapter->rx_buffer_len << 0x11;
@@ -1681,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1681 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); 1681 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1682 1682
1683 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; 1683 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
1684 1684
1685 psrctl |= adapter->rx_ps_bsize0 >> 1685 psrctl |= adapter->rx_ps_bsize0 >>
1686 E1000_PSRCTL_BSIZE0_SHIFT; 1686 E1000_PSRCTL_BSIZE0_SHIFT;
1687 1687
@@ -1743,7 +1743,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1743 1743
1744 if (hw->mac_type >= e1000_82540) { 1744 if (hw->mac_type >= e1000_82540) {
1745 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); 1745 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
1746 if(adapter->itr > 1) 1746 if (adapter->itr > 1)
1747 E1000_WRITE_REG(hw, ITR, 1747 E1000_WRITE_REG(hw, ITR,
1748 1000000000 / (adapter->itr * 256)); 1748 1000000000 / (adapter->itr * 256));
1749 } 1749 }
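
Note: the ITR write above encodes an interrupt-rate target: the register counts minimum inter-interrupt gaps in 256 ns units, so a target of N interrupts per second is programmed as 10^9 / (N * 256). A small self-contained check of the arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	/* rate in interrupts/sec -> ITR register value (256 ns units) */
	static uint32_t itr_reg(uint32_t rate)
	{
		return 1000000000 / (rate * 256);
	}

	int main(void)
	{
		uint32_t rates[] = { 2000, 8000, 10000 };
		int i;

		for (i = 0; i < 3; i++)
			printf("%5u ints/s -> ITR=%4u (~%u us between irqs)\n",
			       rates[i], itr_reg(rates[i]),
			       itr_reg(rates[i]) * 256 / 1000);
		return 0;   /* 2000 -> 1953 (~499us), 8000 -> 488 (~124us) */
	}
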
@@ -1832,13 +1832,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1832 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1832 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1833 if (hw->mac_type >= e1000_82543) { 1833 if (hw->mac_type >= e1000_82543) {
1834 rxcsum = E1000_READ_REG(hw, RXCSUM); 1834 rxcsum = E1000_READ_REG(hw, RXCSUM);
1835 if(adapter->rx_csum == TRUE) { 1835 if (adapter->rx_csum == TRUE) {
1836 rxcsum |= E1000_RXCSUM_TUOFL; 1836 rxcsum |= E1000_RXCSUM_TUOFL;
1837 1837
1838 /* Enable 82571 IPv4 payload checksum for UDP fragments 1838 /* Enable 82571 IPv4 payload checksum for UDP fragments
1839 * Must be used in conjunction with packet-split. */ 1839 * Must be used in conjunction with packet-split. */
1840 if ((hw->mac_type >= e1000_82571) && 1840 if ((hw->mac_type >= e1000_82571) &&
1841 (adapter->rx_ps_pages)) { 1841 (adapter->rx_ps_pages)) {
1842 rxcsum |= E1000_RXCSUM_IPPCSE; 1842 rxcsum |= E1000_RXCSUM_IPPCSE;
1843 } 1843 }
1844 } else { 1844 } else {
@@ -1900,7 +1900,7 @@ static inline void
1900e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1900e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1901 struct e1000_buffer *buffer_info) 1901 struct e1000_buffer *buffer_info)
1902{ 1902{
1903 if(buffer_info->dma) { 1903 if (buffer_info->dma) {
1904 pci_unmap_page(adapter->pdev, 1904 pci_unmap_page(adapter->pdev,
1905 buffer_info->dma, 1905 buffer_info->dma,
1906 buffer_info->length, 1906 buffer_info->length,
@@ -1927,7 +1927,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
1927 1927
1928 /* Free all the Tx ring sk_buffs */ 1928 /* Free all the Tx ring sk_buffs */
1929 1929
1930 for(i = 0; i < tx_ring->count; i++) { 1930 for (i = 0; i < tx_ring->count; i++) {
1931 buffer_info = &tx_ring->buffer_info[i]; 1931 buffer_info = &tx_ring->buffer_info[i];
1932 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1932 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1933 } 1933 }
@@ -2023,10 +2023,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
2023 unsigned int i, j; 2023 unsigned int i, j;
2024 2024
2025 /* Free all the Rx ring sk_buffs */ 2025 /* Free all the Rx ring sk_buffs */
2026 2026 for (i = 0; i < rx_ring->count; i++) {
2027 for(i = 0; i < rx_ring->count; i++) {
2028 buffer_info = &rx_ring->buffer_info[i]; 2027 buffer_info = &rx_ring->buffer_info[i];
2029 if(buffer_info->skb) { 2028 if (buffer_info->skb) {
2030 pci_unmap_single(pdev, 2029 pci_unmap_single(pdev,
2031 buffer_info->dma, 2030 buffer_info->dma,
2032 buffer_info->length, 2031 buffer_info->length,
@@ -2107,7 +2106,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
2107 E1000_WRITE_FLUSH(&adapter->hw); 2106 E1000_WRITE_FLUSH(&adapter->hw);
2108 mdelay(5); 2107 mdelay(5);
2109 2108
2110 if(netif_running(netdev)) 2109 if (netif_running(netdev))
2111 e1000_clean_all_rx_rings(adapter); 2110 e1000_clean_all_rx_rings(adapter);
2112} 2111}
2113 2112
@@ -2123,10 +2122,10 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
2123 E1000_WRITE_FLUSH(&adapter->hw); 2122 E1000_WRITE_FLUSH(&adapter->hw);
2124 mdelay(5); 2123 mdelay(5);
2125 2124
2126 if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) 2125 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2127 e1000_pci_set_mwi(&adapter->hw); 2126 e1000_pci_set_mwi(&adapter->hw);
2128 2127
2129 if(netif_running(netdev)) { 2128 if (netif_running(netdev)) {
2130 e1000_configure_rx(adapter); 2129 e1000_configure_rx(adapter);
2131 /* No need to loop, because 82542 supports only 1 queue */ 2130 /* No need to loop, because 82542 supports only 1 queue */
2132 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2131 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
@@ -2148,12 +2147,12 @@ e1000_set_mac(struct net_device *netdev, void *p)
2148 struct e1000_adapter *adapter = netdev_priv(netdev); 2147 struct e1000_adapter *adapter = netdev_priv(netdev);
2149 struct sockaddr *addr = p; 2148 struct sockaddr *addr = p;
2150 2149
2151 if(!is_valid_ether_addr(addr->sa_data)) 2150 if (!is_valid_ether_addr(addr->sa_data))
2152 return -EADDRNOTAVAIL; 2151 return -EADDRNOTAVAIL;
2153 2152
2154 /* 82542 2.0 needs to be in reset to write receive address registers */ 2153 /* 82542 2.0 needs to be in reset to write receive address registers */
2155 2154
2156 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2155 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2157 e1000_enter_82542_rst(adapter); 2156 e1000_enter_82542_rst(adapter);
2158 2157
2159 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2158 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -2167,17 +2166,17 @@ e1000_set_mac(struct net_device *netdev, void *p)
2167 /* activate the work around */ 2166 /* activate the work around */
2168 adapter->hw.laa_is_present = 1; 2167 adapter->hw.laa_is_present = 1;
2169 2168
 2170 /* Hold a copy of the LAA in RAR[14]. This is done so that 2169 /* Hold a copy of the LAA in RAR[14]. This is done so that
2171 * between the time RAR[0] gets clobbered and the time it 2170 * between the time RAR[0] gets clobbered and the time it
2172 * gets fixed (in e1000_watchdog), the actual LAA is in one 2171 * gets fixed (in e1000_watchdog), the actual LAA is in one
2173 * of the RARs and no incoming packets directed to this port 2172 * of the RARs and no incoming packets directed to this port
 2174 * are dropped. Eventually the LAA will be in RAR[0] and 2173 * are dropped. Eventually the LAA will be in RAR[0] and
2175 * RAR[14] */ 2174 * RAR[14] */
2176 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 2175 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2177 E1000_RAR_ENTRIES - 1); 2176 E1000_RAR_ENTRIES - 1);
2178 } 2177 }
2179 2178
2180 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2179 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2181 e1000_leave_82542_rst(adapter); 2180 e1000_leave_82542_rst(adapter);
2182 2181
2183 return 0; 2182 return 0;
@@ -2211,9 +2210,9 @@ e1000_set_multi(struct net_device *netdev)
2211 2210
2212 rctl = E1000_READ_REG(hw, RCTL); 2211 rctl = E1000_READ_REG(hw, RCTL);
2213 2212
2214 if(netdev->flags & IFF_PROMISC) { 2213 if (netdev->flags & IFF_PROMISC) {
2215 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2214 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2216 } else if(netdev->flags & IFF_ALLMULTI) { 2215 } else if (netdev->flags & IFF_ALLMULTI) {
2217 rctl |= E1000_RCTL_MPE; 2216 rctl |= E1000_RCTL_MPE;
2218 rctl &= ~E1000_RCTL_UPE; 2217 rctl &= ~E1000_RCTL_UPE;
2219 } else { 2218 } else {
@@ -2224,7 +2223,7 @@ e1000_set_multi(struct net_device *netdev)
2224 2223
2225 /* 82542 2.0 needs to be in reset to write receive address registers */ 2224 /* 82542 2.0 needs to be in reset to write receive address registers */
2226 2225
2227 if(hw->mac_type == e1000_82542_rev2_0) 2226 if (hw->mac_type == e1000_82542_rev2_0)
2228 e1000_enter_82542_rst(adapter); 2227 e1000_enter_82542_rst(adapter);
2229 2228
2230 /* load the first 14 multicast address into the exact filters 1-14 2229 /* load the first 14 multicast address into the exact filters 1-14
@@ -2234,7 +2233,7 @@ e1000_set_multi(struct net_device *netdev)
2234 */ 2233 */
2235 mc_ptr = netdev->mc_list; 2234 mc_ptr = netdev->mc_list;
2236 2235
2237 for(i = 1; i < rar_entries; i++) { 2236 for (i = 1; i < rar_entries; i++) {
2238 if (mc_ptr) { 2237 if (mc_ptr) {
2239 e1000_rar_set(hw, mc_ptr->dmi_addr, i); 2238 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2240 mc_ptr = mc_ptr->next; 2239 mc_ptr = mc_ptr->next;
@@ -2246,17 +2245,17 @@ e1000_set_multi(struct net_device *netdev)
2246 2245
2247 /* clear the old settings from the multicast hash table */ 2246 /* clear the old settings from the multicast hash table */
2248 2247
2249 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) 2248 for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
2250 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 2249 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2251 2250
2252 /* load any remaining addresses into the hash table */ 2251 /* load any remaining addresses into the hash table */
2253 2252
2254 for(; mc_ptr; mc_ptr = mc_ptr->next) { 2253 for (; mc_ptr; mc_ptr = mc_ptr->next) {
2255 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); 2254 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2256 e1000_mta_set(hw, hash_value); 2255 e1000_mta_set(hw, hash_value);
2257 } 2256 }
2258 2257
2259 if(hw->mac_type == e1000_82542_rev2_0) 2258 if (hw->mac_type == e1000_82542_rev2_0)
2260 e1000_leave_82542_rst(adapter); 2259 e1000_leave_82542_rst(adapter);
2261} 2260}
2262 2261
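
Note: filtering in e1000_set_multi() is two-tiered. The first 14 multicast addresses land in exact-match RAR slots 1..14 (RAR[0] holds the station address, and the top slot is held back when the 82571 LAA workaround is live); everything that overflows goes into the 4096-bit multicast table via a 12-bit hash of the address. The hunk shows the fill side; the sketch below adds the slot-clearing else branch the full loop also carries, reconstructed rather than quoted, so treat it as an assumption:

	mc_ptr = netdev->mc_list;
	for (i = 1; i < rar_entries; i++) {
		if (mc_ptr) {
			e1000_rar_set(hw, mc_ptr->dmi_addr, i); /* exact match */
			mc_ptr = mc_ptr->next;
		} else {
			/* stale slot: zero both halves of the RAR pair */
			E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
			E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		}
	}

	/* whatever remains spills into the hash-addressed MTA */
	for (; mc_ptr; mc_ptr = mc_ptr->next)
		e1000_mta_set(hw, e1000_hash_mc_addr(hw, mc_ptr->dmi_addr));
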
@@ -2282,8 +2281,8 @@ e1000_82547_tx_fifo_stall(unsigned long data)
2282 struct net_device *netdev = adapter->netdev; 2281 struct net_device *netdev = adapter->netdev;
2283 uint32_t tctl; 2282 uint32_t tctl;
2284 2283
2285 if(atomic_read(&adapter->tx_fifo_stall)) { 2284 if (atomic_read(&adapter->tx_fifo_stall)) {
2286 if((E1000_READ_REG(&adapter->hw, TDT) == 2285 if ((E1000_READ_REG(&adapter->hw, TDT) ==
2287 E1000_READ_REG(&adapter->hw, TDH)) && 2286 E1000_READ_REG(&adapter->hw, TDH)) &&
2288 (E1000_READ_REG(&adapter->hw, TDFT) == 2287 (E1000_READ_REG(&adapter->hw, TDFT) ==
2289 E1000_READ_REG(&adapter->hw, TDFH)) && 2288 E1000_READ_REG(&adapter->hw, TDFH)) &&
@@ -2335,18 +2334,18 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2335 e1000_check_for_link(&adapter->hw); 2334 e1000_check_for_link(&adapter->hw);
2336 if (adapter->hw.mac_type == e1000_82573) { 2335 if (adapter->hw.mac_type == e1000_82573) {
2337 e1000_enable_tx_pkt_filtering(&adapter->hw); 2336 e1000_enable_tx_pkt_filtering(&adapter->hw);
2338 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) 2337 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2339 e1000_update_mng_vlan(adapter); 2338 e1000_update_mng_vlan(adapter);
2340 } 2339 }
2341 2340
2342 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 2341 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2343 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) 2342 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2344 link = !adapter->hw.serdes_link_down; 2343 link = !adapter->hw.serdes_link_down;
2345 else 2344 else
2346 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; 2345 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2347 2346
2348 if(link) { 2347 if (link) {
2349 if(!netif_carrier_ok(netdev)) { 2348 if (!netif_carrier_ok(netdev)) {
2350 e1000_get_speed_and_duplex(&adapter->hw, 2349 e1000_get_speed_and_duplex(&adapter->hw,
2351 &adapter->link_speed, 2350 &adapter->link_speed,
2352 &adapter->link_duplex); 2351 &adapter->link_duplex);
@@ -2377,7 +2376,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2377 adapter->smartspeed = 0; 2376 adapter->smartspeed = 0;
2378 } 2377 }
2379 } else { 2378 } else {
2380 if(netif_carrier_ok(netdev)) { 2379 if (netif_carrier_ok(netdev)) {
2381 adapter->link_speed = 0; 2380 adapter->link_speed = 0;
2382 adapter->link_duplex = 0; 2381 adapter->link_duplex = 0;
2383 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 2382 DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -2417,12 +2416,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2417 } 2416 }
2418 2417
2419 /* Dynamic mode for Interrupt Throttle Rate (ITR) */ 2418 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
2420 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { 2419 if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
2421 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total 2420 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
2422 * asymmetrical Tx or Rx gets ITR=8000; everyone 2421 * asymmetrical Tx or Rx gets ITR=8000; everyone
2423 * else is between 2000-8000. */ 2422 * else is between 2000-8000. */
2424 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; 2423 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
2425 uint32_t dif = (adapter->gotcl > adapter->gorcl ? 2424 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
2426 adapter->gotcl - adapter->gorcl : 2425 adapter->gotcl - adapter->gorcl :
2427 adapter->gorcl - adapter->gotcl) / 10000; 2426 adapter->gorcl - adapter->gotcl) / 10000;
2428 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2427 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
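
Note: the heuristic just above maps traffic symmetry onto the 2000..8000 range the comment promises: goc is total octets moved in 10KB units, dif is the Tx/Rx imbalance in the same units, and itr scales linearly between the endpoints. A runnable check of the endpoints and the midpoint:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t dynamic_itr(uint32_t gotcl, uint32_t gorcl)
	{
		uint32_t goc = (gotcl + gorcl) / 10000;
		uint32_t dif = (gotcl > gorcl ? gotcl - gorcl
					      : gorcl - gotcl) / 10000;
		return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
	}

	int main(void)
	{
		printf("%u\n", dynamic_itr(500000, 500000)); /* symmetric -> 2000 */
		printf("%u\n", dynamic_itr(750000, 250000)); /* skewed    -> 5000 */
		printf("%u\n", dynamic_itr(1000000, 0));     /* one-sided -> 8000 */
		return 0;
	}
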
@@ -2435,7 +2434,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2435 /* Force detection of hung controller every watchdog period */ 2434 /* Force detection of hung controller every watchdog period */
2436 adapter->detect_tx_hung = TRUE; 2435 adapter->detect_tx_hung = TRUE;
2437 2436
2438 /* With 82571 controllers, LAA may be overwritten due to controller 2437 /* With 82571 controllers, LAA may be overwritten due to controller
2439 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2438 * reset from the other port. Set the appropriate LAA in RAR[0] */
2440 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) 2439 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2441 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); 2440 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
@@ -2464,7 +2463,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2464 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2463 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2465 int err; 2464 int err;
2466 2465
2467 if(skb_shinfo(skb)->tso_size) { 2466 if (skb_shinfo(skb)->tso_size) {
2468 if (skb_header_cloned(skb)) { 2467 if (skb_header_cloned(skb)) {
2469 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2468 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2470 if (err) 2469 if (err)
@@ -2473,7 +2472,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2473 2472
2474 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 2473 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2475 mss = skb_shinfo(skb)->tso_size; 2474 mss = skb_shinfo(skb)->tso_size;
2476 if(skb->protocol == ntohs(ETH_P_IP)) { 2475 if (skb->protocol == ntohs(ETH_P_IP)) {
2477 skb->nh.iph->tot_len = 0; 2476 skb->nh.iph->tot_len = 0;
2478 skb->nh.iph->check = 0; 2477 skb->nh.iph->check = 0;
2479 skb->h.th->check = 2478 skb->h.th->check =
@@ -2485,7 +2484,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2485 cmd_length = E1000_TXD_CMD_IP; 2484 cmd_length = E1000_TXD_CMD_IP;
2486 ipcse = skb->h.raw - skb->data - 1; 2485 ipcse = skb->h.raw - skb->data - 1;
2487#ifdef NETIF_F_TSO_IPV6 2486#ifdef NETIF_F_TSO_IPV6
2488 } else if(skb->protocol == ntohs(ETH_P_IPV6)) { 2487 } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
2489 skb->nh.ipv6h->payload_len = 0; 2488 skb->nh.ipv6h->payload_len = 0;
2490 skb->h.th->check = 2489 skb->h.th->check =
2491 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, 2490 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -2540,7 +2539,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2540 unsigned int i; 2539 unsigned int i;
2541 uint8_t css; 2540 uint8_t css;
2542 2541
2543 if(likely(skb->ip_summed == CHECKSUM_HW)) { 2542 if (likely(skb->ip_summed == CHECKSUM_HW)) {
2544 css = skb->h.raw - skb->data; 2543 css = skb->h.raw - skb->data;
2545 2544
2546 i = tx_ring->next_to_use; 2545 i = tx_ring->next_to_use;
@@ -2580,7 +2579,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2580 2579
2581 i = tx_ring->next_to_use; 2580 i = tx_ring->next_to_use;
2582 2581
2583 while(len) { 2582 while (len) {
2584 buffer_info = &tx_ring->buffer_info[i]; 2583 buffer_info = &tx_ring->buffer_info[i];
2585 size = min(len, max_per_txd); 2584 size = min(len, max_per_txd);
2586#ifdef NETIF_F_TSO 2585#ifdef NETIF_F_TSO
@@ -2596,7 +2595,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2596 2595
2597 /* Workaround for premature desc write-backs 2596 /* Workaround for premature desc write-backs
2598 * in TSO mode. Append 4-byte sentinel desc */ 2597 * in TSO mode. Append 4-byte sentinel desc */
2599 if(unlikely(mss && !nr_frags && size == len && size > 8)) 2598 if (unlikely(mss && !nr_frags && size == len && size > 8))
2600 size -= 4; 2599 size -= 4;
2601#endif 2600#endif
2602 /* work-around for errata 10 and it applies 2601 /* work-around for errata 10 and it applies
@@ -2604,13 +2603,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2604 * The fix is to make sure that the first descriptor of a 2603 * The fix is to make sure that the first descriptor of a
2605 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2604 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2606 */ 2605 */
2607 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2606 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2608 (size > 2015) && count == 0)) 2607 (size > 2015) && count == 0))
2609 size = 2015; 2608 size = 2015;
2610 2609
2611 /* Workaround for potential 82544 hang in PCI-X. Avoid 2610 /* Workaround for potential 82544 hang in PCI-X. Avoid
2612 * terminating buffers within evenly-aligned dwords. */ 2611 * terminating buffers within evenly-aligned dwords. */
2613 if(unlikely(adapter->pcix_82544 && 2612 if (unlikely(adapter->pcix_82544 &&
2614 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2613 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2615 size > 4)) 2614 size > 4))
2616 size -= 4; 2615 size -= 4;
@@ -2626,29 +2625,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2626 len -= size; 2625 len -= size;
2627 offset += size; 2626 offset += size;
2628 count++; 2627 count++;
2629 if(unlikely(++i == tx_ring->count)) i = 0; 2628 if (unlikely(++i == tx_ring->count)) i = 0;
2630 } 2629 }
2631 2630
2632 for(f = 0; f < nr_frags; f++) { 2631 for (f = 0; f < nr_frags; f++) {
2633 struct skb_frag_struct *frag; 2632 struct skb_frag_struct *frag;
2634 2633
2635 frag = &skb_shinfo(skb)->frags[f]; 2634 frag = &skb_shinfo(skb)->frags[f];
2636 len = frag->size; 2635 len = frag->size;
2637 offset = frag->page_offset; 2636 offset = frag->page_offset;
2638 2637
2639 while(len) { 2638 while (len) {
2640 buffer_info = &tx_ring->buffer_info[i]; 2639 buffer_info = &tx_ring->buffer_info[i];
2641 size = min(len, max_per_txd); 2640 size = min(len, max_per_txd);
2642#ifdef NETIF_F_TSO 2641#ifdef NETIF_F_TSO
2643 /* Workaround for premature desc write-backs 2642 /* Workaround for premature desc write-backs
2644 * in TSO mode. Append 4-byte sentinel desc */ 2643 * in TSO mode. Append 4-byte sentinel desc */
2645 if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2644 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2646 size -= 4; 2645 size -= 4;
2647#endif 2646#endif
2648 /* Workaround for potential 82544 hang in PCI-X. 2647 /* Workaround for potential 82544 hang in PCI-X.
2649 * Avoid terminating buffers within evenly-aligned 2648 * Avoid terminating buffers within evenly-aligned
2650 * dwords. */ 2649 * dwords. */
2651 if(unlikely(adapter->pcix_82544 && 2650 if (unlikely(adapter->pcix_82544 &&
2652 !((unsigned long)(frag->page+offset+size-1) & 4) && 2651 !((unsigned long)(frag->page+offset+size-1) & 4) &&
2653 size > 4)) 2652 size > 4))
2654 size -= 4; 2653 size -= 4;
@@ -2665,7 +2664,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2665 len -= size; 2664 len -= size;
2666 offset += size; 2665 offset += size;
2667 count++; 2666 count++;
2668 if(unlikely(++i == tx_ring->count)) i = 0; 2667 if (unlikely(++i == tx_ring->count)) i = 0;
2669 } 2668 }
2670 } 2669 }
2671 2670
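
Note: each pass of the two while (len) loops above carves off at most max_per_txd bytes, then shaves the chunk down for three separate errata: the TSO sentinel (leave 4 bytes if this chunk would finish the packet), the PCI-X errata-10 cap of 2015 bytes on a packet's first descriptor, and the 82544 rule against ending a buffer on an evenly-aligned dword. The trimming order, pulled out into one function; the flag arguments are stand-ins for the adapter state the real loop reads:

	static unsigned int trim_size(unsigned long addr, unsigned int size,
				      unsigned int len, int mss,
				      int first_desc, int pcix,
				      int pcix_82544)
	{
		/* TSO premature write-back: keep a 4-byte sentinel on the
		 * chunk that would end the packet */
		if (mss && size == len && size > 8)
			size -= 4;

		/* PCI-X errata 10: first descriptor under 2016 bytes */
		if (pcix && first_desc && size > 2015)
			size = 2015;

		/* 82544 PCI-X hang: never let the buffer's last byte land
		 * on an evenly-aligned dword */
		if (pcix_82544 && !((addr + size - 1) & 4) && size > 4)
			size -= 4;

		return size;
	}
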
@@ -2685,35 +2684,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2685 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2684 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2686 unsigned int i; 2685 unsigned int i;
2687 2686
2688 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2687 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2689 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2688 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2690 E1000_TXD_CMD_TSE; 2689 E1000_TXD_CMD_TSE;
2691 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2690 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2692 2691
2693 if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2692 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2694 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2693 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2695 } 2694 }
2696 2695
2697 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2696 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2698 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2697 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2699 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2698 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2700 } 2699 }
2701 2700
2702 if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { 2701 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2703 txd_lower |= E1000_TXD_CMD_VLE; 2702 txd_lower |= E1000_TXD_CMD_VLE;
2704 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 2703 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2705 } 2704 }
2706 2705
2707 i = tx_ring->next_to_use; 2706 i = tx_ring->next_to_use;
2708 2707
2709 while(count--) { 2708 while (count--) {
2710 buffer_info = &tx_ring->buffer_info[i]; 2709 buffer_info = &tx_ring->buffer_info[i];
2711 tx_desc = E1000_TX_DESC(*tx_ring, i); 2710 tx_desc = E1000_TX_DESC(*tx_ring, i);
2712 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 2711 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2713 tx_desc->lower.data = 2712 tx_desc->lower.data =
2714 cpu_to_le32(txd_lower | buffer_info->length); 2713 cpu_to_le32(txd_lower | buffer_info->length);
2715 tx_desc->upper.data = cpu_to_le32(txd_upper); 2714 tx_desc->upper.data = cpu_to_le32(txd_upper);
2716 if(unlikely(++i == tx_ring->count)) i = 0; 2715 if (unlikely(++i == tx_ring->count)) i = 0;
2717 } 2716 }
2718 2717
2719 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 2718 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -2748,20 +2747,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
2748 2747
2749 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); 2748 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
2750 2749
2751 if(adapter->link_duplex != HALF_DUPLEX) 2750 if (adapter->link_duplex != HALF_DUPLEX)
2752 goto no_fifo_stall_required; 2751 goto no_fifo_stall_required;
2753 2752
2754 if(atomic_read(&adapter->tx_fifo_stall)) 2753 if (atomic_read(&adapter->tx_fifo_stall))
2755 return 1; 2754 return 1;
2756 2755
2757 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 2756 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
2758 atomic_set(&adapter->tx_fifo_stall, 1); 2757 atomic_set(&adapter->tx_fifo_stall, 1);
2759 return 1; 2758 return 1;
2760 } 2759 }
2761 2760
2762no_fifo_stall_required: 2761no_fifo_stall_required:
2763 adapter->tx_fifo_head += skb_fifo_len; 2762 adapter->tx_fifo_head += skb_fifo_len;
2764 if(adapter->tx_fifo_head >= adapter->tx_fifo_size) 2763 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
2765 adapter->tx_fifo_head -= adapter->tx_fifo_size; 2764 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2766 return 0; 2765 return 0;
2767} 2766}
@@ -2772,27 +2771,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2772{ 2771{
2773 struct e1000_hw *hw = &adapter->hw; 2772 struct e1000_hw *hw = &adapter->hw;
2774 uint16_t length, offset; 2773 uint16_t length, offset;
2775 if(vlan_tx_tag_present(skb)) { 2774 if (vlan_tx_tag_present(skb)) {
2776 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 2775 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2777 ( adapter->hw.mng_cookie.status & 2776 ( adapter->hw.mng_cookie.status &
2778 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 2777 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2779 return 0; 2778 return 0;
2780 } 2779 }
2781 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { 2780 if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2782 struct ethhdr *eth = (struct ethhdr *) skb->data; 2781 struct ethhdr *eth = (struct ethhdr *) skb->data;
2783 if((htons(ETH_P_IP) == eth->h_proto)) { 2782 if ((htons(ETH_P_IP) == eth->h_proto)) {
2784 const struct iphdr *ip = 2783 const struct iphdr *ip =
2785 (struct iphdr *)((uint8_t *)skb->data+14); 2784 (struct iphdr *)((uint8_t *)skb->data+14);
2786 if(IPPROTO_UDP == ip->protocol) { 2785 if (IPPROTO_UDP == ip->protocol) {
2787 struct udphdr *udp = 2786 struct udphdr *udp =
2788 (struct udphdr *)((uint8_t *)ip + 2787 (struct udphdr *)((uint8_t *)ip +
2789 (ip->ihl << 2)); 2788 (ip->ihl << 2));
2790 if(ntohs(udp->dest) == 67) { 2789 if (ntohs(udp->dest) == 67) {
2791 offset = (uint8_t *)udp + 8 - skb->data; 2790 offset = (uint8_t *)udp + 8 - skb->data;
2792 length = skb->len - offset; 2791 length = skb->len - offset;
2793 2792
2794 return e1000_mng_write_dhcp_info(hw, 2793 return e1000_mng_write_dhcp_info(hw,
2795 (uint8_t *)udp + 8, 2794 (uint8_t *)udp + 8,
2796 length); 2795 length);
2797 } 2796 }
2798 } 2797 }
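
Note: the header walk above assumes an untagged Ethernet II frame: the IP header begins at byte 14, the UDP header sits ihl*4 bytes into the IP header, and anything addressed to UDP port 67 (the BOOTP/DHCP server port) past the 8-byte UDP header is handed to the management firmware. The same walk as a standalone parser, with bounds checks elided just as in the original:

	#include <stdint.h>
	#include <stddef.h>
	#include <arpa/inet.h>      /* ntohs */
	#include <netinet/ip.h>     /* struct iphdr */
	#include <netinet/udp.h>    /* struct udphdr */

	/* Return the offset of the DHCP payload in `data`, or 0 if this
	 * is not UDP traffic bound for a DHCP server. */
	static size_t dhcp_payload_offset(const uint8_t *data)
	{
		const struct iphdr *ip = (const struct iphdr *)(data + 14);
		const struct udphdr *udp;

		if (ip->protocol != IPPROTO_UDP)
			return 0;
		udp = (const struct udphdr *)((const uint8_t *)ip + (ip->ihl << 2));
		if (ntohs(udp->dest) != 67)
			return 0;
		return (const uint8_t *)udp + 8 - data;  /* skip UDP header */
	}
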
@@ -2815,7 +2814,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2815 unsigned int nr_frags = 0; 2814 unsigned int nr_frags = 0;
2816 unsigned int mss = 0; 2815 unsigned int mss = 0;
2817 int count = 0; 2816 int count = 0;
2818 int tso; 2817 int tso;
2819 unsigned int f; 2818 unsigned int f;
2820 len -= skb->data_len; 2819 len -= skb->data_len;
2821 2820
@@ -2838,7 +2837,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2838 * 4 = ceil(buffer len/mss). To make sure we don't 2837 * 4 = ceil(buffer len/mss). To make sure we don't
2839 * overrun the FIFO, adjust the max buffer len if mss 2838 * overrun the FIFO, adjust the max buffer len if mss
2840 * drops. */ 2839 * drops. */
2841 if(mss) { 2840 if (mss) {
2842 uint8_t hdr_len; 2841 uint8_t hdr_len;
2843 max_per_txd = min(mss << 2, max_per_txd); 2842 max_per_txd = min(mss << 2, max_per_txd);
2844 max_txd_pwr = fls(max_per_txd) - 1; 2843 max_txd_pwr = fls(max_per_txd) - 1;
@@ -2861,12 +2860,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2861 } 2860 }
2862 } 2861 }
2863 2862
2864 if((mss) || (skb->ip_summed == CHECKSUM_HW))
2865 /* reserve a descriptor for the offload context */ 2863 /* reserve a descriptor for the offload context */
2864 if ((mss) || (skb->ip_summed == CHECKSUM_HW))
2866 count++; 2865 count++;
2867 count++; 2866 count++;
2868#else 2867#else
2869 if(skb->ip_summed == CHECKSUM_HW) 2868 if (skb->ip_summed == CHECKSUM_HW)
2870 count++; 2869 count++;
2871#endif 2870#endif
2872 2871
@@ -2879,24 +2878,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2879 2878
2880 count += TXD_USE_COUNT(len, max_txd_pwr); 2879 count += TXD_USE_COUNT(len, max_txd_pwr);
2881 2880
2882 if(adapter->pcix_82544) 2881 if (adapter->pcix_82544)
2883 count++; 2882 count++;
2884 2883
2885 /* work-around for errata 10 and it applies to all controllers 2884 /* work-around for errata 10 and it applies to all controllers
2886 * in PCI-X mode, so add one more descriptor to the count 2885 * in PCI-X mode, so add one more descriptor to the count
2887 */ 2886 */
2888 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 2887 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2889 (len > 2015))) 2888 (len > 2015)))
2890 count++; 2889 count++;
2891 2890
2892 nr_frags = skb_shinfo(skb)->nr_frags; 2891 nr_frags = skb_shinfo(skb)->nr_frags;
2893 for(f = 0; f < nr_frags; f++) 2892 for (f = 0; f < nr_frags; f++)
2894 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 2893 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
2895 max_txd_pwr); 2894 max_txd_pwr);
2896 if(adapter->pcix_82544) 2895 if (adapter->pcix_82544)
2897 count += nr_frags; 2896 count += nr_frags;
2898 2897
2899 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2898 if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2900 e1000_transfer_dhcp_info(adapter, skb); 2899 e1000_transfer_dhcp_info(adapter, skb);
2901 2900
2902 local_irq_save(flags); 2901 local_irq_save(flags);
@@ -2914,8 +2913,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2914 return NETDEV_TX_BUSY; 2913 return NETDEV_TX_BUSY;
2915 } 2914 }
2916 2915
2917 if(unlikely(adapter->hw.mac_type == e1000_82547)) { 2916 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
2918 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 2917 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2919 netif_stop_queue(netdev); 2918 netif_stop_queue(netdev);
2920 mod_timer(&adapter->tx_fifo_stall_timer, jiffies); 2919 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2921 spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 2920 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -2923,13 +2922,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2923 } 2922 }
2924 } 2923 }
2925 2924
2926 if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 2925 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2927 tx_flags |= E1000_TX_FLAGS_VLAN; 2926 tx_flags |= E1000_TX_FLAGS_VLAN;
2928 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 2927 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2929 } 2928 }
2930 2929
2931 first = tx_ring->next_to_use; 2930 first = tx_ring->next_to_use;
2932 2931
2933 tso = e1000_tso(adapter, tx_ring, skb); 2932 tso = e1000_tso(adapter, tx_ring, skb);
2934 if (tso < 0) { 2933 if (tso < 0) {
2935 dev_kfree_skb_any(skb); 2934 dev_kfree_skb_any(skb);
@@ -3018,9 +3017,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3018 struct e1000_adapter *adapter = netdev_priv(netdev); 3017 struct e1000_adapter *adapter = netdev_priv(netdev);
3019 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3018 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3020 3019
3021 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3020 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3022 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3021 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3023 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3022 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
3024 return -EINVAL; 3023 return -EINVAL;
3025 } 3024 }
3026 3025
@@ -3068,7 +3067,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3068 3067
3069 netdev->mtu = new_mtu; 3068 netdev->mtu = new_mtu;
3070 3069
3071 if(netif_running(netdev)) { 3070 if (netif_running(netdev)) {
3072 e1000_down(adapter); 3071 e1000_down(adapter);
3073 e1000_up(adapter); 3072 e1000_up(adapter);
3074 } 3073 }
@@ -3155,7 +3154,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3155 hw->collision_delta = E1000_READ_REG(hw, COLC); 3154 hw->collision_delta = E1000_READ_REG(hw, COLC);
3156 adapter->stats.colc += hw->collision_delta; 3155 adapter->stats.colc += hw->collision_delta;
3157 3156
3158 if(hw->mac_type >= e1000_82543) { 3157 if (hw->mac_type >= e1000_82543) {
3159 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); 3158 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3160 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); 3159 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3161 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); 3160 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
@@ -3163,7 +3162,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3163 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 3162 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
3164 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 3163 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
3165 } 3164 }
3166 if(hw->mac_type > e1000_82547_rev_2) { 3165 if (hw->mac_type > e1000_82547_rev_2) {
3167 adapter->stats.iac += E1000_READ_REG(hw, IAC); 3166 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3168 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); 3167 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
3169 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); 3168 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
@@ -3207,14 +3206,14 @@ e1000_update_stats(struct e1000_adapter *adapter)
3207 3206
3208 /* Phy Stats */ 3207 /* Phy Stats */
3209 3208
3210 if(hw->media_type == e1000_media_type_copper) { 3209 if (hw->media_type == e1000_media_type_copper) {
3211 if((adapter->link_speed == SPEED_1000) && 3210 if ((adapter->link_speed == SPEED_1000) &&
3212 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3211 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3213 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3212 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3214 adapter->phy_stats.idle_errors += phy_tmp; 3213 adapter->phy_stats.idle_errors += phy_tmp;
3215 } 3214 }
3216 3215
3217 if((hw->mac_type <= e1000_82546) && 3216 if ((hw->mac_type <= e1000_82546) &&
3218 (hw->phy_type == e1000_phy_m88) && 3217 (hw->phy_type == e1000_phy_m88) &&
3219 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3218 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3220 adapter->phy_stats.receive_errors += phy_tmp; 3219 adapter->phy_stats.receive_errors += phy_tmp;
@@ -3279,7 +3278,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3279 return IRQ_NONE; /* Not our interrupt */ 3278 return IRQ_NONE; /* Not our interrupt */
3280 } 3279 }
3281 3280
3282 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3281 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3283 hw->get_link_status = 1; 3282 hw->get_link_status = 1;
3284 mod_timer(&adapter->watchdog_timer, jiffies); 3283 mod_timer(&adapter->watchdog_timer, jiffies);
3285 } 3284 }
@@ -3311,26 +3310,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3311 3310
3312#else /* if !CONFIG_E1000_NAPI */ 3311#else /* if !CONFIG_E1000_NAPI */
3313 /* Writing IMC and IMS is needed for 82547. 3312 /* Writing IMC and IMS is needed for 82547.
3314 Due to Hub Link bus being occupied, an interrupt 3313 * Due to Hub Link bus being occupied, an interrupt
3315 de-assertion message is not able to be sent. 3314 * de-assertion message is not able to be sent.
3316 When an interrupt assertion message is generated later, 3315 * When an interrupt assertion message is generated later,
3317 two messages are re-ordered and sent out. 3316 * two messages are re-ordered and sent out.
3318 That causes APIC to think 82547 is in de-assertion 3317 * That causes APIC to think 82547 is in de-assertion
3319 state, while 82547 is in assertion state, resulting 3318 * state, while 82547 is in assertion state, resulting
3320 in dead lock. Writing IMC forces 82547 into 3319 * in dead lock. Writing IMC forces 82547 into
3321 de-assertion state. 3320 * de-assertion state.
3322 */ 3321 */
3323 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ 3322 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
3324 atomic_inc(&adapter->irq_sem); 3323 atomic_inc(&adapter->irq_sem);
3325 E1000_WRITE_REG(hw, IMC, ~0); 3324 E1000_WRITE_REG(hw, IMC, ~0);
3326 } 3325 }
3327 3326
3328 for(i = 0; i < E1000_MAX_INTR; i++) 3327 for (i = 0; i < E1000_MAX_INTR; i++)
3329 if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3328 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3330 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3329 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3331 break; 3330 break;
3332 3331
3333 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) 3332 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3334 e1000_irq_enable(adapter); 3333 e1000_irq_enable(adapter);
3335 3334
3336#endif /* CONFIG_E1000_NAPI */ 3335#endif /* CONFIG_E1000_NAPI */
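The comment block reflowed above explains why the non-NAPI path masks the 82547 first: with the Hub Link bus occupied, the de-assertion message is lost, a later assertion gets reordered, and the APIC deadlocks. Writing IMC forces de-assertion, after which the handler polls both rings a bounded number of times before re-enabling interrupts. A user-space model of that bounded loop, with clean_rx()/clean_tx() as hypothetical stand-ins for adapter->clean_rx() and e1000_clean_tx_irq():

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_INTR 10   /* plays the role of E1000_MAX_INTR: bound the loop */

    /* Hypothetical stand-ins: report true while work remains. */
    static bool clean_rx(void) { static int left = 3; return left-- > 0; }
    static bool clean_tx(void) { static int left = 2; return left-- > 0; }

    int main(void)
    {
        for (int i = 0; i < MAX_INTR; i++) {
            bool rx = clean_rx(), tx = clean_tx();
            printf("pass %d: rx=%d tx=%d\n", i, rx, tx);
            if (!rx && !tx)      /* neither ring made progress */
                break;           /* stop polling; IRQs get re-enabled */
        }
        return 0;
    }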
@@ -3382,9 +3381,9 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3382 3381
3383 *budget -= work_done; 3382 *budget -= work_done;
3384 poll_dev->quota -= work_done; 3383 poll_dev->quota -= work_done;
3385 3384
3386 /* If no Tx and not enough Rx work done, exit the polling mode */ 3385 /* If no Tx and not enough Rx work done, exit the polling mode */
3387 if((!tx_cleaned && (work_done == 0)) || 3386 if ((!tx_cleaned && (work_done == 0)) ||
3388 !netif_running(adapter->netdev)) { 3387 !netif_running(adapter->netdev)) {
3389quit_polling: 3388quit_polling:
3390 netif_rx_complete(poll_dev); 3389 netif_rx_complete(poll_dev);
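e1000_clean() follows the 2.6.x-era dev->poll() contract: consume at most min(*budget, dev->quota) packets, charge the work to both counters, and call netif_rx_complete() only once the rings are drained, at which point interrupts come back on. A minimal user-space model of the accounting (the struct and names are illustrative):

    #include <stdio.h>

    struct fake_dev { int quota; };

    /* Returns 1 to stay on the poll list, 0 when polling is finished,
     * mirroring the return convention of the old poll() method. */
    static int poll_once(struct fake_dev *dev, int *budget, int pending)
    {
        int work_to_do = *budget < dev->quota ? *budget : dev->quota;
        int work_done = pending < work_to_do ? pending : work_to_do;

        *budget -= work_done;      /* charge the softirq's global budget */
        dev->quota -= work_done;   /* and this device's own quota */

        return work_done < work_to_do ? 0 : 1;
    }

    int main(void)
    {
        struct fake_dev dev = { .quota = 16 };
        int budget = 64;
        int again = poll_once(&dev, &budget, 40);
        printf("again=%d budget=%d\n", again, budget);  /* again=1 budget=48 */
        return 0;
    }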
@@ -3416,7 +3415,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3416 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3415 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3417 3416
3418 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 3417 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3419 for(cleaned = FALSE; !cleaned; ) { 3418 for (cleaned = FALSE; !cleaned; ) {
3420 tx_desc = E1000_TX_DESC(*tx_ring, i); 3419 tx_desc = E1000_TX_DESC(*tx_ring, i);
3421 buffer_info = &tx_ring->buffer_info[i]; 3420 buffer_info = &tx_ring->buffer_info[i];
3422 cleaned = (i == eop); 3421 cleaned = (i == eop);
@@ -3427,7 +3426,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3427 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3426 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3428 memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); 3427 memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
3429 3428
3430 if(unlikely(++i == tx_ring->count)) i = 0; 3429 if (unlikely(++i == tx_ring->count)) i = 0;
3431 } 3430 }
3432 3431
3433#ifdef CONFIG_E1000_MQ 3432#ifdef CONFIG_E1000_MQ
@@ -3442,7 +3441,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3442 3441
3443 spin_lock(&tx_ring->tx_lock); 3442 spin_lock(&tx_ring->tx_lock);
3444 3443
3445 if(unlikely(cleaned && netif_queue_stopped(netdev) && 3444 if (unlikely(cleaned && netif_queue_stopped(netdev) &&
3446 netif_carrier_ok(netdev))) 3445 netif_carrier_ok(netdev)))
3447 netif_wake_queue(netdev); 3446 netif_wake_queue(netdev);
3448 3447
@@ -3504,21 +3503,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
3504 skb->ip_summed = CHECKSUM_NONE; 3503 skb->ip_summed = CHECKSUM_NONE;
3505 3504
3506 /* 82543 or newer only */ 3505 /* 82543 or newer only */
3507 if(unlikely(adapter->hw.mac_type < e1000_82543)) return; 3506 if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
3508 /* Ignore Checksum bit is set */ 3507 /* Ignore Checksum bit is set */
3509 if(unlikely(status & E1000_RXD_STAT_IXSM)) return; 3508 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3510 /* TCP/UDP checksum error bit is set */ 3509 /* TCP/UDP checksum error bit is set */
3511 if(unlikely(errors & E1000_RXD_ERR_TCPE)) { 3510 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3512 /* let the stack verify checksum errors */ 3511 /* let the stack verify checksum errors */
3513 adapter->hw_csum_err++; 3512 adapter->hw_csum_err++;
3514 return; 3513 return;
3515 } 3514 }
3516 /* TCP/UDP Checksum has not been calculated */ 3515 /* TCP/UDP Checksum has not been calculated */
3517 if(adapter->hw.mac_type <= e1000_82547_rev_2) { 3516 if (adapter->hw.mac_type <= e1000_82547_rev_2) {
3518 if(!(status & E1000_RXD_STAT_TCPCS)) 3517 if (!(status & E1000_RXD_STAT_TCPCS))
3519 return; 3518 return;
3520 } else { 3519 } else {
3521 if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) 3520 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
3522 return; 3521 return;
3523 } 3522 }
3524 /* It must be a TCP or UDP packet with a valid checksum */ 3523 /* It must be a TCP or UDP packet with a valid checksum */
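The ladder in e1000_rx_checksum() reads: skip pre-82543 parts, skip when hardware flags the descriptor as ignore-checksum (IXSM), fall back to software verification on a reported TCP/UDP error, and otherwise demand the relevant "checksum calculated" status bit before trusting the hardware. A compact model; the bit values are assumed to match the driver's E1000_RXD_* definitions and should be checked against e1000_hw.h before reuse:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RXD_STAT_IXSM  0x04  /* assumed: ignore checksum indication */
    #define RXD_STAT_UDPCS 0x10  /* assumed: UDP checksum calculated */
    #define RXD_STAT_TCPCS 0x20  /* assumed: TCP checksum calculated */
    #define RXD_ERR_TCPE   0x20  /* assumed: TCP/UDP checksum error */

    /* True when the stack may skip software verification. */
    static bool hw_csum_ok(uint8_t status, uint8_t errors, bool newer_mac)
    {
        if (status & RXD_STAT_IXSM)
            return false;                    /* hardware says ignore */
        if (errors & RXD_ERR_TCPE)
            return false;                    /* let the stack verify */
        if (!newer_mac)
            return status & RXD_STAT_TCPCS;  /* older MACs: TCP only */
        return status & (RXD_STAT_TCPCS | RXD_STAT_UDPCS);
    }

    int main(void)
    {
        printf("%d\n", hw_csum_ok(RXD_STAT_TCPCS, 0, false));  /* 1 */
        return 0;
    }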
@@ -3571,7 +3570,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3571 struct sk_buff *skb; 3570 struct sk_buff *skb;
3572 u8 status; 3571 u8 status;
3573#ifdef CONFIG_E1000_NAPI 3572#ifdef CONFIG_E1000_NAPI
3574 if(*work_done >= work_to_do) 3573 if (*work_done >= work_to_do)
3575 break; 3574 break;
3576 (*work_done)++; 3575 (*work_done)++;
3577#endif 3576#endif
@@ -3625,7 +3624,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3625 } 3624 }
3626 } 3625 }
3627 3626
3628 if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { 3627 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
3629 last_byte = *(skb->data + length - 1); 3628 last_byte = *(skb->data + length - 1);
3630 if (TBI_ACCEPT(&adapter->hw, status, 3629 if (TBI_ACCEPT(&adapter->hw, status,
3631 rx_desc->errors, length, last_byte)) { 3630 rx_desc->errors, length, last_byte)) {
@@ -3672,9 +3671,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3672 (uint32_t)(status) | 3671 (uint32_t)(status) |
3673 ((uint32_t)(rx_desc->errors) << 24), 3672 ((uint32_t)(rx_desc->errors) << 24),
3674 rx_desc->csum, skb); 3673 rx_desc->csum, skb);
3674
3675 skb->protocol = eth_type_trans(skb, netdev); 3675 skb->protocol = eth_type_trans(skb, netdev);
3676#ifdef CONFIG_E1000_NAPI 3676#ifdef CONFIG_E1000_NAPI
3677 if(unlikely(adapter->vlgrp && 3677 if (unlikely(adapter->vlgrp &&
3678 (status & E1000_RXD_STAT_VP))) { 3678 (status & E1000_RXD_STAT_VP))) {
3679 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3679 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3680 le16_to_cpu(rx_desc->special) & 3680 le16_to_cpu(rx_desc->special) &
@@ -3683,7 +3683,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3683 netif_receive_skb(skb); 3683 netif_receive_skb(skb);
3684 } 3684 }
3685#else /* CONFIG_E1000_NAPI */ 3685#else /* CONFIG_E1000_NAPI */
3686 if(unlikely(adapter->vlgrp && 3686 if (unlikely(adapter->vlgrp &&
3687 (status & E1000_RXD_STAT_VP))) { 3687 (status & E1000_RXD_STAT_VP))) {
3688 vlan_hwaccel_rx(skb, adapter->vlgrp, 3688 vlan_hwaccel_rx(skb, adapter->vlgrp,
3689 le16_to_cpu(rx_desc->special) & 3689 le16_to_cpu(rx_desc->special) &
@@ -3748,12 +3748,12 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3748 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3748 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3749 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3749 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3750 3750
3751 while(staterr & E1000_RXD_STAT_DD) { 3751 while (staterr & E1000_RXD_STAT_DD) {
3752 buffer_info = &rx_ring->buffer_info[i]; 3752 buffer_info = &rx_ring->buffer_info[i];
3753 ps_page = &rx_ring->ps_page[i]; 3753 ps_page = &rx_ring->ps_page[i];
3754 ps_page_dma = &rx_ring->ps_page_dma[i]; 3754 ps_page_dma = &rx_ring->ps_page_dma[i];
3755#ifdef CONFIG_E1000_NAPI 3755#ifdef CONFIG_E1000_NAPI
3756 if(unlikely(*work_done >= work_to_do)) 3756 if (unlikely(*work_done >= work_to_do))
3757 break; 3757 break;
3758 (*work_done)++; 3758 (*work_done)++;
3759#endif 3759#endif
@@ -3765,21 +3765,21 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3765 3765
3766 skb = buffer_info->skb; 3766 skb = buffer_info->skb;
3767 3767
3768 if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) { 3768 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3769 E1000_DBG("%s: Packet Split buffers didn't pick up" 3769 E1000_DBG("%s: Packet Split buffers didn't pick up"
3770 " the full packet\n", netdev->name); 3770 " the full packet\n", netdev->name);
3771 dev_kfree_skb_irq(skb); 3771 dev_kfree_skb_irq(skb);
3772 goto next_desc; 3772 goto next_desc;
3773 } 3773 }
3774 3774
3775 if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3775 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3776 dev_kfree_skb_irq(skb); 3776 dev_kfree_skb_irq(skb);
3777 goto next_desc; 3777 goto next_desc;
3778 } 3778 }
3779 3779
3780 length = le16_to_cpu(rx_desc->wb.middle.length0); 3780 length = le16_to_cpu(rx_desc->wb.middle.length0);
3781 3781
3782 if(unlikely(!length)) { 3782 if (unlikely(!length)) {
3783 E1000_DBG("%s: Last part of the packet spanning" 3783 E1000_DBG("%s: Last part of the packet spanning"
3784 " multiple descriptors\n", netdev->name); 3784 " multiple descriptors\n", netdev->name);
3785 dev_kfree_skb_irq(skb); 3785 dev_kfree_skb_irq(skb);
@@ -3789,8 +3789,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3789 /* Good Receive */ 3789 /* Good Receive */
3790 skb_put(skb, length); 3790 skb_put(skb, length);
3791 3791
3792 for(j = 0; j < adapter->rx_ps_pages; j++) { 3792 for (j = 0; j < adapter->rx_ps_pages; j++) {
3793 if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) 3793 if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
3794 break; 3794 break;
3795 3795
3796 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], 3796 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
@@ -3810,11 +3810,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3810 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 3810 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
3811 skb->protocol = eth_type_trans(skb, netdev); 3811 skb->protocol = eth_type_trans(skb, netdev);
3812 3812
3813 if(likely(rx_desc->wb.upper.header_status & 3813 if (likely(rx_desc->wb.upper.header_status &
3814 E1000_RXDPS_HDRSTAT_HDRSP)) 3814 E1000_RXDPS_HDRSTAT_HDRSP))
3815 adapter->rx_hdr_split++; 3815 adapter->rx_hdr_split++;
3816#ifdef CONFIG_E1000_NAPI 3816#ifdef CONFIG_E1000_NAPI
3817 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3817 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3818 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3818 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3819 le16_to_cpu(rx_desc->wb.middle.vlan) & 3819 le16_to_cpu(rx_desc->wb.middle.vlan) &
3820 E1000_RXD_SPC_VLAN_MASK); 3820 E1000_RXD_SPC_VLAN_MASK);
@@ -3822,7 +3822,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3822 netif_receive_skb(skb); 3822 netif_receive_skb(skb);
3823 } 3823 }
3824#else /* CONFIG_E1000_NAPI */ 3824#else /* CONFIG_E1000_NAPI */
3825 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3825 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3826 vlan_hwaccel_rx(skb, adapter->vlgrp, 3826 vlan_hwaccel_rx(skb, adapter->vlgrp,
3827 le16_to_cpu(rx_desc->wb.middle.vlan) & 3827 le16_to_cpu(rx_desc->wb.middle.vlan) &
3828 E1000_RXD_SPC_VLAN_MASK); 3828 E1000_RXD_SPC_VLAN_MASK);
@@ -3887,7 +3887,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3887 } 3887 }
3888 3888
3889 3889
3890 if(unlikely(!skb)) { 3890 if (unlikely(!skb)) {
3891 /* Better luck next round */ 3891 /* Better luck next round */
3892 adapter->alloc_rx_buff_failed++; 3892 adapter->alloc_rx_buff_failed++;
3893 break; 3893 break;
@@ -3952,7 +3952,8 @@ map_skb:
3952 rx_desc = E1000_RX_DESC(*rx_ring, i); 3952 rx_desc = E1000_RX_DESC(*rx_ring, i);
3953 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3953 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3954 3954
3955 if(unlikely(++i == rx_ring->count)) i = 0; 3955 if (unlikely(++i == rx_ring->count))
3956 i = 0;
3956 buffer_info = &rx_ring->buffer_info[i]; 3957 buffer_info = &rx_ring->buffer_info[i];
3957 } 3958 }
3958 3959
@@ -3997,7 +3998,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3997 while (cleaned_count--) { 3998 while (cleaned_count--) {
3998 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3999 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3999 4000
4000 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 4001 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
4001 if (j < adapter->rx_ps_pages) { 4002 if (j < adapter->rx_ps_pages) {
4002 if (likely(!ps_page->ps_page[j])) { 4003 if (likely(!ps_page->ps_page[j])) {
4003 ps_page->ps_page[j] = 4004 ps_page->ps_page[j] =
@@ -4013,7 +4014,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4013 PCI_DMA_FROMDEVICE); 4014 PCI_DMA_FROMDEVICE);
4014 } 4015 }
4015 /* Refresh the desc even if buffer_addrs didn't 4016 /* Refresh the desc even if buffer_addrs didn't
4016 * change because each write-back erases 4017 * change because each write-back erases
4017 * this info. 4018 * this info.
4018 */ 4019 */
4019 rx_desc->read.buffer_addr[j+1] = 4020 rx_desc->read.buffer_addr[j+1] =
@@ -4045,7 +4046,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4045 4046
4046 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 4047 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
4047 4048
4048 if(unlikely(++i == rx_ring->count)) i = 0; 4049 if (unlikely(++i == rx_ring->count)) i = 0;
4049 buffer_info = &rx_ring->buffer_info[i]; 4050 buffer_info = &rx_ring->buffer_info[i];
4050 ps_page = &rx_ring->ps_page[i]; 4051 ps_page = &rx_ring->ps_page[i];
4051 ps_page_dma = &rx_ring->ps_page_dma[i]; 4052 ps_page_dma = &rx_ring->ps_page_dma[i];
@@ -4080,24 +4081,24 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4080 uint16_t phy_status; 4081 uint16_t phy_status;
4081 uint16_t phy_ctrl; 4082 uint16_t phy_ctrl;
4082 4083
4083 if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || 4084 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
4084 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) 4085 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
4085 return; 4086 return;
4086 4087
4087 if(adapter->smartspeed == 0) { 4088 if (adapter->smartspeed == 0) {
4088 /* If Master/Slave config fault is asserted twice, 4089 /* If Master/Slave config fault is asserted twice,
4089 * we assume back-to-back */ 4090 * we assume back-to-back */
4090 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4091 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4091 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4092 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4092 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); 4093 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4093 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4094 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4094 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4095 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4095 if(phy_ctrl & CR_1000T_MS_ENABLE) { 4096 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4096 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4097 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4097 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, 4098 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
4098 phy_ctrl); 4099 phy_ctrl);
4099 adapter->smartspeed++; 4100 adapter->smartspeed++;
4100 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4101 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4101 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, 4102 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
4102 &phy_ctrl)) { 4103 &phy_ctrl)) {
4103 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4104 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
@@ -4107,12 +4108,12 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4107 } 4108 }
4108 } 4109 }
4109 return; 4110 return;
4110 } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4111 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4111 /* If still no link, perhaps using 2/3 pair cable */ 4112 /* If still no link, perhaps using 2/3 pair cable */
4112 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); 4113 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4113 phy_ctrl |= CR_1000T_MS_ENABLE; 4114 phy_ctrl |= CR_1000T_MS_ENABLE;
4114 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); 4115 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
4115 if(!e1000_phy_setup_autoneg(&adapter->hw) && 4116 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4116 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { 4117 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
4117 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4118 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4118 MII_CR_RESTART_AUTO_NEG); 4119 MII_CR_RESTART_AUTO_NEG);
@@ -4120,7 +4121,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
4120 } 4121 }
4121 } 4122 }
4122 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4123 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4123 if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4124 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4124 adapter->smartspeed = 0; 4125 adapter->smartspeed = 0;
4125} 4126}
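e1000_smartspeed() is a watchdog-driven state machine: after back-to-back master/slave configuration faults it clears the 1000BASE-T MS-enable bit and restarts autonegotiation; if the link is still down when the counter reaches the downshift threshold it sets the bit back and restarts again; at E1000_SMARTSPEED_MAX the counter wraps so the cycle can repeat. A user-space model (the threshold values here are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define SMARTSPEED_DOWNSHIFT 3    /* illustrative */
    #define SMARTSPEED_MAX       15   /* illustrative */

    static void smartspeed_tick(unsigned int *state, bool ms_fault)
    {
        if (*state == 0) {
            if (!ms_fault)
                return;               /* no fault: stay idle */
            printf("clear MS enable, restart autoneg\n");
            (*state)++;
            return;
        }
        if (*state == SMARTSPEED_DOWNSHIFT)
            printf("still no link: set MS enable, restart autoneg\n");
        if ((*state)++ == SMARTSPEED_MAX)
            *state = 0;               /* start the cycle over */
    }

    int main(void)
    {
        unsigned int state = 0;
        for (int t = 0; t < 20; t++)
            smartspeed_tick(&state, true);
        return 0;
    }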
4126 4127
@@ -4161,7 +4162,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4161 uint16_t spddplx; 4162 uint16_t spddplx;
4162 unsigned long flags; 4163 unsigned long flags;
4163 4164
4164 if(adapter->hw.media_type != e1000_media_type_copper) 4165 if (adapter->hw.media_type != e1000_media_type_copper)
4165 return -EOPNOTSUPP; 4166 return -EOPNOTSUPP;
4166 4167
4167 switch (cmd) { 4168 switch (cmd) {
@@ -4169,10 +4170,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4169 data->phy_id = adapter->hw.phy_addr; 4170 data->phy_id = adapter->hw.phy_addr;
4170 break; 4171 break;
4171 case SIOCGMIIREG: 4172 case SIOCGMIIREG:
4172 if(!capable(CAP_NET_ADMIN)) 4173 if (!capable(CAP_NET_ADMIN))
4173 return -EPERM; 4174 return -EPERM;
4174 spin_lock_irqsave(&adapter->stats_lock, flags); 4175 spin_lock_irqsave(&adapter->stats_lock, flags);
4175 if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, 4176 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4176 &data->val_out)) { 4177 &data->val_out)) {
4177 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4178 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4178 return -EIO; 4179 return -EIO;
@@ -4180,23 +4181,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4180 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4181 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4181 break; 4182 break;
4182 case SIOCSMIIREG: 4183 case SIOCSMIIREG:
4183 if(!capable(CAP_NET_ADMIN)) 4184 if (!capable(CAP_NET_ADMIN))
4184 return -EPERM; 4185 return -EPERM;
4185 if(data->reg_num & ~(0x1F)) 4186 if (data->reg_num & ~(0x1F))
4186 return -EFAULT; 4187 return -EFAULT;
4187 mii_reg = data->val_in; 4188 mii_reg = data->val_in;
4188 spin_lock_irqsave(&adapter->stats_lock, flags); 4189 spin_lock_irqsave(&adapter->stats_lock, flags);
4189 if(e1000_write_phy_reg(&adapter->hw, data->reg_num, 4190 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
4190 mii_reg)) { 4191 mii_reg)) {
4191 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4192 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4192 return -EIO; 4193 return -EIO;
4193 } 4194 }
4194 if(adapter->hw.phy_type == e1000_phy_m88) { 4195 if (adapter->hw.phy_type == e1000_phy_m88) {
4195 switch (data->reg_num) { 4196 switch (data->reg_num) {
4196 case PHY_CTRL: 4197 case PHY_CTRL:
4197 if(mii_reg & MII_CR_POWER_DOWN) 4198 if (mii_reg & MII_CR_POWER_DOWN)
4198 break; 4199 break;
4199 if(mii_reg & MII_CR_AUTO_NEG_EN) { 4200 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4200 adapter->hw.autoneg = 1; 4201 adapter->hw.autoneg = 1;
4201 adapter->hw.autoneg_advertised = 0x2F; 4202 adapter->hw.autoneg_advertised = 0x2F;
4202 } else { 4203 } else {
@@ -4211,14 +4212,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4211 HALF_DUPLEX; 4212 HALF_DUPLEX;
4212 retval = e1000_set_spd_dplx(adapter, 4213 retval = e1000_set_spd_dplx(adapter,
4213 spddplx); 4214 spddplx);
4214 if(retval) { 4215 if (retval) {
4215 spin_unlock_irqrestore( 4216 spin_unlock_irqrestore(
4216 &adapter->stats_lock, 4217 &adapter->stats_lock,
4217 flags); 4218 flags);
4218 return retval; 4219 return retval;
4219 } 4220 }
4220 } 4221 }
4221 if(netif_running(adapter->netdev)) { 4222 if (netif_running(adapter->netdev)) {
4222 e1000_down(adapter); 4223 e1000_down(adapter);
4223 e1000_up(adapter); 4224 e1000_up(adapter);
4224 } else 4225 } else
@@ -4226,7 +4227,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4226 break; 4227 break;
4227 case M88E1000_PHY_SPEC_CTRL: 4228 case M88E1000_PHY_SPEC_CTRL:
4228 case M88E1000_EXT_PHY_SPEC_CTRL: 4229 case M88E1000_EXT_PHY_SPEC_CTRL:
4229 if(e1000_phy_reset(&adapter->hw)) { 4230 if (e1000_phy_reset(&adapter->hw)) {
4230 spin_unlock_irqrestore( 4231 spin_unlock_irqrestore(
4231 &adapter->stats_lock, flags); 4232 &adapter->stats_lock, flags);
4232 return -EIO; 4233 return -EIO;
@@ -4236,9 +4237,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4236 } else { 4237 } else {
4237 switch (data->reg_num) { 4238 switch (data->reg_num) {
4238 case PHY_CTRL: 4239 case PHY_CTRL:
4239 if(mii_reg & MII_CR_POWER_DOWN) 4240 if (mii_reg & MII_CR_POWER_DOWN)
4240 break; 4241 break;
4241 if(netif_running(adapter->netdev)) { 4242 if (netif_running(adapter->netdev)) {
4242 e1000_down(adapter); 4243 e1000_down(adapter);
4243 e1000_up(adapter); 4244 e1000_up(adapter);
4244 } else 4245 } else
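For reference, the user-space counterpart of the SIOCGMIIREG path above: discover the PHY address with SIOCGMIIPHY, then read a register through the same ioctl interface. As the hunk enforces, the read needs CAP_NET_ADMIN; "eth0" and the BMSR register are placeholders, and error handling is trimmed to a sketch:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/mii.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {   /* fills mii->phy_id */
            mii->reg_num = MII_BMSR;               /* basic mode status */
            if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                printf("BMSR = 0x%04x\n", mii->val_out);
        }
        close(fd);
        return 0;
    }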
@@ -4260,7 +4261,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
4260 struct e1000_adapter *adapter = hw->back; 4261 struct e1000_adapter *adapter = hw->back;
4261 int ret_val = pci_set_mwi(adapter->pdev); 4262 int ret_val = pci_set_mwi(adapter->pdev);
4262 4263
4263 if(ret_val) 4264 if (ret_val)
4264 DPRINTK(PROBE, ERR, "Error in setting MWI\n"); 4265 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
4265} 4266}
4266 4267
@@ -4309,7 +4310,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4309 e1000_irq_disable(adapter); 4310 e1000_irq_disable(adapter);
4310 adapter->vlgrp = grp; 4311 adapter->vlgrp = grp;
4311 4312
4312 if(grp) { 4313 if (grp) {
4313 /* enable VLAN tag insert/strip */ 4314 /* enable VLAN tag insert/strip */
4314 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4315 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4315 ctrl |= E1000_CTRL_VME; 4316 ctrl |= E1000_CTRL_VME;
@@ -4331,7 +4332,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4331 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4332 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4332 rctl &= ~E1000_RCTL_VFE; 4333 rctl &= ~E1000_RCTL_VFE;
4333 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4334 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4334 if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { 4335 if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
4335 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4336 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4336 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4337 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4337 } 4338 }
@@ -4345,9 +4346,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
4345{ 4346{
4346 struct e1000_adapter *adapter = netdev_priv(netdev); 4347 struct e1000_adapter *adapter = netdev_priv(netdev);
4347 uint32_t vfta, index; 4348 uint32_t vfta, index;
4348 if((adapter->hw.mng_cookie.status & 4349
4349 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4350 if ((adapter->hw.mng_cookie.status &
4350 (vid == adapter->mng_vlan_id)) 4351 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4352 (vid == adapter->mng_vlan_id))
4351 return; 4353 return;
4352 /* add VID to filter table */ 4354 /* add VID to filter table */
4353 index = (vid >> 5) & 0x7F; 4355 index = (vid >> 5) & 0x7F;
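The index math in e1000_vlan_rx_add_vid() reflects the VLAN Filter Table Array layout: 128 32-bit words cover all 4096 VIDs, with the upper seven VID bits selecting the word and the lower five the bit. In miniature:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t vfta[128];   /* 128 * 32 = 4096 VIDs */

    static void vfta_add(uint16_t vid)
    {
        uint32_t index = (vid >> 5) & 0x7F;   /* word, as in the hunk */
        vfta[index] |= 1u << (vid & 0x1F);    /* bit within the word */
    }

    int main(void)
    {
        vfta_add(100);
        printf("word %u = 0x%08x\n", 100u >> 5, (unsigned)vfta[100 >> 5]);
        return 0;
    }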
@@ -4364,13 +4366,13 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4364 4366
4365 e1000_irq_disable(adapter); 4367 e1000_irq_disable(adapter);
4366 4368
4367 if(adapter->vlgrp) 4369 if (adapter->vlgrp)
4368 adapter->vlgrp->vlan_devices[vid] = NULL; 4370 adapter->vlgrp->vlan_devices[vid] = NULL;
4369 4371
4370 e1000_irq_enable(adapter); 4372 e1000_irq_enable(adapter);
4371 4373
4372 if((adapter->hw.mng_cookie.status & 4374 if ((adapter->hw.mng_cookie.status &
4373 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4375 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4374 (vid == adapter->mng_vlan_id)) { 4376 (vid == adapter->mng_vlan_id)) {
4375 /* release control to f/w */ 4377 /* release control to f/w */
4376 e1000_release_hw_control(adapter); 4378 e1000_release_hw_control(adapter);
@@ -4389,10 +4391,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
4389{ 4391{
4390 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); 4392 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4391 4393
4392 if(adapter->vlgrp) { 4394 if (adapter->vlgrp) {
4393 uint16_t vid; 4395 uint16_t vid;
4394 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 4396 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4395 if(!adapter->vlgrp->vlan_devices[vid]) 4397 if (!adapter->vlgrp->vlan_devices[vid])
4396 continue; 4398 continue;
4397 e1000_vlan_rx_add_vid(adapter->netdev, vid); 4399 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4398 } 4400 }
@@ -4405,13 +4407,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4405 adapter->hw.autoneg = 0; 4407 adapter->hw.autoneg = 0;
4406 4408
4407 /* Fiber NICs only allow 1000 Mbps Full duplex */ 4409 /* Fiber NICs only allow 1000 Mbps Full duplex */
4408 if((adapter->hw.media_type == e1000_media_type_fiber) && 4410 if ((adapter->hw.media_type == e1000_media_type_fiber) &&
4409 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 4411 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4410 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 4412 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
4411 return -EINVAL; 4413 return -EINVAL;
4412 } 4414 }
4413 4415
4414 switch(spddplx) { 4416 switch (spddplx) {
4415 case SPEED_10 + DUPLEX_HALF: 4417 case SPEED_10 + DUPLEX_HALF:
4416 adapter->hw.forced_speed_duplex = e1000_10_half; 4418 adapter->hw.forced_speed_duplex = e1000_10_half;
4417 break; 4419 break;
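e1000_set_spd_dplx() dispatches on speed + duplex, which only works because the constants cannot produce colliding sums. A sketch of the pattern with stand-in values; the real constants come from ethtool.h and e1000_hw.h, and as the driver notes elsewhere the two families encode duplex differently:

    #include <stdio.h>

    enum { SPD_10 = 10, SPD_100 = 100, SPD_1000 = 1000 };  /* stand-ins */
    enum { DPX_HALF = 1, DPX_FULL = 2 };                   /* stand-ins */

    static const char *describe(int spddplx)
    {
        switch (spddplx) {
        case SPD_10 + DPX_HALF:   return "10 half";
        case SPD_10 + DPX_FULL:   return "10 full";
        case SPD_100 + DPX_HALF:  return "100 half";
        case SPD_100 + DPX_FULL:  return "100 full";
        case SPD_1000 + DPX_FULL: return "1000 full";
        default:                  return "unsupported";    /* e.g. 1000 half */
        }
    }

    int main(void)
    {
        printf("%s\n", describe(SPD_100 + DPX_FULL));
        return 0;
    }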
@@ -4496,7 +4498,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4496 4498
4497 netif_device_detach(netdev); 4499 netif_device_detach(netdev);
4498 4500
4499 if(netif_running(netdev)) 4501 if (netif_running(netdev))
4500 e1000_down(adapter); 4502 e1000_down(adapter);
4501 4503
4502#ifdef CONFIG_PM 4504#ifdef CONFIG_PM
@@ -4508,21 +4510,21 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4508#endif 4510#endif
4509 4511
4510 status = E1000_READ_REG(&adapter->hw, STATUS); 4512 status = E1000_READ_REG(&adapter->hw, STATUS);
4511 if(status & E1000_STATUS_LU) 4513 if (status & E1000_STATUS_LU)
4512 wufc &= ~E1000_WUFC_LNKC; 4514 wufc &= ~E1000_WUFC_LNKC;
4513 4515
4514 if(wufc) { 4516 if (wufc) {
4515 e1000_setup_rctl(adapter); 4517 e1000_setup_rctl(adapter);
4516 e1000_set_multi(netdev); 4518 e1000_set_multi(netdev);
4517 4519
4518 /* turn on all-multi mode if wake on multicast is enabled */ 4520 /* turn on all-multi mode if wake on multicast is enabled */
4519 if(adapter->wol & E1000_WUFC_MC) { 4521 if (adapter->wol & E1000_WUFC_MC) {
4520 rctl = E1000_READ_REG(&adapter->hw, RCTL); 4522 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4521 rctl |= E1000_RCTL_MPE; 4523 rctl |= E1000_RCTL_MPE;
4522 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 4524 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4523 } 4525 }
4524 4526
4525 if(adapter->hw.mac_type >= e1000_82540) { 4527 if (adapter->hw.mac_type >= e1000_82540) {
4526 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 4528 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4527 /* advertise wake from D3Cold */ 4529 /* advertise wake from D3Cold */
4528 #define E1000_CTRL_ADVD3WUC 0x00100000 4530 #define E1000_CTRL_ADVD3WUC 0x00100000
@@ -4533,7 +4535,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4533 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); 4535 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4534 } 4536 }
4535 4537
4536 if(adapter->hw.media_type == e1000_media_type_fiber || 4538 if (adapter->hw.media_type == e1000_media_type_fiber ||
4537 adapter->hw.media_type == e1000_media_type_internal_serdes) { 4539 adapter->hw.media_type == e1000_media_type_internal_serdes) {
4538 /* keep the laser running in D3 */ 4540 /* keep the laser running in D3 */
4539 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 4541 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
@@ -4563,10 +4565,10 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4563 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); 4565 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4564 } 4566 }
4565 4567
4566 if(adapter->hw.mac_type >= e1000_82540 && 4568 if (adapter->hw.mac_type >= e1000_82540 &&
4567 adapter->hw.media_type == e1000_media_type_copper) { 4569 adapter->hw.media_type == e1000_media_type_copper) {
4568 manc = E1000_READ_REG(&adapter->hw, MANC); 4570 manc = E1000_READ_REG(&adapter->hw, MANC);
4569 if(manc & E1000_MANC_SMBUS_EN) { 4571 if (manc & E1000_MANC_SMBUS_EN) {
4570 manc |= E1000_MANC_ARP_EN; 4572 manc |= E1000_MANC_ARP_EN;
4571 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4573 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4572 retval = pci_enable_wake(pdev, PCI_D3hot, 1); 4574 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
@@ -4617,12 +4619,12 @@ e1000_resume(struct pci_dev *pdev)
4617 e1000_reset(adapter); 4619 e1000_reset(adapter);
4618 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 4620 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
4619 4621
4620 if(netif_running(netdev)) 4622 if (netif_running(netdev))
4621 e1000_up(adapter); 4623 e1000_up(adapter);
4622 4624
4623 netif_device_attach(netdev); 4625 netif_device_attach(netdev);
4624 4626
4625 if(adapter->hw.mac_type >= e1000_82540 && 4627 if (adapter->hw.mac_type >= e1000_82540 &&
4626 adapter->hw.media_type == e1000_media_type_copper) { 4628 adapter->hw.media_type == e1000_media_type_copper) {
4627 manc = E1000_READ_REG(&adapter->hw, MANC); 4629 manc = E1000_READ_REG(&adapter->hw, MANC);
4628 manc &= ~(E1000_MANC_ARP_EN); 4630 manc &= ~(E1000_MANC_ARP_EN);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index aac64de61437..9790db974dc1 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -47,7 +47,7 @@
47 BUG(); \ 47 BUG(); \
48 } else { \ 48 } else { \
49 msleep(x); \ 49 msleep(x); \
50 } } while(0) 50 } } while (0)
51 51
52/* Some workarounds require millisecond delays and are run during interrupt 52/* Some workarounds require millisecond delays and are run during interrupt
53 * context. Most notably, when establishing link, the phy may need tweaking 53 * context. Most notably, when establishing link, the phy may need tweaking
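The "} } while (0)" whose spacing is fixed here is the tail of the standard multi-statement macro idiom: wrapping the body in do { ... } while (0) makes the expansion a single statement, so the macro composes safely with an unbraced if/else and still requires a trailing semicolon. Demonstration:

    #include <stdio.h>

    /* Expands to exactly one statement wherever it is used. */
    #define REPORT(x) do { printf("value "); printf("%d\n", (x)); } while (0)

    int main(void)
    {
        int n = 1;
        if (n)
            REPORT(n);        /* safe without braces */
        else
            printf("zero\n"); /* still binds to the if above */
        return 0;
    }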
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 0a7918c62557..3768d83cd577 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -227,7 +227,7 @@ static int __devinit
227e1000_validate_option(int *value, struct e1000_option *opt, 227e1000_validate_option(int *value, struct e1000_option *opt,
228 struct e1000_adapter *adapter) 228 struct e1000_adapter *adapter)
229{ 229{
230 if(*value == OPTION_UNSET) { 230 if (*value == OPTION_UNSET) {
231 *value = opt->def; 231 *value = opt->def;
232 return 0; 232 return 0;
233 } 233 }
@@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
244 } 244 }
245 break; 245 break;
246 case range_option: 246 case range_option:
247 if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 247 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
248 DPRINTK(PROBE, INFO, 248 DPRINTK(PROBE, INFO,
249 "%s set to %i\n", opt->name, *value); 249 "%s set to %i\n", opt->name, *value);
250 return 0; 250 return 0;
@@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt,
254 int i; 254 int i;
255 struct e1000_opt_list *ent; 255 struct e1000_opt_list *ent;
256 256
257 for(i = 0; i < opt->arg.l.nr; i++) { 257 for (i = 0; i < opt->arg.l.nr; i++) {
258 ent = &opt->arg.l.p[i]; 258 ent = &opt->arg.l.p[i];
259 if(*value == ent->i) { 259 if (*value == ent->i) {
260 if(ent->str[0] != '\0') 260 if (ent->str[0] != '\0')
261 DPRINTK(PROBE, INFO, "%s\n", ent->str); 261 DPRINTK(PROBE, INFO, "%s\n", ent->str);
262 return 0; 262 return 0;
263 } 263 }
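e1000_validate_option() normalizes every module parameter the same way: OPTION_UNSET falls back to the default, a range_option must land inside [min, max], and a list_option must match an allowed entry. A user-space model of the range case (names and limits are illustrative):

    #include <stdio.h>

    #define OPTION_UNSET (-1)

    struct range_opt { const char *name; int def, min, max; };

    static int validate(int value, const struct range_opt *opt)
    {
        if (value == OPTION_UNSET)
            return opt->def;                    /* silent default */
        if (value >= opt->min && value <= opt->max) {
            printf("%s set to %d\n", opt->name, value);
            return value;
        }
        printf("invalid %s, using default %d\n", opt->name, opt->def);
        return opt->def;
    }

    int main(void)
    {
        struct range_opt txd = { "TxDescriptors", 256, 80, 4096 };
        validate(9999, &txd);   /* out of range: falls back to 256 */
        return 0;
    }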
@@ -291,7 +291,7 @@ void __devinit
291e1000_check_options(struct e1000_adapter *adapter) 291e1000_check_options(struct e1000_adapter *adapter)
292{ 292{
293 int bd = adapter->bd_number; 293 int bd = adapter->bd_number;
294 if(bd >= E1000_MAX_NIC) { 294 if (bd >= E1000_MAX_NIC) {
295 DPRINTK(PROBE, NOTICE, 295 DPRINTK(PROBE, NOTICE,
296 "Warning: no configuration for board #%i\n", bd); 296 "Warning: no configuration for board #%i\n", bd);
297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); 297 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
@@ -315,7 +315,7 @@ e1000_check_options(struct e1000_adapter *adapter)
315 if (num_TxDescriptors > bd) { 315 if (num_TxDescriptors > bd) {
316 tx_ring->count = TxDescriptors[bd]; 316 tx_ring->count = TxDescriptors[bd];
317 e1000_validate_option(&tx_ring->count, &opt, adapter); 317 e1000_validate_option(&tx_ring->count, &opt, adapter);
318 E1000_ROUNDUP(tx_ring->count, 318 E1000_ROUNDUP(tx_ring->count,
319 REQ_TX_DESCRIPTOR_MULTIPLE); 319 REQ_TX_DESCRIPTOR_MULTIPLE);
320 } else { 320 } else {
321 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
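After validation the descriptor count is forced up to the hardware's granularity with E1000_ROUNDUP(count, REQ_TX_DESCRIPTOR_MULTIPLE). For a power-of-two multiple that is the familiar mask trick; a sketch of the effect (not necessarily the driver's exact macro):

    #include <stdio.h>

    /* Round i up to the next multiple of size; size must be a power of two. */
    #define ROUNDUP(i, size) (((i) + (size) - 1) & ~((size) - 1))

    int main(void)
    {
        printf("%d\n", ROUNDUP(100, 8));  /* 104 */
        printf("%d\n", ROUNDUP(104, 8));  /* 104: already aligned */
        return 0;
    }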
@@ -341,7 +341,7 @@ e1000_check_options(struct e1000_adapter *adapter)
341 if (num_RxDescriptors > bd) { 341 if (num_RxDescriptors > bd) {
342 rx_ring->count = RxDescriptors[bd]; 342 rx_ring->count = RxDescriptors[bd];
343 e1000_validate_option(&rx_ring->count, &opt, adapter); 343 e1000_validate_option(&rx_ring->count, &opt, adapter);
344 E1000_ROUNDUP(rx_ring->count, 344 E1000_ROUNDUP(rx_ring->count,
345 REQ_RX_DESCRIPTOR_MULTIPLE); 345 REQ_RX_DESCRIPTOR_MULTIPLE);
346 } else { 346 } else {
347 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
@@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter)
403 403
404 if (num_TxIntDelay > bd) { 404 if (num_TxIntDelay > bd) {
405 adapter->tx_int_delay = TxIntDelay[bd]; 405 adapter->tx_int_delay = TxIntDelay[bd];
406 e1000_validate_option(&adapter->tx_int_delay, &opt, 406 e1000_validate_option(&adapter->tx_int_delay, &opt,
407 adapter); 407 adapter);
408 } else { 408 } else {
409 adapter->tx_int_delay = opt.def; 409 adapter->tx_int_delay = opt.def;
@@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter)
421 421
422 if (num_TxAbsIntDelay > bd) { 422 if (num_TxAbsIntDelay > bd) {
423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; 423 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 424 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
425 adapter); 425 adapter);
426 } else { 426 } else {
427 adapter->tx_abs_int_delay = opt.def; 427 adapter->tx_abs_int_delay = opt.def;
@@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter)
439 439
440 if (num_RxIntDelay > bd) { 440 if (num_RxIntDelay > bd) {
441 adapter->rx_int_delay = RxIntDelay[bd]; 441 adapter->rx_int_delay = RxIntDelay[bd];
442 e1000_validate_option(&adapter->rx_int_delay, &opt, 442 e1000_validate_option(&adapter->rx_int_delay, &opt,
443 adapter); 443 adapter);
444 } else { 444 } else {
445 adapter->rx_int_delay = opt.def; 445 adapter->rx_int_delay = opt.def;
@@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter)
457 457
458 if (num_RxAbsIntDelay > bd) { 458 if (num_RxAbsIntDelay > bd) {
459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; 459 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 460 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
461 adapter); 461 adapter);
462 } else { 462 } else {
463 adapter->rx_abs_int_delay = opt.def; 463 adapter->rx_abs_int_delay = opt.def;
@@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter)
475 475
476 if (num_InterruptThrottleRate > bd) { 476 if (num_InterruptThrottleRate > bd) {
477 adapter->itr = InterruptThrottleRate[bd]; 477 adapter->itr = InterruptThrottleRate[bd];
478 switch(adapter->itr) { 478 switch (adapter->itr) {
479 case 0: 479 case 0:
480 DPRINTK(PROBE, INFO, "%s turned off\n", 480 DPRINTK(PROBE, INFO, "%s turned off\n",
481 opt.name); 481 opt.name);
482 break; 482 break;
483 case 1: 483 case 1:
484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 484 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
485 opt.name); 485 opt.name);
486 break; 486 break;
487 default: 487 default:
488 e1000_validate_option(&adapter->itr, &opt, 488 e1000_validate_option(&adapter->itr, &opt,
489 adapter); 489 adapter);
490 break; 490 break;
491 } 491 }
@@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter)
494 } 494 }
495 } 495 }
496 496
497 switch(adapter->hw.media_type) { 497 switch (adapter->hw.media_type) {
498 case e1000_media_type_fiber: 498 case e1000_media_type_fiber:
499 case e1000_media_type_internal_serdes: 499 case e1000_media_type_internal_serdes:
500 e1000_check_fiber_options(adapter); 500 e1000_check_fiber_options(adapter);
@@ -518,17 +518,17 @@ static void __devinit
518e1000_check_fiber_options(struct e1000_adapter *adapter) 518e1000_check_fiber_options(struct e1000_adapter *adapter)
519{ 519{
520 int bd = adapter->bd_number; 520 int bd = adapter->bd_number;
521 if(num_Speed > bd) { 521 if (num_Speed > bd) {
522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " 522 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
523 "parameter ignored\n"); 523 "parameter ignored\n");
524 } 524 }
525 525
526 if(num_Duplex > bd) { 526 if (num_Duplex > bd) {
527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " 527 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
528 "parameter ignored\n"); 528 "parameter ignored\n");
529 } 529 }
530 530
531 if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { 531 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " 532 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
533 "not valid for fiber adapters, " 533 "not valid for fiber adapters, "
534 "parameter ignored\n"); 534 "parameter ignored\n");
@@ -598,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
598 } 598 }
599 } 599 }
600 600
601 if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { 601 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
602 DPRINTK(PROBE, INFO, 602 DPRINTK(PROBE, INFO,
603 "AutoNeg specified along with Speed or Duplex, " 603 "AutoNeg specified along with Speed or Duplex, "
604 "parameter ignored\n"); 604 "parameter ignored\n");
@@ -659,7 +659,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
659 switch (speed + dplx) { 659 switch (speed + dplx) {
660 case 0: 660 case 0:
661 adapter->hw.autoneg = adapter->fc_autoneg = 1; 661 adapter->hw.autoneg = adapter->fc_autoneg = 1;
662 if((num_Speed > bd) && (speed != 0 || dplx != 0)) 662 if ((num_Speed > bd) && (speed != 0 || dplx != 0))
663 DPRINTK(PROBE, INFO, 663 DPRINTK(PROBE, INFO,
664 "Speed and duplex autonegotiation enabled\n"); 664 "Speed and duplex autonegotiation enabled\n");
665 break; 665 break;