path: root/drivers/net/e1000/e1000_main.c
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--    drivers/net/e1000/e1000_main.c    456
1 file changed, 305 insertions(+), 151 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index f77624f5f17b..98ef9f85482f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.0.38-k4"DRIVERNAPI
+#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -73,6 +73,11 @@ static struct pci_device_id e1000_pci_tbl[] = {
     INTEL_E1000_ETHERNET_DEVICE(0x1026),
     INTEL_E1000_ETHERNET_DEVICE(0x1027),
     INTEL_E1000_ETHERNET_DEVICE(0x1028),
+    INTEL_E1000_ETHERNET_DEVICE(0x1049),
+    INTEL_E1000_ETHERNET_DEVICE(0x104A),
+    INTEL_E1000_ETHERNET_DEVICE(0x104B),
+    INTEL_E1000_ETHERNET_DEVICE(0x104C),
+    INTEL_E1000_ETHERNET_DEVICE(0x104D),
     INTEL_E1000_ETHERNET_DEVICE(0x105E),
     INTEL_E1000_ETHERNET_DEVICE(0x105F),
     INTEL_E1000_ETHERNET_DEVICE(0x1060),
@@ -96,6 +101,8 @@ static struct pci_device_id e1000_pci_tbl[] = {
     INTEL_E1000_ETHERNET_DEVICE(0x109A),
     INTEL_E1000_ETHERNET_DEVICE(0x10B5),
     INTEL_E1000_ETHERNET_DEVICE(0x10B9),
+    INTEL_E1000_ETHERNET_DEVICE(0x10BA),
+    INTEL_E1000_ETHERNET_DEVICE(0x10BB),
     /* required last entry */
     {0,}
 };
@@ -133,7 +140,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 static void e1000_set_multi(struct net_device *netdev);
 static void e1000_update_phy_info(unsigned long data);
 static void e1000_watchdog(unsigned long data);
-static void e1000_watchdog_task(struct e1000_adapter *adapter);
 static void e1000_82547_tx_fifo_stall(unsigned long data);
 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
@@ -178,8 +184,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
 static void e1000_restore_vlan(struct e1000_adapter *adapter);
 
-#ifdef CONFIG_PM
 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+#ifdef CONFIG_PM
 static int e1000_resume(struct pci_dev *pdev);
 #endif
 static void e1000_shutdown(struct pci_dev *pdev);
@@ -206,8 +212,8 @@ static struct pci_driver e1000_driver = {
     .probe = e1000_probe,
     .remove = __devexit_p(e1000_remove),
     /* Power Managment Hooks */
-#ifdef CONFIG_PM
     .suspend = e1000_suspend,
+#ifdef CONFIG_PM
     .resume = e1000_resume,
 #endif
     .shutdown = e1000_shutdown,
@@ -261,6 +267,44 @@ e1000_exit_module(void)
 
 module_exit(e1000_exit_module);
 
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+    struct net_device *netdev = adapter->netdev;
+    int flags, err = 0;
+
+    flags = IRQF_SHARED;
+#ifdef CONFIG_PCI_MSI
+    if (adapter->hw.mac_type > e1000_82547_rev_2) {
+        adapter->have_msi = TRUE;
+        if ((err = pci_enable_msi(adapter->pdev))) {
+            DPRINTK(PROBE, ERR,
+                "Unable to allocate MSI interrupt Error: %d\n", err);
+            adapter->have_msi = FALSE;
+        }
+    }
+    if (adapter->have_msi)
+        flags &= ~IRQF_SHARED;
+#endif
+    if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
+                           netdev->name, netdev)))
+        DPRINTK(PROBE, ERR,
+            "Unable to allocate interrupt Error: %d\n", err);
+
+    return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+    struct net_device *netdev = adapter->netdev;
+
+    free_irq(adapter->pdev->irq, netdev);
+
+#ifdef CONFIG_PCI_MSI
+    if (adapter->have_msi)
+        pci_disable_msi(adapter->pdev);
+#endif
+}
+
 /**
  * e1000_irq_disable - Mask off interrupt generation on the NIC
  * @adapter: board private structure
@@ -329,6 +373,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
 {
     uint32_t ctrl_ext;
     uint32_t swsm;
+    uint32_t extcnf;
 
     /* Let firmware taken over control of h/w */
     switch (adapter->hw.mac_type) {
@@ -343,6 +388,11 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
         swsm = E1000_READ_REG(&adapter->hw, SWSM);
         E1000_WRITE_REG(&adapter->hw, SWSM,
                 swsm & ~E1000_SWSM_DRV_LOAD);
+    case e1000_ich8lan:
+        extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+        E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
+        break;
     default:
         break;
     }
@@ -364,6 +414,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
 {
     uint32_t ctrl_ext;
     uint32_t swsm;
+    uint32_t extcnf;
     /* Let firmware know the driver has taken over */
     switch (adapter->hw.mac_type) {
     case e1000_82571:
@@ -378,6 +429,11 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
         E1000_WRITE_REG(&adapter->hw, SWSM,
                 swsm | E1000_SWSM_DRV_LOAD);
         break;
+    case e1000_ich8lan:
+        extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
+        E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
+                extcnf | E1000_EXTCNF_CTRL_SWFLAG);
+        break;
     default:
         break;
     }
@@ -387,18 +443,10 @@ int
 e1000_up(struct e1000_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
-    int i, err;
+    int i;
 
     /* hardware has been reset, we need to reload some things */
 
-    /* Reset the PHY if it was previously powered down */
-    if (adapter->hw.media_type == e1000_media_type_copper) {
-        uint16_t mii_reg;
-        e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
-        if (mii_reg & MII_CR_POWER_DOWN)
-            e1000_phy_hw_reset(&adapter->hw);
-    }
-
     e1000_set_multi(netdev);
 
     e1000_restore_vlan(adapter);
@@ -415,24 +463,6 @@ e1000_up(struct e1000_adapter *adapter)
                 E1000_DESC_UNUSED(ring));
     }
 
-#ifdef CONFIG_PCI_MSI
-    if (adapter->hw.mac_type > e1000_82547_rev_2) {
-        adapter->have_msi = TRUE;
-        if ((err = pci_enable_msi(adapter->pdev))) {
-            DPRINTK(PROBE, ERR,
-                "Unable to allocate MSI interrupt Error: %d\n", err);
-            adapter->have_msi = FALSE;
-        }
-    }
-#endif
-    if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
-                           IRQF_SHARED | IRQF_SAMPLE_RANDOM,
-                           netdev->name, netdev))) {
-        DPRINTK(PROBE, ERR,
-            "Unable to allocate interrupt Error: %d\n", err);
-        return err;
-    }
-
     adapter->tx_queue_len = netdev->tx_queue_len;
 
     mod_timer(&adapter->watchdog_timer, jiffies);
@@ -445,21 +475,60 @@ e1000_up(struct e1000_adapter *adapter)
     return 0;
 }
 
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+static void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+    uint16_t mii_reg = 0;
+
+    /* Just clear the power down bit to wake the phy back up */
+    if (adapter->hw.media_type == e1000_media_type_copper) {
+        /* according to the manual, the phy will retain its
+         * settings across a power-down/up cycle */
+        e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+        mii_reg &= ~MII_CR_POWER_DOWN;
+        e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+    }
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+    boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+                                 e1000_check_mng_mode(&adapter->hw);
+    /* Power down the PHY so no link is implied when interface is down
+     * The PHY cannot be powered down if any of the following is TRUE
+     * (a) WoL is enabled
+     * (b) AMT is active
+     * (c) SoL/IDER session is active */
+    if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+        adapter->hw.mac_type != e1000_ich8lan &&
+        adapter->hw.media_type == e1000_media_type_copper &&
+        !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+        !mng_mode_enabled &&
+        !e1000_check_phy_reset_block(&adapter->hw)) {
+        uint16_t mii_reg = 0;
+        e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+        mii_reg |= MII_CR_POWER_DOWN;
+        e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+        mdelay(1);
+    }
+}
+
 void
 e1000_down(struct e1000_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
-    boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
-                                 e1000_check_mng_mode(&adapter->hw);
 
     e1000_irq_disable(adapter);
 
-    free_irq(adapter->pdev->irq, netdev);
-#ifdef CONFIG_PCI_MSI
-    if (adapter->hw.mac_type > e1000_82547_rev_2 &&
-        adapter->have_msi == TRUE)
-        pci_disable_msi(adapter->pdev);
-#endif
     del_timer_sync(&adapter->tx_fifo_stall_timer);
     del_timer_sync(&adapter->watchdog_timer);
     del_timer_sync(&adapter->phy_info_timer);
@@ -476,23 +545,17 @@ e1000_down(struct e1000_adapter *adapter)
     e1000_reset(adapter);
     e1000_clean_all_tx_rings(adapter);
     e1000_clean_all_rx_rings(adapter);
+}
 
-    /* Power down the PHY so no link is implied when interface is down *
-     * The PHY cannot be powered down if any of the following is TRUE *
-     * (a) WoL is enabled
-     * (b) AMT is active
-     * (c) SoL/IDER session is active */
-    if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
-        adapter->hw.media_type == e1000_media_type_copper &&
-        !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
-        !mng_mode_enabled &&
-        !e1000_check_phy_reset_block(&adapter->hw)) {
-        uint16_t mii_reg;
-        e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
-        mii_reg |= MII_CR_POWER_DOWN;
-        e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
-        mdelay(1);
-    }
+void
+e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+    WARN_ON(in_interrupt());
+    while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+        msleep(1);
+    e1000_down(adapter);
+    e1000_up(adapter);
+    clear_bit(__E1000_RESETTING, &adapter->flags);
 }
 
 void
@@ -518,6 +581,9 @@ e1000_reset(struct e1000_adapter *adapter)
     case e1000_82573:
         pba = E1000_PBA_12K;
         break;
+    case e1000_ich8lan:
+        pba = E1000_PBA_8K;
+        break;
     default:
         pba = E1000_PBA_48K;
         break;
@@ -542,6 +608,12 @@ e1000_reset(struct e1000_adapter *adapter)
     /* Set the FC high water mark to 90% of the FIFO size.
      * Required to clear last 3 LSB */
     fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+    /* We can't use 90% on small FIFOs because the remainder
+     * would be less than 1 full frame.  In this case, we size
+     * it to allow at least a full frame above the high water
+     * mark. */
+    if (pba < E1000_PBA_16K)
+        fc_high_water_mark = (pba * 1024) - 1600;
 
     adapter->hw.fc_high_water = fc_high_water_mark;
     adapter->hw.fc_low_water = fc_high_water_mark - 8;
@@ -564,6 +636,23 @@ e1000_reset(struct e1000_adapter *adapter)
 
     e1000_reset_adaptive(&adapter->hw);
     e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+
+    if (!adapter->smart_power_down &&
+        (adapter->hw.mac_type == e1000_82571 ||
+         adapter->hw.mac_type == e1000_82572)) {
+        uint16_t phy_data = 0;
+        /* speed up time to link by disabling smart power down, ignore
+         * the return value of this function because there is nothing
+         * different we would do if it failed */
+        e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+                           &phy_data);
+        phy_data &= ~IGP02E1000_PM_SPD;
+        e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+                            phy_data);
+    }
+
+    if (adapter->hw.mac_type < e1000_ich8lan)
+    /* FIXME: this code is duplicate and wrong for PCI Express */
     if (adapter->en_mng_pt) {
         manc = E1000_READ_REG(&adapter->hw, MANC);
         manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
@@ -590,6 +679,7 @@ e1000_probe(struct pci_dev *pdev,
     struct net_device *netdev;
     struct e1000_adapter *adapter;
     unsigned long mmio_start, mmio_len;
+    unsigned long flash_start, flash_len;
 
     static int cards_found = 0;
     static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
@@ -599,10 +689,12 @@ e1000_probe(struct pci_dev *pdev,
     if ((err = pci_enable_device(pdev)))
         return err;
 
-    if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+    if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
+        !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
         pci_using_dac = 1;
     } else {
-        if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+        if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
+            (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
             E1000_ERR("No usable DMA configuration, aborting\n");
             return err;
         }
@@ -682,6 +774,19 @@ e1000_probe(struct pci_dev *pdev,
     if ((err = e1000_sw_init(adapter)))
         goto err_sw_init;
 
+    /* Flash BAR mapping must happen after e1000_sw_init
+     * because it depends on mac_type */
+    if ((adapter->hw.mac_type == e1000_ich8lan) &&
+        (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+        flash_start = pci_resource_start(pdev, 1);
+        flash_len = pci_resource_len(pdev, 1);
+        adapter->hw.flash_address = ioremap(flash_start, flash_len);
+        if (!adapter->hw.flash_address) {
+            err = -EIO;
+            goto err_flashmap;
+        }
+    }
+
     if ((err = e1000_check_phy_reset_block(&adapter->hw)))
         DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
@@ -700,6 +805,8 @@ e1000_probe(struct pci_dev *pdev,
             NETIF_F_HW_VLAN_TX |
             NETIF_F_HW_VLAN_RX |
             NETIF_F_HW_VLAN_FILTER;
+        if (adapter->hw.mac_type == e1000_ich8lan)
+            netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
     }
 
 #ifdef NETIF_F_TSO
@@ -715,11 +822,17 @@ e1000_probe(struct pci_dev *pdev,
     if (pci_using_dac)
         netdev->features |= NETIF_F_HIGHDMA;
 
-    /* hard_start_xmit is safe against parallel locking */
     netdev->features |= NETIF_F_LLTX;
 
     adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
 
+    /* initialize eeprom parameters */
+
+    if (e1000_init_eeprom_params(&adapter->hw)) {
+        E1000_ERR("EEPROM initialization failed\n");
+        return -EIO;
+    }
+
     /* before reading the EEPROM, reset the controller to
      * put the device in a known good starting state */
 
@@ -758,9 +871,6 @@ e1000_probe(struct pci_dev *pdev,
     adapter->watchdog_timer.function = &e1000_watchdog;
     adapter->watchdog_timer.data = (unsigned long) adapter;
 
-    INIT_WORK(&adapter->watchdog_task,
-        (void (*)(void *))e1000_watchdog_task, adapter);
-
     init_timer(&adapter->phy_info_timer);
     adapter->phy_info_timer.function = &e1000_update_phy_info;
     adapter->phy_info_timer.data = (unsigned long) adapter;
@@ -790,6 +900,11 @@ e1000_probe(struct pci_dev *pdev,
             EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
         eeprom_apme_mask = E1000_EEPROM_82544_APM;
         break;
+    case e1000_ich8lan:
+        e1000_read_eeprom(&adapter->hw,
+            EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+        eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+        break;
     case e1000_82546:
     case e1000_82546_rev_3:
     case e1000_82571:
@@ -849,6 +964,9 @@ e1000_probe(struct pci_dev *pdev,
     return 0;
 
 err_register:
+    if (adapter->hw.flash_address)
+        iounmap(adapter->hw.flash_address);
+err_flashmap:
 err_sw_init:
 err_eeprom:
     iounmap(adapter->hw.hw_addr);
@@ -882,6 +1000,7 @@ e1000_remove(struct pci_dev *pdev)
     flush_scheduled_work();
 
     if (adapter->hw.mac_type >= e1000_82540 &&
+        adapter->hw.mac_type != e1000_ich8lan &&
         adapter->hw.media_type == e1000_media_type_copper) {
         manc = E1000_READ_REG(&adapter->hw, MANC);
         if (manc & E1000_MANC_SMBUS_EN) {
@@ -910,6 +1029,8 @@ e1000_remove(struct pci_dev *pdev)
 #endif
 
     iounmap(adapter->hw.hw_addr);
+    if (adapter->hw.flash_address)
+        iounmap(adapter->hw.flash_address);
     pci_release_regions(pdev);
 
     free_netdev(netdev);
@@ -947,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
     pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
-    adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+    adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
     adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
     hw->max_frame_size = netdev->mtu +
                          ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
@@ -960,13 +1081,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
         return -EIO;
     }
 
-    /* initialize eeprom parameters */
-
-    if (e1000_init_eeprom_params(hw)) {
-        E1000_ERR("EEPROM initialization failed\n");
-        return -EIO;
-    }
-
     switch (hw->mac_type) {
     default:
         break;
@@ -1078,6 +1192,10 @@ e1000_open(struct net_device *netdev)
     struct e1000_adapter *adapter = netdev_priv(netdev);
     int err;
 
+    /* disallow open during test */
+    if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags))
+        return -EBUSY;
+
     /* allocate transmit descriptors */
 
     if ((err = e1000_setup_all_tx_resources(adapter)))
@@ -1088,6 +1206,12 @@ e1000_open(struct net_device *netdev)
     if ((err = e1000_setup_all_rx_resources(adapter)))
         goto err_setup_rx;
 
+    err = e1000_request_irq(adapter);
+    if (err)
+        goto err_up;
+
+    e1000_power_up_phy(adapter);
+
     if ((err = e1000_up(adapter)))
         goto err_up;
     adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -1131,7 +1255,10 @@ e1000_close(struct net_device *netdev)
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
 
+    WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
     e1000_down(adapter);
+    e1000_power_down_phy(adapter);
+    e1000_free_irq(adapter);
 
     e1000_free_all_tx_resources(adapter);
     e1000_free_all_rx_resources(adapter);
@@ -1189,8 +1316,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
     int size;
 
     size = sizeof(struct e1000_buffer) * txdr->count;
-
-    txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
+    txdr->buffer_info = vmalloc(size);
     if (!txdr->buffer_info) {
         DPRINTK(PROBE, ERR,
         "Unable to allocate memory for the transmit descriptor ring\n");
@@ -1302,13 +1428,13 @@ e1000_configure_tx(struct e1000_adapter *adapter)
         tdba = adapter->tx_ring[0].dma;
         tdlen = adapter->tx_ring[0].count *
             sizeof(struct e1000_tx_desc);
-        E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
-        E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
         E1000_WRITE_REG(hw, TDLEN, tdlen);
-        E1000_WRITE_REG(hw, TDH, 0);
+        E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+        E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
         E1000_WRITE_REG(hw, TDT, 0);
-        adapter->tx_ring[0].tdh = E1000_TDH;
-        adapter->tx_ring[0].tdt = E1000_TDT;
+        E1000_WRITE_REG(hw, TDH, 0);
+        adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
+        adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
         break;
     }
 
@@ -1418,7 +1544,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
     int size, desc_len;
 
     size = sizeof(struct e1000_buffer) * rxdr->count;
-    rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
+    rxdr->buffer_info = vmalloc(size);
     if (!rxdr->buffer_info) {
         DPRINTK(PROBE, ERR,
         "Unable to allocate memory for the receive descriptor ring\n");
@@ -1560,9 +1686,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
         E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
         (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
-    if (adapter->hw.mac_type > e1000_82543)
-        rctl |= E1000_RCTL_SECRC;
-
     if (adapter->hw.tbi_compatibility_on == 1)
         rctl |= E1000_RCTL_SBP;
     else
@@ -1628,7 +1751,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
         rfctl |= E1000_RFCTL_IPV6_DIS;
         E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
 
-        rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
+        rctl |= E1000_RCTL_DTYP_PS;
 
         psrctl |= adapter->rx_ps_bsize0 >>
             E1000_PSRCTL_BSIZE0_SHIFT;
@@ -1712,13 +1835,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
     case 1:
     default:
         rdba = adapter->rx_ring[0].dma;
-        E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
-        E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
         E1000_WRITE_REG(hw, RDLEN, rdlen);
-        E1000_WRITE_REG(hw, RDH, 0);
+        E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+        E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
         E1000_WRITE_REG(hw, RDT, 0);
-        adapter->rx_ring[0].rdh = E1000_RDH;
-        adapter->rx_ring[0].rdt = E1000_RDT;
+        E1000_WRITE_REG(hw, RDH, 0);
+        adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
+        adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
         break;
     }
 
@@ -1741,9 +1864,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
         E1000_WRITE_REG(hw, RXCSUM, rxcsum);
     }
 
-    if (hw->mac_type == e1000_82573)
-        E1000_WRITE_REG(hw, ERT, 0x0100);
-
     /* Enable Receives */
     E1000_WRITE_REG(hw, RCTL, rctl);
 }
@@ -2083,6 +2203,12 @@ e1000_set_multi(struct net_device *netdev)
     uint32_t rctl;
     uint32_t hash_value;
     int i, rar_entries = E1000_RAR_ENTRIES;
+    int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+                E1000_NUM_MTA_REGISTERS_ICH8LAN :
+                E1000_NUM_MTA_REGISTERS;
+
+    if (adapter->hw.mac_type == e1000_ich8lan)
+        rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
 
     /* reserve RAR[14] for LAA over-write work-around */
     if (adapter->hw.mac_type == e1000_82571)
@@ -2121,14 +2247,18 @@ e1000_set_multi(struct net_device *netdev)
             mc_ptr = mc_ptr->next;
         } else {
             E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+            E1000_WRITE_FLUSH(hw);
             E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+            E1000_WRITE_FLUSH(hw);
         }
     }
 
     /* clear the old settings from the multicast hash table */
 
-    for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
+    for (i = 0; i < mta_reg_count; i++) {
         E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+        E1000_WRITE_FLUSH(hw);
+    }
 
     /* load any remaining addresses into the hash table */
 
@@ -2201,19 +2331,19 @@ static void
 e1000_watchdog(unsigned long data)
 {
     struct e1000_adapter *adapter = (struct e1000_adapter *) data;
-
-    /* Do the rest outside of interrupt context */
-    schedule_work(&adapter->watchdog_task);
-}
-
-static void
-e1000_watchdog_task(struct e1000_adapter *adapter)
-{
     struct net_device *netdev = adapter->netdev;
     struct e1000_tx_ring *txdr = adapter->tx_ring;
     uint32_t link, tctl;
-
-    e1000_check_for_link(&adapter->hw);
+    int32_t ret_val;
+
+    ret_val = e1000_check_for_link(&adapter->hw);
+    if ((ret_val == E1000_ERR_PHY) &&
+        (adapter->hw.phy_type == e1000_phy_igp_3) &&
+        (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+        /* See e1000_kumeran_lock_loss_workaround() */
+        DPRINTK(LINK, INFO,
+            "Gigabit has been disabled, downgrading speed\n");
+    }
     if (adapter->hw.mac_type == e1000_82573) {
         e1000_enable_tx_pkt_filtering(&adapter->hw);
         if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2394,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
     uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
     int err;
 
-    if (skb_shinfo(skb)->gso_size) {
+    if (skb_is_gso(skb)) {
         if (skb_header_cloned(skb)) {
             err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
             if (err)
@@ -2519,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
          * tso gets written back prematurely before the data is fully
          * DMA'd to the controller */
         if (!skb->data_len && tx_ring->last_tx_tso &&
-            !skb_shinfo(skb)->gso_size) {
+            !skb_is_gso(skb)) {
             tx_ring->last_tx_tso = 0;
             size -= 4;
         }
@@ -2779,9 +2909,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
     case e1000_82571:
     case e1000_82572:
     case e1000_82573:
+    case e1000_ich8lan:
         pull_size = min((unsigned int)4, skb->data_len);
         if (!__pskb_pull_tail(skb, pull_size)) {
-            printk(KERN_ERR
+            DPRINTK(DRV, ERR,
                 "__pskb_pull_tail failed.\n");
             dev_kfree_skb_any(skb);
             return NETDEV_TX_OK;
@@ -2806,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_TSO
     /* Controller Erratum workaround */
-    if (!skb->data_len && tx_ring->last_tx_tso &&
-        !skb_shinfo(skb)->gso_size)
+    if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
         count++;
 #endif
 
@@ -2919,8 +3049,7 @@ e1000_reset_task(struct net_device *netdev)
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
 
-    e1000_down(adapter);
-    e1000_up(adapter);
+    e1000_reinit_locked(adapter);
 }
 
 /**
@@ -2964,6 +3093,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
     /* Adapter-specific max frame size limits. */
     switch (adapter->hw.mac_type) {
     case e1000_undefined ... e1000_82542_rev2_1:
+    case e1000_ich8lan:
         if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
             DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
             return -EINVAL;
@@ -2997,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
         break;
     }
 
-    /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+    /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
      * means we reserve 2 more, this pushes us to allocate from the next
      * larger slab size
      * i.e. RXBUFFER_2048 --> size-4096 slab */
@@ -3018,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
         adapter->rx_buffer_len = E1000_RXBUFFER_16384;
 
     /* adjust allocation if LPE protects us, and we aren't using SBP */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
     if (!adapter->hw.tbi_compatibility_on &&
         ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
          (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
@@ -3026,10 +3155,8 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 
     netdev->mtu = new_mtu;
 
-    if (netif_running(netdev)) {
-        e1000_down(adapter);
-        e1000_up(adapter);
-    }
+    if (netif_running(netdev))
+        e1000_reinit_locked(adapter);
 
     adapter->hw.max_frame_size = max_frame;
 
@@ -3074,12 +3201,15 @@ e1000_update_stats(struct e1000_adapter *adapter)
     adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
     adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
     adapter->stats.roc += E1000_READ_REG(hw, ROC);
+
+    if (adapter->hw.mac_type != e1000_ich8lan) {
     adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
     adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
     adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
     adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
     adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
     adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+    }
 
     adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
     adapter->stats.mpc += E1000_READ_REG(hw, MPC);
@@ -3107,12 +3237,16 @@ e1000_update_stats(struct e1000_adapter *adapter)
     adapter->stats.totl += E1000_READ_REG(hw, TOTL);
     adapter->stats.toth += E1000_READ_REG(hw, TOTH);
     adapter->stats.tpr += E1000_READ_REG(hw, TPR);
+
+    if (adapter->hw.mac_type != e1000_ich8lan) {
     adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
     adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
     adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
     adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
     adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
     adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+    }
+
     adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
     adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
 
@@ -3134,6 +3268,8 @@ e1000_update_stats(struct e1000_adapter *adapter)
     if (hw->mac_type > e1000_82547_rev_2) {
         adapter->stats.iac += E1000_READ_REG(hw, IAC);
         adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
+
+        if (adapter->hw.mac_type != e1000_ich8lan) {
         adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
         adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
         adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
@@ -3141,6 +3277,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
         adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
         adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
         adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+        }
     }
 
     /* Fill out the OS statistics structure */
@@ -3249,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
         E1000_WRITE_REG(hw, IMC, ~0);
         E1000_WRITE_FLUSH(hw);
     }
-    if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
-        __netif_rx_schedule(&adapter->polling_netdev[0]);
+    if (likely(netif_rx_schedule_prep(netdev)))
+        __netif_rx_schedule(netdev);
     else
         e1000_irq_enable(adapter);
 #else
@@ -3293,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 {
     struct e1000_adapter *adapter;
     int work_to_do = min(*budget, poll_dev->quota);
-    int tx_cleaned = 0, i = 0, work_done = 0;
+    int tx_cleaned = 0, work_done = 0;
 
     /* Must NOT use netdev_priv macro here. */
     adapter = poll_dev->priv;
 
     /* Keep link state information with original netdev */
-    if (!netif_carrier_ok(adapter->netdev))
+    if (!netif_carrier_ok(poll_dev))
         goto quit_polling;
 
-    while (poll_dev != &adapter->polling_netdev[i]) {
-        i++;
-        BUG_ON(i == adapter->num_rx_queues);
+    /* e1000_clean is called per-cpu.  This lock protects
+     * tx_ring[0] from being cleaned by multiple cpus
+     * simultaneously.  A failure obtaining the lock means
+     * tx_ring[0] is currently being cleaned anyway. */
+    if (spin_trylock(&adapter->tx_queue_lock)) {
+        tx_cleaned = e1000_clean_tx_irq(adapter,
+                                        &adapter->tx_ring[0]);
+        spin_unlock(&adapter->tx_queue_lock);
     }
 
-    if (likely(adapter->num_tx_queues == 1)) {
-        /* e1000_clean is called per-cpu.  This lock protects
-         * tx_ring[0] from being cleaned by multiple cpus
-         * simultaneously.  A failure obtaining the lock means
-         * tx_ring[0] is currently being cleaned anyway. */
-        if (spin_trylock(&adapter->tx_queue_lock)) {
-            tx_cleaned = e1000_clean_tx_irq(adapter,
-                                            &adapter->tx_ring[0]);
-            spin_unlock(&adapter->tx_queue_lock);
-        }
-    } else
-        tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
-
-    adapter->clean_rx(adapter, &adapter->rx_ring[i],
+    adapter->clean_rx(adapter, &adapter->rx_ring[0],
                       &work_done, work_to_do);
 
     *budget -= work_done;
@@ -3328,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
     /* If no Tx and not enough Rx work done, exit the polling mode */
     if ((!tx_cleaned && (work_done == 0)) ||
-        !netif_running(adapter->netdev)) {
+        !netif_running(poll_dev)) {
 quit_polling:
         netif_rx_complete(poll_dev);
         e1000_irq_enable(adapter);
@@ -3543,11 +3672,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
         length = le16_to_cpu(rx_desc->length);
 
+        /* adjust length to remove Ethernet CRC */
+        length -= 4;
+
         if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
             /* All receives must fit into a single buffer */
             E1000_DBG("%s: Receive packet consumed multiple"
                       " buffers\n", netdev->name);
-            dev_kfree_skb_irq(skb);
+            /* recycle */
+            buffer_info->skb = skb;
             goto next_desc;
         }
 
@@ -3575,7 +3708,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 #define E1000_CB_LENGTH 256
         if (length < E1000_CB_LENGTH) {
             struct sk_buff *new_skb =
-                dev_alloc_skb(length + NET_IP_ALIGN);
+                netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
             if (new_skb) {
                 skb_reserve(new_skb, NET_IP_ALIGN);
                 new_skb->dev = netdev;
@@ -3675,7 +3808,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
     buffer_info = &rx_ring->buffer_info[i];
 
     while (staterr & E1000_RXD_STAT_DD) {
-        buffer_info = &rx_ring->buffer_info[i];
         ps_page = &rx_ring->ps_page[i];
         ps_page_dma = &rx_ring->ps_page_dma[i];
 #ifdef CONFIG_E1000_NAPI
@@ -3747,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                 pci_dma_sync_single_for_device(pdev,
                     ps_page_dma->ps_page_dma[0],
                     PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                /* remove the CRC */
+                l1 -= 4;
                 skb_put(skb, l1);
-                length += l1;
                 goto copydone;
             } /* if */
         }
@@ -3767,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
             skb->truesize += length;
         }
 
+        /* strip the ethernet crc, problem is we're using pages now so
+         * this whole operation can get a little cpu intensive */
+        pskb_trim(skb, skb->len - 4);
+
 copydone:
         e1000_rx_checksum(adapter, staterr,
             le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
@@ -3842,7 +3979,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
     while (cleaned_count--) {
         if (!(skb = buffer_info->skb))
-            skb = dev_alloc_skb(bufsz);
+            skb = netdev_alloc_skb(netdev, bufsz);
         else {
             skb_trim(skb, 0);
             goto map_skb;
@@ -3860,7 +3997,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
             DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
                                  "at %p\n", bufsz, skb->data);
             /* Try again, without freeing the previous */
-            skb = dev_alloc_skb(bufsz);
+            skb = netdev_alloc_skb(netdev, bufsz);
             /* Failed allocation, critical failure */
             if (!skb) {
                 dev_kfree_skb(oldskb);
@@ -3984,7 +4121,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                 rx_desc->read.buffer_addr[j+1] = ~0;
         }
 
-        skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+        skb = netdev_alloc_skb(netdev,
+                               adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
         if (unlikely(!skb)) {
             adapter->alloc_rx_buff_failed++;
@@ -4180,10 +4318,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                 return retval;
             }
         }
-        if (netif_running(adapter->netdev)) {
-            e1000_down(adapter);
-            e1000_up(adapter);
-        } else
+        if (netif_running(adapter->netdev))
+            e1000_reinit_locked(adapter);
+        else
             e1000_reset(adapter);
         break;
     case M88E1000_PHY_SPEC_CTRL:
@@ -4200,10 +4337,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
         case PHY_CTRL:
             if (mii_reg & MII_CR_POWER_DOWN)
                 break;
-            if (netif_running(adapter->netdev)) {
-                e1000_down(adapter);
-                e1000_up(adapter);
-            } else
+            if (netif_running(adapter->netdev))
+                e1000_reinit_locked(adapter);
+            else
                 e1000_reset(adapter);
             break;
         }
@@ -4250,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
     pci_write_config_word(adapter->pdev, reg, *value);
 }
 
+#if 0
 uint32_t
 e1000_io_read(struct e1000_hw *hw, unsigned long port)
 {
     return inl(port);
 }
+#endif  /*  0  */
 
 void
 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
@@ -4277,18 +4415,21 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
         ctrl |= E1000_CTRL_VME;
         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 
+        if (adapter->hw.mac_type != e1000_ich8lan) {
         /* enable VLAN receive filtering */
         rctl = E1000_READ_REG(&adapter->hw, RCTL);
         rctl |= E1000_RCTL_VFE;
         rctl &= ~E1000_RCTL_CFIEN;
         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
         e1000_update_mng_vlan(adapter);
+        }
     } else {
         /* disable VLAN tag insert/strip */
         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
         ctrl &= ~E1000_CTRL_VME;
         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 
+        if (adapter->hw.mac_type != e1000_ich8lan) {
         /* disable VLAN filtering */
         rctl = E1000_READ_REG(&adapter->hw, RCTL);
         rctl &= ~E1000_RCTL_VFE;
@@ -4297,6 +4438,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
             e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
             adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
         }
+        }
     }
 
     e1000_irq_enable(adapter);
@@ -4458,12 +4600,16 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
     struct e1000_adapter *adapter = netdev_priv(netdev);
     uint32_t ctrl, ctrl_ext, rctl, manc, status;
     uint32_t wufc = adapter->wol;
+#ifdef CONFIG_PM
     int retval = 0;
+#endif
 
     netif_device_detach(netdev);
 
-    if (netif_running(netdev))
+    if (netif_running(netdev)) {
+        WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
         e1000_down(adapter);
+    }
 
 #ifdef CONFIG_PM
     /* Implement our own version of pci_save_state(pdev) because pci-
@@ -4521,7 +4667,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
         pci_enable_wake(pdev, PCI_D3cold, 0);
     }
 
+    /* FIXME: this code is incorrect for PCI Express */
     if (adapter->hw.mac_type >= e1000_82540 &&
+        adapter->hw.mac_type != e1000_ich8lan &&
         adapter->hw.media_type == e1000_media_type_copper) {
         manc = E1000_READ_REG(&adapter->hw, MANC);
         if (manc & E1000_MANC_SMBUS_EN) {
@@ -4532,6 +4680,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
         }
     }
 
+    if (adapter->hw.phy_type == e1000_phy_igp_3)
+        e1000_phy_powerdown_workaround(&adapter->hw);
+
     /* Release control of h/w to f/w.  If f/w is AMT enabled, this
      * would have already happened in close and is redundant. */
     e1000_release_hw_control(adapter);
@@ -4567,7 +4718,9 @@ e1000_resume(struct pci_dev *pdev)
 
     netif_device_attach(netdev);
 
+    /* FIXME: this code is incorrect for PCI Express */
     if (adapter->hw.mac_type >= e1000_82540 &&
+        adapter->hw.mac_type != e1000_ich8lan &&
         adapter->hw.media_type == e1000_media_type_copper) {
         manc = E1000_READ_REG(&adapter->hw, MANC);
         manc &= ~(E1000_MANC_ARP_EN);
@@ -4601,6 +4754,7 @@ static void
 e1000_netpoll(struct net_device *netdev)
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
+
     disable_irq(adapter->pdev->irq);
     e1000_intr(adapter->pdev->irq, netdev, NULL);
     e1000_clean_tx_irq(adapter, adapter->tx_ring);