Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c	1078
1 files changed, 822 insertions, 256 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ee687c902a20..6b72f6acdd54 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -43,7 +43,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "6.0.60-k2"DRIVERNAPI
+#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -80,6 +80,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
 	INTEL_E1000_ETHERNET_DEVICE(0x1027),
 	INTEL_E1000_ETHERNET_DEVICE(0x1028),
+	INTEL_E1000_ETHERNET_DEVICE(0x105E),
+	INTEL_E1000_ETHERNET_DEVICE(0x105F),
+	INTEL_E1000_ETHERNET_DEVICE(0x1060),
 	INTEL_E1000_ETHERNET_DEVICE(0x1075),
 	INTEL_E1000_ETHERNET_DEVICE(0x1076),
 	INTEL_E1000_ETHERNET_DEVICE(0x1077),
@@ -88,10 +91,13 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	INTEL_E1000_ETHERNET_DEVICE(0x107A),
 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
+	INTEL_E1000_ETHERNET_DEVICE(0x107D),
+	INTEL_E1000_ETHERNET_DEVICE(0x107E),
+	INTEL_E1000_ETHERNET_DEVICE(0x107F),
 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
 	INTEL_E1000_ETHERNET_DEVICE(0x108B),
 	INTEL_E1000_ETHERNET_DEVICE(0x108C),
-	INTEL_E1000_ETHERNET_DEVICE(0x1099),
+	INTEL_E1000_ETHERNET_DEVICE(0x109A),
 	/* required last entry */
 	{0,}
 };
@@ -102,10 +108,18 @@ int e1000_up(struct e1000_adapter *adapter);
 void e1000_down(struct e1000_adapter *adapter);
 void e1000_reset(struct e1000_adapter *adapter);
 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
-int e1000_setup_tx_resources(struct e1000_adapter *adapter);
-int e1000_setup_rx_resources(struct e1000_adapter *adapter);
-void e1000_free_tx_resources(struct e1000_adapter *adapter);
-void e1000_free_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+			     struct e1000_tx_ring *txdr);
+int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rxdr);
+void e1000_free_tx_resources(struct e1000_adapter *adapter,
+			     struct e1000_tx_ring *tx_ring);
+void e1000_free_rx_resources(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rx_ring);
 void e1000_update_stats(struct e1000_adapter *adapter);
 
 /* Local Function Prototypes */
@@ -114,14 +128,22 @@ static int e1000_init_module(void);
 static void e1000_exit_module(void);
 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+#ifdef CONFIG_E1000_MQ
+static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
+#endif
 static int e1000_sw_init(struct e1000_adapter *adapter);
 static int e1000_open(struct net_device *netdev);
 static int e1000_close(struct net_device *netdev);
 static void e1000_configure_tx(struct e1000_adapter *adapter);
 static void e1000_configure_rx(struct e1000_adapter *adapter);
 static void e1000_setup_rctl(struct e1000_adapter *adapter);
-static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
-static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring);
 static void e1000_set_multi(struct net_device *netdev);
 static void e1000_update_phy_info(unsigned long data);
 static void e1000_watchdog(unsigned long data);
@@ -132,19 +154,26 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
-static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
+static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
-static int e1000_clean(struct net_device *netdev, int *budget);
+static int e1000_clean(struct net_device *poll_dev, int *budget);
 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
 				    int *work_done, int work_to_do);
 static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
 				       int *work_done, int work_to_do);
 #else
-static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
-static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
+static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring);
+static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring);
 #endif
-static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring);
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+				      struct e1000_rx_ring *rx_ring);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 			   int cmd);
@@ -172,6 +201,11 @@ static int e1000_resume(struct pci_dev *pdev);
 static void e1000_netpoll (struct net_device *netdev);
 #endif
 
+#ifdef CONFIG_E1000_MQ
+/* for multiple Rx queues */
+void e1000_rx_schedule(void *data);
+#endif
+
 /* Exported from other modules */
 
 extern void e1000_check_options(struct e1000_adapter *adapter);
@@ -289,7 +323,7 @@ int
 e1000_up(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int err;
+	int i, err;
 
 	/* hardware has been reset, we need to reload some things */
 
@@ -308,7 +342,8 @@ e1000_up(struct e1000_adapter *adapter)
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
 	e1000_configure_rx(adapter);
-	adapter->alloc_rx_buf(adapter);
+	for (i = 0; i < adapter->num_queues; i++)
+		adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
 
 #ifdef CONFIG_PCI_MSI
 	if(adapter->hw.mac_type > e1000_82547_rev_2) {
@@ -344,6 +379,9 @@ e1000_down(struct e1000_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 
 	e1000_irq_disable(adapter);
+#ifdef CONFIG_E1000_MQ
+	while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
+#endif
 	free_irq(adapter->pdev->irq, netdev);
 #ifdef CONFIG_PCI_MSI
 	if(adapter->hw.mac_type > e1000_82547_rev_2 &&
@@ -363,11 +401,10 @@ e1000_down(struct e1000_adapter *adapter)
 	netif_stop_queue(netdev);
 
 	e1000_reset(adapter);
-	e1000_clean_tx_ring(adapter);
-	e1000_clean_rx_ring(adapter);
+	e1000_clean_all_tx_rings(adapter);
+	e1000_clean_all_rx_rings(adapter);
 
-	/* If WoL is not enabled
-	 * and management mode is not IAMT
+	/* If WoL is not enabled and management mode is not IAMT
 	 * Power down the PHY so no link is implied when interface is down */
 	if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
 	   adapter->hw.media_type == e1000_media_type_copper &&
@@ -398,6 +435,10 @@ e1000_reset(struct e1000_adapter *adapter)
 	case e1000_82547_rev_2:
 		pba = E1000_PBA_30K;
 		break;
+	case e1000_82571:
+	case e1000_82572:
+		pba = E1000_PBA_38K;
+		break;
 	case e1000_82573:
 		pba = E1000_PBA_12K;
 		break;
@@ -475,6 +516,7 @@ e1000_probe(struct pci_dev *pdev,
 	struct net_device *netdev;
 	struct e1000_adapter *adapter;
 	unsigned long mmio_start, mmio_len;
+	uint32_t ctrl_ext;
 	uint32_t swsm;
 
 	static int cards_found = 0;
@@ -614,8 +656,9 @@ e1000_probe(struct pci_dev *pdev,
 	if(e1000_read_mac_addr(&adapter->hw))
 		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
 	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
 
-	if(!is_valid_ether_addr(netdev->dev_addr)) {
+	if(!is_valid_ether_addr(netdev->perm_addr)) {
 		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -687,6 +730,12 @@ e1000_probe(struct pci_dev *pdev,
 
 	/* Let firmware know the driver has taken over */
 	switch(adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
 	case e1000_82573:
 		swsm = E1000_READ_REG(&adapter->hw, SWSM);
 		E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -731,7 +780,11 @@ e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	uint32_t ctrl_ext;
 	uint32_t manc, swsm;
+#ifdef CONFIG_E1000_NAPI
+	int i;
+#endif
 
 	flush_scheduled_work();
 
@@ -745,6 +798,12 @@ e1000_remove(struct pci_dev *pdev)
 	}
 
 	switch(adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
 	case e1000_82573:
 		swsm = E1000_READ_REG(&adapter->hw, SWSM);
 		E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -756,13 +815,27 @@ e1000_remove(struct pci_dev *pdev)
 	}
 
 	unregister_netdev(netdev);
+#ifdef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_queues; i++)
+		__dev_put(&adapter->polling_netdev[i]);
+#endif
 
 	if(!e1000_check_phy_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
 
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+#ifdef CONFIG_E1000_NAPI
+	kfree(adapter->polling_netdev);
+#endif
+
 	iounmap(adapter->hw.hw_addr);
 	pci_release_regions(pdev);
 
+#ifdef CONFIG_E1000_MQ
+	free_percpu(adapter->cpu_netdev);
+	free_percpu(adapter->cpu_tx_ring);
+#endif
 	free_netdev(netdev);
 
 	pci_disable_device(pdev);
@@ -783,6 +856,9 @@ e1000_sw_init(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
+#ifdef CONFIG_E1000_NAPI
+	int i;
+#endif
 
 	/* PCI config space info */
 
@@ -840,14 +916,123 @@ e1000_sw_init(struct e1000_adapter *adapter)
 		hw->master_slave = E1000_MASTER_SLAVE;
 	}
 
+#ifdef CONFIG_E1000_MQ
+	/* Number of supported queues */
+	switch (hw->mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		adapter->num_queues = 2;
+		break;
+	default:
+		adapter->num_queues = 1;
+		break;
+	}
+	adapter->num_queues = min(adapter->num_queues, num_online_cpus());
+#else
+	adapter->num_queues = 1;
+#endif
+
+	if (e1000_alloc_queues(adapter)) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_queues; i++) {
+		adapter->polling_netdev[i].priv = adapter;
+		adapter->polling_netdev[i].poll = &e1000_clean;
+		adapter->polling_netdev[i].weight = 64;
+		dev_hold(&adapter->polling_netdev[i]);
+		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
+	}
+#endif
+
+#ifdef CONFIG_E1000_MQ
+	e1000_setup_queue_mapping(adapter);
+#endif
+
 	atomic_set(&adapter->irq_sem, 1);
 	spin_lock_init(&adapter->stats_lock);
-	spin_lock_init(&adapter->tx_lock);
 
 	return 0;
 }
 
 /**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+
+static int __devinit
+e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	int size;
+
+	size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
+	if (!adapter->tx_ring)
+		return -ENOMEM;
+	memset(adapter->tx_ring, 0, size);
+
+	size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		return -ENOMEM;
+	}
+	memset(adapter->rx_ring, 0, size);
+
+#ifdef CONFIG_E1000_NAPI
+	size = sizeof(struct net_device) * adapter->num_queues;
+	adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
+	if (!adapter->polling_netdev) {
+		kfree(adapter->tx_ring);
+		kfree(adapter->rx_ring);
+		return -ENOMEM;
+	}
+	memset(adapter->polling_netdev, 0, size);
+#endif
+
+	return E1000_SUCCESS;
+}
+
+#ifdef CONFIG_E1000_MQ
+static void __devinit
+e1000_setup_queue_mapping(struct e1000_adapter *adapter)
+{
+	int i, cpu;
+
+	adapter->rx_sched_call_data.func = e1000_rx_schedule;
+	adapter->rx_sched_call_data.info = adapter->netdev;
+	cpus_clear(adapter->rx_sched_call_data.cpumask);
+
+	adapter->cpu_netdev = alloc_percpu(struct net_device *);
+	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+
+	lock_cpu_hotplug();
+	i = 0;
+	for_each_online_cpu(cpu) {
+		*per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
+		/* This is incomplete because we'd like to assign separate
+		 * physical cpus to these netdev polling structures and
+		 * avoid saturating a subset of cpus.
+		 */
+		if (i < adapter->num_queues) {
+			*per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
+			adapter->cpu_for_queue[i] = cpu;
+		} else
+			*per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
+
+		i++;
+	}
+	unlock_cpu_hotplug();
+}
+#endif
+
+/**
  * e1000_open - Called when a network interface is made active
  * @netdev: network interface device structure
  *
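A note on the e1000_setup_queue_mapping() hunk above: each online CPU is pointed at one Tx ring chosen round-robin with i % num_queues, and the first num_queues CPUs additionally own an Rx polling netdev. A minimal standalone sketch of that mapping rule follows; the CPU and queue counts are made-up example values, not anything the driver guarantees:

#include <stdio.h>

int main(void)
{
	int num_cpus = 4, num_queues = 2;	/* assumed example values */
	int cpu;

	for (cpu = 0; cpu < num_cpus; cpu++) {
		/* CPU i transmits on ring i % num_queues; only the first
		 * num_queues CPUs also poll an Rx queue. */
		printf("cpu %d -> tx_ring[%d]%s\n", cpu, cpu % num_queues,
		       cpu < num_queues ? " (also polls an rx queue)" : "");
	}
	return 0;
}

With 4 CPUs and 2 queues this prints a 0,1,0,1 assignment, which matches the saturation concern the in-code comment raises: the later CPUs share rings already owned by the polling CPUs.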
@@ -868,12 +1053,12 @@ e1000_open(struct net_device *netdev)
 
 	/* allocate transmit descriptors */
 
-	if((err = e1000_setup_tx_resources(adapter)))
+	if ((err = e1000_setup_all_tx_resources(adapter)))
 		goto err_setup_tx;
 
 	/* allocate receive descriptors */
 
-	if((err = e1000_setup_rx_resources(adapter)))
+	if ((err = e1000_setup_all_rx_resources(adapter)))
 		goto err_setup_rx;
 
 	if((err = e1000_up(adapter)))
@@ -887,9 +1072,9 @@ e1000_open(struct net_device *netdev)
 	return E1000_SUCCESS;
 
 err_up:
-	e1000_free_rx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
 err_setup_rx:
-	e1000_free_tx_resources(adapter);
+	e1000_free_all_tx_resources(adapter);
 err_setup_tx:
 	e1000_reset(adapter);
 
@@ -915,8 +1100,8 @@ e1000_close(struct net_device *netdev)
 
 	e1000_down(adapter);
 
-	e1000_free_tx_resources(adapter);
-	e1000_free_rx_resources(adapter);
+	e1000_free_all_tx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
 
 	if((adapter->hw.mng_cookie.status &
 	   E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
@@ -951,14 +1136,15 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
 /**
  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure
+ * @txdr:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
 
 int
-e1000_setup_tx_resources(struct e1000_adapter *adapter)
+e1000_setup_tx_resources(struct e1000_adapter *adapter,
+			 struct e1000_tx_ring *txdr)
 {
-	struct e1000_desc_ring *txdr = &adapter->tx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	int size;
 
@@ -970,6 +1156,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
 		return -ENOMEM;
 	}
 	memset(txdr->buffer_info, 0, size);
+	memset(&txdr->previous_buffer_info, 0, sizeof(struct e1000_buffer));
 
 	/* round up to nearest 4K */
 
@@ -1018,11 +1205,41 @@ setup_tx_desc_die:
 
 	txdr->next_to_use = 0;
 	txdr->next_to_clean = 0;
+	spin_lock_init(&txdr->tx_lock);
 
 	return 0;
 }
 
 /**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ *				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_queues; i++) {
+		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
  * @adapter: board private structure
  *
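The "caller's duty to clean those orphaned rings" contract in the wrapper above describes a standard partial-failure pattern: setup stops at the first failing ring and the rings that already succeeded stay populated. A hedged sketch of the unwind a caller would need; the ring type and the setup_one()/free_one() helpers are illustrative stand-ins, not the driver's API:

#include <stdio.h>

struct ring { int populated; };

static int setup_one(struct ring *r) { r->populated = 1; return 0; }
static void free_one(struct ring *r) { r->populated = 0; }

/* Allocate n rings; on failure at ring i, unwind rings 0..i-1
 * so nothing is left half-built. */
static int setup_all(struct ring *rings, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = setup_one(&rings[i]);
		if (err) {
			while (--i >= 0)	/* free what succeeded */
				free_one(&rings[i]);
			return err;
		}
	}
	return 0;
}

int main(void)
{
	struct ring rings[2] = { {0}, {0} };

	if (setup_all(rings, 2) == 0)
		printf("both rings populated\n");
	return 0;
}

The driver's wrapper deliberately leaves the unwind to the caller (e1000_open's error path frees everything), which is one valid way to satisfy the same contract.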
@@ -1032,23 +1249,43 @@ setup_tx_desc_die:
 static void
 e1000_configure_tx(struct e1000_adapter *adapter)
 {
-	uint64_t tdba = adapter->tx_ring.dma;
-	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
-	uint32_t tctl, tipg;
-
-	E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
-	E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
-
-	E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
+	uint64_t tdba;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t tdlen, tctl, tipg, tarc;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 
-	E1000_WRITE_REG(&adapter->hw, TDH, 0);
-	E1000_WRITE_REG(&adapter->hw, TDT, 0);
+	switch (adapter->num_queues) {
+	case 2:
+		tdba = adapter->tx_ring[1].dma;
+		tdlen = adapter->tx_ring[1].count *
+			sizeof(struct e1000_tx_desc);
+		E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
+		E1000_WRITE_REG(hw, TDLEN1, tdlen);
+		E1000_WRITE_REG(hw, TDH1, 0);
+		E1000_WRITE_REG(hw, TDT1, 0);
+		adapter->tx_ring[1].tdh = E1000_TDH1;
+		adapter->tx_ring[1].tdt = E1000_TDT1;
+		/* Fall Through */
+	case 1:
+	default:
+		tdba = adapter->tx_ring[0].dma;
+		tdlen = adapter->tx_ring[0].count *
+			sizeof(struct e1000_tx_desc);
+		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+		E1000_WRITE_REG(hw, TDLEN, tdlen);
+		E1000_WRITE_REG(hw, TDH, 0);
+		E1000_WRITE_REG(hw, TDT, 0);
+		adapter->tx_ring[0].tdh = E1000_TDH;
+		adapter->tx_ring[0].tdt = E1000_TDT;
+		break;
+	}
 
 	/* Set the default values for the Tx Inter Packet Gap timer */
 
-	switch (adapter->hw.mac_type) {
+	switch (hw->mac_type) {
 	case e1000_82542_rev2_0:
 	case e1000_82542_rev2_1:
 		tipg = DEFAULT_82542_TIPG_IPGT;
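The TDBAL/TDBAH writes above split one 64-bit descriptor-ring DMA address across two 32-bit registers. A quick standalone check of the same masking and shifting; the address is an arbitrary example, not a real ring base:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tdba = 0x0000000123456000ULL;	/* example ring base */
	uint32_t tdbal = (uint32_t)(tdba & 0x00000000ffffffffULL);
	uint32_t tdbah = (uint32_t)(tdba >> 32);

	/* prints TDBAL=0x23456000 TDBAH=0x00000001 */
	printf("TDBAL=0x%08x TDBAH=0x%08x\n", tdbal, tdbah);
	return 0;
}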
@@ -1056,67 +1293,81 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
 		break;
 	default:
-		if(adapter->hw.media_type == e1000_media_type_fiber ||
-		   adapter->hw.media_type == e1000_media_type_internal_serdes)
+		if (hw->media_type == e1000_media_type_fiber ||
+		    hw->media_type == e1000_media_type_internal_serdes)
 			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
 		else
 			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
 		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
 		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
 	}
-	E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
+	E1000_WRITE_REG(hw, TIPG, tipg);
 
 	/* Set the Tx Interrupt Delay register */
 
-	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
-	if(adapter->hw.mac_type >= e1000_82540)
-		E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
+	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
+	if (hw->mac_type >= e1000_82540)
+		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
 
 	/* Program the Transmit Control Register */
 
-	tctl = E1000_READ_REG(&adapter->hw, TCTL);
+	tctl = E1000_READ_REG(hw, TCTL);
 
 	tctl &= ~E1000_TCTL_CT;
-	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
+	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 
-	E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+	E1000_WRITE_REG(hw, TCTL, tctl);
 
-	e1000_config_collision_dist(&adapter->hw);
+	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
+		tarc = E1000_READ_REG(hw, TARC0);
+		tarc |= ((1 << 25) | (1 << 21));
+		E1000_WRITE_REG(hw, TARC0, tarc);
+		tarc = E1000_READ_REG(hw, TARC1);
+		tarc |= (1 << 25);
+		if (tctl & E1000_TCTL_MULR)
+			tarc &= ~(1 << 28);
+		else
+			tarc |= (1 << 28);
+		E1000_WRITE_REG(hw, TARC1, tarc);
+	}
+
+	e1000_config_collision_dist(hw);
 
 	/* Setup Transmit Descriptor Settings for eop descriptor */
 	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
 		E1000_TXD_CMD_IFCS;
 
-	if(adapter->hw.mac_type < e1000_82543)
+	if (hw->mac_type < e1000_82543)
 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
 	else
 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
 
 	/* Cache if we're 82544 running in PCI-X because we'll
 	 * need this to apply a workaround later in the send path. */
-	if(adapter->hw.mac_type == e1000_82544 &&
-	   adapter->hw.bus_type == e1000_bus_type_pcix)
+	if (hw->mac_type == e1000_82544 &&
+	    hw->bus_type == e1000_bus_type_pcix)
 		adapter->pcix_82544 = 1;
 }
 
 /**
  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: board private structure
+ * @rxdr:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
 
 int
-e1000_setup_rx_resources(struct e1000_adapter *adapter)
+e1000_setup_rx_resources(struct e1000_adapter *adapter,
+			 struct e1000_rx_ring *rxdr)
 {
-	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	int size, desc_len;
 
 	size = sizeof(struct e1000_buffer) * rxdr->count;
 	rxdr->buffer_info = vmalloc(size);
-	if(!rxdr->buffer_info) {
+	if (!rxdr->buffer_info) {
 		DPRINTK(PROBE, ERR,
 		"Unable to allocate memory for the receive descriptor ring\n");
 		return -ENOMEM;
@@ -1156,13 +1407,13 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
 
 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
 
-	if(!rxdr->desc) {
+	if (!rxdr->desc) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
 setup_rx_desc_die:
 		vfree(rxdr->buffer_info);
 		kfree(rxdr->ps_page);
 		kfree(rxdr->ps_page_dma);
-		DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for the receive descriptor ring\n");
 		return -ENOMEM;
 	}
 
@@ -1174,9 +1425,12 @@ setup_rx_desc_die:
1174 "at %p\n", rxdr->size, rxdr->desc); 1425 "at %p\n", rxdr->size, rxdr->desc);
1175 /* Try again, without freeing the previous */ 1426 /* Try again, without freeing the previous */
1176 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1427 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1177 if(!rxdr->desc) {
1178 /* Failed allocation, critical failure */ 1428 /* Failed allocation, critical failure */
1429 if (!rxdr->desc) {
1179 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1430 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1431 DPRINTK(PROBE, ERR,
1432 "Unable to allocate memory "
1433 "for the receive descriptor ring\n");
1180 goto setup_rx_desc_die; 1434 goto setup_rx_desc_die;
1181 } 1435 }
1182 1436
@@ -1188,10 +1442,7 @@ setup_rx_desc_die:
 		DPRINTK(PROBE, ERR,
 			"Unable to allocate aligned memory "
 			"for the receive descriptor ring\n");
-			vfree(rxdr->buffer_info);
-			kfree(rxdr->ps_page);
-			kfree(rxdr->ps_page_dma);
-			return -ENOMEM;
+			goto setup_rx_desc_die;
 		} else {
 			/* Free old allocation, new allocation was successful */
 			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
@@ -1206,15 +1457,48 @@ setup_rx_desc_die:
 }
 
 /**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ *				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_queues; i++) {
+		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
  * e1000_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
-
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
 static void
 e1000_setup_rctl(struct e1000_adapter *adapter)
 {
 	uint32_t rctl, rfctl;
 	uint32_t psrctl = 0;
+#ifdef CONFIG_E1000_PACKET_SPLIT
+	uint32_t pages = 0;
+#endif
 
 	rctl = E1000_READ_REG(&adapter->hw, RCTL);
 
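PAGE_USE_COUNT() above is a ceiling division: how many whole pages an MTU-sized buffer spans. The packet-split path later in this file only enables itself when that count is at most 3. A standalone sanity check, assuming 4096-byte pages (PAGE_SIZE varies by architecture, which is also why the code checks PAGE_SIZE <= 16384):

#include <assert.h>

#define PAGE_SHIFT 12			/* assumed: 4 KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))

int main(void)
{
	assert(PAGE_USE_COUNT(1500) == 1);	/* standard MTU */
	assert(PAGE_USE_COUNT(4096) == 1);	/* exactly one page */
	assert(PAGE_USE_COUNT(9000) == 3);	/* jumbo: 2 pages + remainder */
	return 0;
}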
@@ -1235,7 +1519,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 		rctl |= E1000_RCTL_LPE;
 
 	/* Setup buffer sizes */
-	if(adapter->hw.mac_type == e1000_82573) {
+	if(adapter->hw.mac_type >= e1000_82571) {
 		/* We can now specify buffers in 1K increments.
 		 * BSIZE and BSEX are ignored in this case. */
 		rctl |= adapter->rx_buffer_len << 0x11;
@@ -1268,11 +1552,14 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 	 * followed by the page buffers.  Therefore, skb->data is
 	 * sized to hold the largest protocol header.
 	 */
-	adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2)
-		&& (adapter->netdev->mtu
-		< ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
+	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+	if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
+	    PAGE_SIZE <= 16384)
+		adapter->rx_ps_pages = pages;
+	else
+		adapter->rx_ps_pages = 0;
 #endif
-	if(adapter->rx_ps) {
+	if (adapter->rx_ps_pages) {
 		/* Configure extra packet-split registers */
 		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
 		rfctl |= E1000_RFCTL_EXTEN;
@@ -1284,12 +1571,19 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 
 		psrctl |= adapter->rx_ps_bsize0 >>
 			E1000_PSRCTL_BSIZE0_SHIFT;
-		psrctl |= PAGE_SIZE >>
-			E1000_PSRCTL_BSIZE1_SHIFT;
-		psrctl |= PAGE_SIZE <<
-			E1000_PSRCTL_BSIZE2_SHIFT;
-		psrctl |= PAGE_SIZE <<
-			E1000_PSRCTL_BSIZE3_SHIFT;
+
+		switch (adapter->rx_ps_pages) {
+		case 3:
+			psrctl |= PAGE_SIZE <<
+				E1000_PSRCTL_BSIZE3_SHIFT;
+		case 2:
+			psrctl |= PAGE_SIZE <<
+				E1000_PSRCTL_BSIZE2_SHIFT;
+		case 1:
+			psrctl |= PAGE_SIZE >>
+				E1000_PSRCTL_BSIZE1_SHIFT;
+			break;
+		}
 
 		E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
 	}
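The PSRCTL switch above relies on deliberate fall-through: configuring 3 packet-split pages also programs the buffer sizes for pages 2 and 1. A minimal model of that control flow, with the register encodings left out so only the structure is shown:

#include <stdio.h>

int main(void)
{
	int rx_ps_pages = 3;	/* example configuration */
	int enabled = 0;

	switch (rx_ps_pages) {
	case 3:
		enabled++;	/* BSIZE3 set; falls through */
	case 2:
		enabled++;	/* BSIZE2 set; falls through */
	case 1:
		enabled++;	/* BSIZE1 set */
		break;
	}
	printf("%d page buffers enabled per descriptor\n", enabled);
	return 0;
}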
@@ -1307,91 +1601,181 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 static void
 e1000_configure_rx(struct e1000_adapter *adapter)
 {
-	uint64_t rdba = adapter->rx_ring.dma;
-	uint32_t rdlen, rctl, rxcsum;
+	uint64_t rdba;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rdlen, rctl, rxcsum, ctrl_ext;
+#ifdef CONFIG_E1000_MQ
+	uint32_t reta, mrqc;
+	int i;
+#endif
 
-	if(adapter->rx_ps) {
-		rdlen = adapter->rx_ring.count *
+	if (adapter->rx_ps_pages) {
+		rdlen = adapter->rx_ring[0].count *
 			sizeof(union e1000_rx_desc_packet_split);
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
 	} else {
-		rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(struct e1000_rx_desc);
 		adapter->clean_rx = e1000_clean_rx_irq;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
 	}
 
 	/* disable receives while setting up the descriptors */
-	rctl = E1000_READ_REG(&adapter->hw, RCTL);
-	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
+	rctl = E1000_READ_REG(hw, RCTL);
+	E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
 
 	/* set the Receive Delay Timer Register */
-	E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
+	E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
 
-	if(adapter->hw.mac_type >= e1000_82540) {
-		E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
+	if (hw->mac_type >= e1000_82540) {
+		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
 		if(adapter->itr > 1)
-			E1000_WRITE_REG(&adapter->hw, ITR,
+			E1000_WRITE_REG(hw, ITR,
 				1000000000 / (adapter->itr * 256));
 	}
 
-	/* Setup the Base and Length of the Rx Descriptor Ring */
-	E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
-	E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
+	if (hw->mac_type >= e1000_82571) {
+		/* Reset delay timers after every interrupt */
+		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+		ctrl_ext |= E1000_CTRL_EXT_CANC;
+		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	switch (adapter->num_queues) {
+#ifdef CONFIG_E1000_MQ
+	case 2:
+		rdba = adapter->rx_ring[1].dma;
+		E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
+		E1000_WRITE_REG(hw, RDLEN1, rdlen);
+		E1000_WRITE_REG(hw, RDH1, 0);
+		E1000_WRITE_REG(hw, RDT1, 0);
+		adapter->rx_ring[1].rdh = E1000_RDH1;
+		adapter->rx_ring[1].rdt = E1000_RDT1;
+		/* Fall Through */
+#endif
+	case 1:
+	default:
+		rdba = adapter->rx_ring[0].dma;
+		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+		E1000_WRITE_REG(hw, RDLEN, rdlen);
+		E1000_WRITE_REG(hw, RDH, 0);
+		E1000_WRITE_REG(hw, RDT, 0);
+		adapter->rx_ring[0].rdh = E1000_RDH;
+		adapter->rx_ring[0].rdt = E1000_RDT;
+		break;
+	}
+
+#ifdef CONFIG_E1000_MQ
+	if (adapter->num_queues > 1) {
+		uint32_t random[10];
+
+		get_random_bytes(&random[0], 40);
+
+		if (hw->mac_type <= e1000_82572) {
+			E1000_WRITE_REG(hw, RSSIR, 0);
+			E1000_WRITE_REG(hw, RSSIM, 0);
+		}
+
+		switch (adapter->num_queues) {
+		case 2:
+		default:
+			reta = 0x00800080;
+			mrqc = E1000_MRQC_ENABLE_RSS_2Q;
+			break;
+		}
+
+		/* Fill out redirection table */
+		for (i = 0; i < 32; i++)
+			E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
+		/* Fill out hash function seeds */
+		for (i = 0; i < 10; i++)
+			E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
+
+		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
+		E1000_WRITE_REG(hw, MRQC, mrqc);
+	}
 
-	E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
+	/* Multiqueue and packet checksumming are mutually exclusive. */
+	if (hw->mac_type >= e1000_82571) {
+		rxcsum = E1000_READ_REG(hw, RXCSUM);
+		rxcsum |= E1000_RXCSUM_PCSD;
+		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
+	}
 
-	/* Setup the HW Rx Head and Tail Descriptor Pointers */
-	E1000_WRITE_REG(&adapter->hw, RDH, 0);
-	E1000_WRITE_REG(&adapter->hw, RDT, 0);
+#else
 
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
-	if(adapter->hw.mac_type >= e1000_82543) {
-		rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
+	if (hw->mac_type >= e1000_82543) {
+		rxcsum = E1000_READ_REG(hw, RXCSUM);
 		if(adapter->rx_csum == TRUE) {
 			rxcsum |= E1000_RXCSUM_TUOFL;
 
-			/* Enable 82573 IPv4 payload checksum for UDP fragments
+			/* Enable 82571 IPv4 payload checksum for UDP fragments
 			 * Must be used in conjunction with packet-split. */
-			if((adapter->hw.mac_type > e1000_82547_rev_2) &&
-			   (adapter->rx_ps)) {
+			if ((hw->mac_type >= e1000_82571) &&
+			    (adapter->rx_ps_pages)) {
 				rxcsum |= E1000_RXCSUM_IPPCSE;
 			}
 		} else {
 			rxcsum &= ~E1000_RXCSUM_TUOFL;
 			/* don't need to clear IPPCSE as it defaults to 0 */
 		}
-		E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
+		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
 	}
+#endif /* CONFIG_E1000_MQ */
 
-	if (adapter->hw.mac_type == e1000_82573)
-		E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);
+	if (hw->mac_type == e1000_82573)
+		E1000_WRITE_REG(hw, ERT, 0x0100);
 
 	/* Enable Receives */
-	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+	E1000_WRITE_REG(hw, RCTL, rctl);
 }
 
 /**
- * e1000_free_tx_resources - Free Tx Resources
+ * e1000_free_tx_resources - Free Tx Resources per Queue
  * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
 **/
 
 void
-e1000_free_tx_resources(struct e1000_adapter *adapter)
+e1000_free_tx_resources(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 
-	e1000_clean_tx_ring(adapter);
+	e1000_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
 
-	vfree(adapter->tx_ring.buffer_info);
-	adapter->tx_ring.buffer_info = NULL;
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
 
-	pci_free_consistent(pdev, adapter->tx_ring.size,
-		adapter->tx_ring.desc, adapter->tx_ring.dma);
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void
+e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i;
 
-	adapter->tx_ring.desc = NULL;
+	for (i = 0; i < adapter->num_queues; i++)
+		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
 }
 
 static inline void
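On the RSS setup in the configure_rx hunk above: writing 0x00800080 into 32 consecutive RETA registers appears to lay down a 128-entry redirection table whose entries alternate between the two Rx queues, with the random RSSRK words seeding the hash function. A sketch of the resulting table under that reading; the byte layout and the meaning of 0x80 are assumptions here, not a datasheet quote:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reta = 0x00800080;	/* value written 32 times above */
	uint8_t table[128];
	int i;

	for (i = 0; i < 32; i++) {
		/* each 32-bit write covers 4 one-byte table entries */
		table[i * 4 + 0] = reta & 0xff;		/* 0x80 -> 2nd queue? */
		table[i * 4 + 1] = (reta >> 8) & 0xff;	/* 0x00 -> 1st queue */
		table[i * 4 + 2] = (reta >> 16) & 0xff;
		table[i * 4 + 3] = (reta >> 24) & 0xff;
	}
	printf("entry 0 -> queue %d, entry 1 -> queue %d\n",
	       table[0] ? 1 : 0, table[1] ? 1 : 0);
	return 0;
}

Under this reading, hash buckets alternate 1,0,1,0,... across the table, spreading IPv4/TCP flows evenly over the two queues enabled by E1000_MRQC_ENABLE_RSS_2Q.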
@@ -1414,21 +1798,22 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 /**
  * e1000_clean_tx_ring - Free Tx Buffers
  * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
  **/
 
 static void
-e1000_clean_tx_ring(struct e1000_adapter *adapter)
+e1000_clean_tx_ring(struct e1000_adapter *adapter,
+		    struct e1000_tx_ring *tx_ring)
 {
-	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
 	struct e1000_buffer *buffer_info;
 	unsigned long size;
 	unsigned int i;
 
 	/* Free all the Tx ring sk_buffs */
 
-	if (likely(adapter->previous_buffer_info.skb != NULL)) {
+	if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
 		e1000_unmap_and_free_tx_resource(adapter,
-				&adapter->previous_buffer_info);
+				&tx_ring->previous_buffer_info);
 	}
 
 	for(i = 0; i < tx_ring->count; i++) {
@@ -1446,24 +1831,39 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 
-	E1000_WRITE_REG(&adapter->hw, TDH, 0);
-	E1000_WRITE_REG(&adapter->hw, TDT, 0);
+	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
+	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_queues; i++)
+		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
 /**
  * e1000_free_rx_resources - Free Rx Resources
  * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
 **/
 
 void
-e1000_free_rx_resources(struct e1000_adapter *adapter)
+e1000_free_rx_resources(struct e1000_adapter *adapter,
+			struct e1000_rx_ring *rx_ring)
 {
-	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 
-	e1000_clean_rx_ring(adapter);
+	e1000_clean_rx_ring(adapter, rx_ring);
 
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
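The switch from E1000_WRITE_REG(..., TDH/TDT, 0) to writel() in the clean-up hunks works because each ring now caches the byte offset of its own head/tail registers (tdh/tdt, rdh/rdt) when the queues are configured, so shared code can address either queue's registers without a per-queue switch. A standalone model of that indirection; the offsets and BAR size are placeholders, not the controller's actual register map:

#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t tdh;	/* byte offset of this ring's head register */
	uint32_t tdt;	/* byte offset of this ring's tail register */
};

static void reg_write(uint8_t *hw_addr, uint32_t off, uint32_t val)
{
	/* stands in for writel(val, hw_addr + off) */
	*(volatile uint32_t *)(hw_addr + off) = val;
}

int main(void)
{
	static uint8_t mmio[0x10000];			/* fake BAR mapping */
	struct ring rings[2] = { { 0x3810, 0x3818 },	/* assumed offsets */
				 { 0x3910, 0x3918 } };	/* assumed offsets */
	int i;

	/* zero every queue's hardware pointers through the cached offsets */
	for (i = 0; i < 2; i++) {
		reg_write(mmio, rings[i].tdh, 0);
		reg_write(mmio, rings[i].tdt, 0);
	}
	printf("reset %d rings\n", i);
	return 0;
}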
@@ -1478,14 +1878,31 @@ e1000_free_rx_resources(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_clean_rx_ring - Free Rx Buffers
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void
+e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_queues; i++)
+		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
  * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
 **/
 
 static void
-e1000_clean_rx_ring(struct e1000_adapter *adapter)
+e1000_clean_rx_ring(struct e1000_adapter *adapter,
+		    struct e1000_rx_ring *rx_ring)
 {
-	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
 	struct e1000_buffer *buffer_info;
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
@@ -1508,7 +1925,7 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
 		dev_kfree_skb(buffer_info->skb);
 		buffer_info->skb = NULL;
 
-		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+		for(j = 0; j < adapter->rx_ps_pages; j++) {
 			if(!ps_page->ps_page[j]) break;
 			pci_unmap_single(pdev,
 				ps_page_dma->ps_page_dma[j],
@@ -1534,8 +1951,22 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
-	E1000_WRITE_REG(&adapter->hw, RDH, 0);
-	E1000_WRITE_REG(&adapter->hw, RDT, 0);
+	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
+	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_queues; i++)
+		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
@@ -1556,7 +1987,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
 	mdelay(5);
 
 	if(netif_running(netdev))
-		e1000_clean_rx_ring(adapter);
+		e1000_clean_all_rx_rings(adapter);
 }
 
 static void
@@ -1576,7 +2007,7 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
 
 	if(netif_running(netdev)) {
 		e1000_configure_rx(adapter);
-		e1000_alloc_rx_buffers(adapter);
+		e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
 	}
 }
 
@@ -1607,6 +2038,22 @@ e1000_set_mac(struct net_device *netdev, void *p)
 
 	e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
 
+	/* With 82571 controllers, LAA may be overwritten (with the default)
+	 * due to controller reset from the other port. */
+	if (adapter->hw.mac_type == e1000_82571) {
+		/* activate the work around */
+		adapter->hw.laa_is_present = 1;
+
+		/* Hold a copy of the LAA in RAR[14]. This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed (in e1000_watchdog), the actual LAA is in one
+		 * of the RARs and no incoming packets directed to this port
+		 * are dropped. Eventually the LAA will be in RAR[0] and
+		 * RAR[14] */
+		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
+			      E1000_RAR_ENTRIES - 1);
+	}
+
 	if(adapter->hw.mac_type == e1000_82542_rev2_0)
 		e1000_leave_82542_rst(adapter);
 
@@ -1629,12 +2076,13 @@ e1000_set_multi(struct net_device *netdev)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct dev_mc_list *mc_ptr;
-	unsigned long flags;
 	uint32_t rctl;
 	uint32_t hash_value;
-	int i;
+	int i, rar_entries = E1000_RAR_ENTRIES;
 
-	spin_lock_irqsave(&adapter->tx_lock, flags);
+	/* reserve RAR[14] for LAA over-write work-around */
+	if (adapter->hw.mac_type == e1000_82571)
+		rar_entries--;
 
 	/* Check for Promiscuous and All Multicast modes */
 
@@ -1659,11 +2107,12 @@ e1000_set_multi(struct net_device *netdev)
 	/* load the first 14 multicast address into the exact filters 1-14
 	 * RAR 0 is used for the station MAC address
 	 * if there are not 14 addresses, go ahead and clear the filters
+	 * -- with 82571 controllers only 0-13 entries are filled here
 	 */
 	mc_ptr = netdev->mc_list;
 
-	for(i = 1; i < E1000_RAR_ENTRIES; i++) {
-		if(mc_ptr) {
+	for(i = 1; i < rar_entries; i++) {
+		if (mc_ptr) {
 			e1000_rar_set(hw, mc_ptr->dmi_addr, i);
 			mc_ptr = mc_ptr->next;
 		} else {
@@ -1686,8 +2135,6 @@ e1000_set_multi(struct net_device *netdev)
 
 	if(hw->mac_type == e1000_82542_rev2_0)
 		e1000_leave_82542_rst(adapter);
-
-	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 }
 
 /* Need to wait a few seconds after link up to get diagnostic information from
@@ -1759,7 +2206,7 @@ static void
 e1000_watchdog_task(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	struct e1000_desc_ring *txdr = &adapter->tx_ring;
+	struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
 	uint32_t link;
 
 	e1000_check_for_link(&adapter->hw);
@@ -1818,8 +2265,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 
 	e1000_update_adaptive(&adapter->hw);
 
-	if(!netif_carrier_ok(netdev)) {
-		if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+	if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
+		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
 			/* We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
 			 * to get done, so reset controller to flush Tx.
@@ -1847,6 +2294,11 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
1847 /* Force detection of hung controller every watchdog period */ 2294 /* Force detection of hung controller every watchdog period */
1848 adapter->detect_tx_hung = TRUE; 2295 adapter->detect_tx_hung = TRUE;
1849 2296
2297 /* With 82571 controllers, LAA may be overwritten due to controller
2298 * reset from the other port. Set the appropriate LAA in RAR[0] */
2299 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2300 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2301
1850 /* Reset the timer */ 2302 /* Reset the timer */
1851 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); 2303 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1852} 2304}
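On the dual-port 82571 a reset issued through the other function can clobber RAR[0], so the watchdog re-programs the locally administered address once per tick. A sketch of what a RAR write amounts to, with the low-dword/high-word split and the Address Valid bit assumed from the documented register layout rather than copied from e1000_hw.c:

	/* Sketch: one receive-address register pair, as e1000_rar_set()
	 * programs it -- RAL gets MAC bytes 0-3, RAH bytes 4-5 plus AV. */
	#include <stdint.h>

	#define RAH_AV (1u << 31)	/* assumed Address Valid bit */

	static void rar_write(volatile uint32_t *ral, volatile uint32_t *rah,
	                      const uint8_t mac[6])
	{
		*ral = (uint32_t)mac[0] | ((uint32_t)mac[1] << 8) |
		       ((uint32_t)mac[2] << 16) | ((uint32_t)mac[3] << 24);
		*rah = (uint32_t)mac[4] | ((uint32_t)mac[5] << 8) | RAH_AV;
	}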
@@ -1859,7 +2311,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 #define E1000_TX_FLAGS_VLAN_SHIFT	16
 
 static inline int
-e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
+e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+          struct sk_buff *skb)
 {
 #ifdef NETIF_F_TSO
 	struct e1000_context_desc *context_desc;
@@ -1910,8 +2363,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
 
-		i = adapter->tx_ring.next_to_use;
-		context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+		i = tx_ring->next_to_use;
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
 		context_desc->lower_setup.ip_fields.ipcss = ipcss;
 		context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -1923,8 +2376,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
-		if(++i == adapter->tx_ring.count) i = 0;
-		adapter->tx_ring.next_to_use = i;
+		if (++i == tx_ring->count) i = 0;
+		tx_ring->next_to_use = i;
 
 		return 1;
 	}
@@ -1934,7 +2387,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
 }
 
 static inline boolean_t
-e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+              struct sk_buff *skb)
 {
 	struct e1000_context_desc *context_desc;
 	unsigned int i;
@@ -1943,8 +2397,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 	if(likely(skb->ip_summed == CHECKSUM_HW)) {
 		css = skb->h.raw - skb->data;
 
-		i = adapter->tx_ring.next_to_use;
-		context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+		i = tx_ring->next_to_use;
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
 		context_desc->upper_setup.tcp_fields.tucss = css;
 		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
@@ -1952,8 +2406,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 		context_desc->tcp_seg_setup.data = 0;
 		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
 
-		if(unlikely(++i == adapter->tx_ring.count)) i = 0;
-		adapter->tx_ring.next_to_use = i;
+		if (unlikely(++i == tx_ring->count)) i = 0;
+		tx_ring->next_to_use = i;
 
 		return TRUE;
 	}
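Both offload paths close with the same ring-advance idiom, now computed against whichever ring was passed in rather than the single adapter->tx_ring. The wrap in isolation:

	/* Sketch: the descriptor-ring index advance used above; indices
	 * move forward modulo the ring size. */
	static inline unsigned int ring_advance(unsigned int i,
	                                        unsigned int count)
	{
		return (++i == count) ? 0 : i;
	}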
@@ -1965,11 +2419,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
 
 static inline int
-e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
-	unsigned int first, unsigned int max_per_txd,
+e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
 	unsigned int nr_frags, unsigned int mss)
 {
-	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
 	struct e1000_buffer *buffer_info;
 	unsigned int len = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
@@ -2065,9 +2518,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
 }
 
 static inline void
-e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
+e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+               int tx_flags, int count)
 {
-	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
 	struct e1000_tx_desc *tx_desc = NULL;
 	struct e1000_buffer *buffer_info;
 	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -2113,7 +2566,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
 	wmb();
 
 	tx_ring->next_to_use = i;
-	E1000_WRITE_REG(&adapter->hw, TDT, i);
+	writel(i, adapter->hw.hw_addr + tx_ring->tdt);
 }
 
 /**
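With several queues the tail register is no longer the one fixed TDT, so each ring caches its register's byte offset and the doorbell becomes a writel() against the mapped BAR; the wmb() just above keeps descriptor stores ahead of the tail write. A standalone model of that, with field names assumed from the patch:

	/* Sketch: a per-ring doorbell. 'mmio' stands for the ioremap()ed
	 * register BAR, 'tdt' for the ring's cached tail offset. */
	#include <stdint.h>

	struct ring { uint32_t tdt; unsigned int next_to_use; };

	static inline void ring_kick(volatile uint8_t *mmio, struct ring *r)
	{
		/* in the driver a wmb() precedes this store */
		*(volatile uint32_t *)(mmio + r->tdt) = r->next_to_use;
	}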
@@ -2206,6 +2659,7 @@ static int
 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_tx_ring *tx_ring;
 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
@@ -2218,7 +2672,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int f;
 	len -= skb->data_len;
 
-	if(unlikely(skb->len <= 0)) {
+#ifdef CONFIG_E1000_MQ
+	tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
+#else
+	tx_ring = adapter->tx_ring;
+#endif
+
+	if (unlikely(skb->len <= 0)) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
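Under CONFIG_E1000_MQ the transmit ring is picked by a per-CPU pointer, so queue selection costs one dereference and takes no lock. The mapping itself is built at setup time, outside this hunk; a sketch of a plausible round-robin assignment behind cpu_tx_ring (illustrative only):

	/* Sketch: CPUs mapped onto a smaller set of tx rings. The table
	 * plays the role of the driver's per-cpu cpu_tx_ring pointers. */
	#define MAX_CPUS 64		/* illustrative bound */

	struct tx_ring;
	static struct tx_ring *cpu_ring_map[MAX_CPUS];

	static void map_rings(struct tx_ring *rings, int nrings, int ncpus)
	{
		int cpu;
		for (cpu = 0; cpu < ncpus && cpu < MAX_CPUS; cpu++)
			cpu_ring_map[cpu] = &rings[cpu % nrings];
	}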
@@ -2262,21 +2722,42 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if(adapter->pcix_82544)
 		count += nr_frags;
 
-	local_irq_save(flags);
-	if (!spin_trylock(&adapter->tx_lock)) {
-		/* Collision - tell upper layer to requeue */
-		local_irq_restore(flags);
-		return NETDEV_TX_LOCKED;
-	}
+#ifdef NETIF_F_TSO
+	/* TSO Workaround for 82571/2 Controllers -- if skb->data
+	 * points to just header, pull a few bytes of payload from
+	 * frags into skb->data */
+	if (skb_shinfo(skb)->tso_size) {
+		uint8_t hdr_len;
+		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+		if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
+		    (adapter->hw.mac_type == e1000_82571 ||
+		     adapter->hw.mac_type == e1000_82572)) {
+			unsigned int pull_size;
+			pull_size = min((unsigned int)4, skb->data_len);
+			if (!__pskb_pull_tail(skb, pull_size)) {
+				printk(KERN_ERR "__pskb_pull_tail failed.\n");
+				dev_kfree_skb_any(skb);
+				return -EFAULT;
+			}
+		}
+	}
+#endif
+
 	if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
 		e1000_transfer_dhcp_info(adapter, skb);
 
+	local_irq_save(flags);
+	if (!spin_trylock(&tx_ring->tx_lock)) {
+		/* Collision - tell upper layer to requeue */
+		local_irq_restore(flags);
+		return NETDEV_TX_LOCKED;
+	}
 
 	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time */
-	if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
+	if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
 		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -2284,7 +2765,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
 		netif_stop_queue(netdev);
 		mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 	}
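The "count + 2" test keeps the tail from running into the head. E1000_DESC_UNUSED boils down to circular-buffer arithmetic that always sacrifices one slot so an empty ring and a full ring stay distinguishable; a standalone version of that computation:

	/* Sketch: free descriptors in a circular ring, one slot reserved. */
	static unsigned int desc_unused(unsigned int next_to_clean,
	                                unsigned int next_to_use,
	                                unsigned int count)
	{
		unsigned int wrap = (next_to_clean > next_to_use) ? 0 : count;
		return wrap + next_to_clean - next_to_use - 1;
	}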
@@ -2294,37 +2775,37 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
 	}
 
-	first = adapter->tx_ring.next_to_use;
+	first = tx_ring->next_to_use;
 
-	tso = e1000_tso(adapter, skb);
+	tso = e1000_tso(adapter, tx_ring, skb);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		return NETDEV_TX_OK;
 	}
 
 	if (likely(tso))
 		tx_flags |= E1000_TX_FLAGS_TSO;
-	else if(likely(e1000_tx_csum(adapter, skb)))
+	else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
 
 	/* Old method was to assume IPv4 packet by default if TSO was enabled.
-	 * 82573 hardware supports TSO capabilities for IPv6 as well...
+	 * 82571 hardware supports TSO capabilities for IPv6 as well...
 	 * no longer assume, we must. */
-	if(likely(skb->protocol == ntohs(ETH_P_IP)))
+	if (likely(skb->protocol == ntohs(ETH_P_IP)))
 		tx_flags |= E1000_TX_FLAGS_IPV4;
 
-	e1000_tx_queue(adapter,
-		e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
-		tx_flags);
+	e1000_tx_queue(adapter, tx_ring, tx_flags,
+	               e1000_tx_map(adapter, tx_ring, skb, first,
+	                            max_per_txd, nr_frags, mss));
 
 	netdev->trans_start = jiffies;
 
 	/* Make sure there is space in the ring for the next send. */
-	if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
+	if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
 		netif_stop_queue(netdev);
 
-	spin_unlock_irqrestore(&adapter->tx_lock, flags);
+	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 	return NETDEV_TX_OK;
 }
 
@@ -2388,9 +2869,18 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		return -EINVAL;
 	}
 
-#define MAX_STD_JUMBO_FRAME_SIZE 9216
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
 	/* might want this to be bigger enum check... */
-	if (adapter->hw.mac_type == e1000_82573 &&
+	/* 82571 controllers limit jumbo frame size to 10500 bytes */
+	if ((adapter->hw.mac_type == e1000_82571 ||
+	     adapter->hw.mac_type == e1000_82572) &&
+	    max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+		DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
+				    "on 82571 and 82572 controllers.\n");
+		return -EINVAL;
+	}
+
+	if(adapter->hw.mac_type == e1000_82573 &&
 	    max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
 		DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
 				    "on 82573\n");
@@ -2578,6 +3068,29 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
+#ifdef CONFIG_E1000_MQ
+void
+e1000_rx_schedule(void *data)
+{
+	struct net_device *poll_dev, *netdev = data;
+	struct e1000_adapter *adapter = netdev->priv;
+	int this_cpu = get_cpu();
+
+	poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
+	if (poll_dev == NULL) {
+		put_cpu();
+		return;
+	}
+
+	if (likely(netif_rx_schedule_prep(poll_dev)))
+		__netif_rx_schedule(poll_dev);
+	else
+		e1000_irq_enable(adapter);
+
+	put_cpu();
+}
+#endif
+
 /**
  * e1000_intr - Interrupt Handler
  * @irq: interrupt number
@@ -2592,8 +3105,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	uint32_t icr = E1000_READ_REG(hw, ICR);
-#ifndef CONFIG_E1000_NAPI
-	unsigned int i;
+#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI)
+	int i;
 #endif
 
 	if(unlikely(!icr))
@@ -2605,17 +3118,31 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 	}
 
 #ifdef CONFIG_E1000_NAPI
-	if(likely(netif_rx_schedule_prep(netdev))) {
-
-		/* Disable interrupts and register for poll. The flush
-		   of the posted write is intentionally left out.
-		*/
-
-		atomic_inc(&adapter->irq_sem);
-		E1000_WRITE_REG(hw, IMC, ~0);
-		__netif_rx_schedule(netdev);
+	atomic_inc(&adapter->irq_sem);
+	E1000_WRITE_REG(hw, IMC, ~0);
+	E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_MQ
+	if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
+		cpu_set(adapter->cpu_for_queue[0],
+			adapter->rx_sched_call_data.cpumask);
+		for (i = 1; i < adapter->num_queues; i++) {
+			cpu_set(adapter->cpu_for_queue[i],
+				adapter->rx_sched_call_data.cpumask);
+			atomic_inc(&adapter->irq_sem);
+		}
+		atomic_set(&adapter->rx_sched_call_data.count, i);
+		smp_call_async_mask(&adapter->rx_sched_call_data);
+	} else {
+		printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
 	}
-#else
+#else /* if !CONFIG_E1000_MQ */
+	if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
+		__netif_rx_schedule(&adapter->polling_netdev[0]);
+	else
+		e1000_irq_enable(adapter);
+#endif /* CONFIG_E1000_MQ */
+
+#else /* if !CONFIG_E1000_NAPI */
 	/* Writing IMC and IMS is needed for 82547.
 	   Due to Hub Link bus being occupied, an interrupt
 	   de-assertion message is not able to be sent.
@@ -2632,13 +3159,14 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 	}
 
 	for(i = 0; i < E1000_MAX_INTR; i++)
-		if(unlikely(!adapter->clean_rx(adapter) &
-		   !e1000_clean_tx_irq(adapter)))
+		if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
 			break;
 
 	if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
 		e1000_irq_enable(adapter);
-#endif
+
+#endif /* CONFIG_E1000_NAPI */
 
 	return IRQ_HANDLED;
 }
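With NAPI the handler now masks everything up front (IMC written with ~0, then a flush of the posted write) and defers the real work to polling; adapter->irq_sem counts outstanding maskers so the enable side only rewrites IMS once the count drains back to zero. A standalone model of that convention, with the register writes reduced to comments:

	/* Sketch: the irq_sem mask/enable pairing. C11 atomics stand in
	 * for the kernel's atomic_t. */
	#include <stdatomic.h>

	static atomic_int irq_sem;

	static void irq_mask(void)
	{
		atomic_fetch_add(&irq_sem, 1);
		/* driver: write IMC = ~0, read a register to flush */
	}

	static void irq_enable(void)
	{
		/* last unmasker re-enables, as e1000_irq_enable() does */
		if (atomic_fetch_sub(&irq_sem, 1) == 1) {
			/* driver: write IMS = the enable mask */
		}
	}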
@@ -2650,22 +3178,37 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 **/
 
 static int
-e1000_clean(struct net_device *netdev, int *budget)
+e1000_clean(struct net_device *poll_dev, int *budget)
 {
-	struct e1000_adapter *adapter = netdev_priv(netdev);
-	int work_to_do = min(*budget, netdev->quota);
-	int tx_cleaned;
-	int work_done = 0;
+	struct e1000_adapter *adapter;
+	int work_to_do = min(*budget, poll_dev->quota);
+	int tx_cleaned, i = 0, work_done = 0;
 
-	tx_cleaned = e1000_clean_tx_irq(adapter);
-	adapter->clean_rx(adapter, &work_done, work_to_do);
+	/* Must NOT use netdev_priv macro here. */
+	adapter = poll_dev->priv;
+
+	/* Keep link state information with original netdev */
+	if (!netif_carrier_ok(adapter->netdev))
+		goto quit_polling;
+
+	while (poll_dev != &adapter->polling_netdev[i]) {
+		i++;
+		if (unlikely(i == adapter->num_queues))
+			BUG();
+	}
+
+	tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+	adapter->clean_rx(adapter, &adapter->rx_ring[i],
+			  &work_done, work_to_do);
 
 	*budget -= work_done;
-	netdev->quota -= work_done;
+	poll_dev->quota -= work_done;
 
-	if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
 	/* If no Tx and not enough Rx work done, exit the polling mode */
-		netif_rx_complete(netdev);
+	if((!tx_cleaned && (work_done == 0)) ||
+	   !netif_running(adapter->netdev)) {
+quit_polling:
+		netif_rx_complete(poll_dev);
 		e1000_irq_enable(adapter);
 		return 0;
 	}
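Every queue owns a lightweight polling_netdev whose ->priv points straight at the shared adapter; netdev_priv() would instead compute the offset of the private area embedded in the real alloc_etherdev() allocation, which these per-queue netdevs do not have -- hence the "Must NOT use netdev_priv" comment. The linear scan just recovers the queue index; since polling_netdev is one contiguous array, pointer arithmetic gives the same answer:

	/* Sketch: queue index from a polling netdev, the loop above in
	 * one subtraction (polling_netdev is a contiguous array). */
	#include <stddef.h>
	#include <assert.h>

	struct pnd { int quota; };	/* stands in for struct net_device */

	static size_t queue_index(struct pnd *polling_netdev, size_t nqueues,
	                          struct pnd *poll_dev)
	{
		size_t i = (size_t)(poll_dev - polling_netdev);
		assert(i < nqueues);	/* the driver BUG()s instead */
		return i;
	}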
@@ -2680,9 +3223,9 @@ e1000_clean(struct net_device *netdev, int *budget)
 **/
 
 static boolean_t
-e1000_clean_tx_irq(struct e1000_adapter *adapter)
+e1000_clean_tx_irq(struct e1000_adapter *adapter,
+                   struct e1000_tx_ring *tx_ring)
 {
-	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_tx_desc *tx_desc, *eop_desc;
 	struct e1000_buffer *buffer_info;
@@ -2693,12 +3236,12 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	eop = tx_ring->buffer_info[i].next_to_watch;
 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
 
-	while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
 		/* Premature writeback of Tx descriptors clear (free buffers
 		 * and unmap pci_mapping) previous_buffer_info */
-		if (likely(adapter->previous_buffer_info.skb != NULL)) {
+		if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
 			e1000_unmap_and_free_tx_resource(adapter,
-					&adapter->previous_buffer_info);
+					&tx_ring->previous_buffer_info);
 		}
 
 		for(cleaned = FALSE; !cleaned; ) {
@@ -2714,7 +3257,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 #ifdef NETIF_F_TSO
 			} else {
 				if (cleaned) {
-					memcpy(&adapter->previous_buffer_info,
+					memcpy(&tx_ring->previous_buffer_info,
 					       buffer_info,
 					       sizeof(struct e1000_buffer));
 					memset(buffer_info, 0,
@@ -2732,6 +3275,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 
 			if(unlikely(++i == tx_ring->count)) i = 0;
 		}
+
+		tx_ring->pkt++;
 
 		eop = tx_ring->buffer_info[i].next_to_watch;
 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
@@ -2739,15 +3284,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 
 	tx_ring->next_to_clean = i;
 
-	spin_lock(&adapter->tx_lock);
+	spin_lock(&tx_ring->tx_lock);
 
 	if(unlikely(cleaned && netif_queue_stopped(netdev) &&
 		    netif_carrier_ok(netdev)))
 		netif_wake_queue(netdev);
 
-	spin_unlock(&adapter->tx_lock);
-	if(adapter->detect_tx_hung) {
+	spin_unlock(&tx_ring->tx_lock);
 
+	if (adapter->detect_tx_hung) {
 		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
 		adapter->detect_tx_hung = FALSE;
@@ -2771,8 +3316,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 				"  next_to_watch        <%x>\n"
 				"  jiffies              <%lx>\n"
 				"  next_to_watch.status <%x>\n",
-				E1000_READ_REG(&adapter->hw, TDH),
-				E1000_READ_REG(&adapter->hw, TDT),
+				readl(adapter->hw.hw_addr + tx_ring->tdh),
+				readl(adapter->hw.hw_addr + tx_ring->tdt),
 				tx_ring->next_to_use,
 				i,
 				(unsigned long long)tx_ring->buffer_info[i].dma,
@@ -2784,12 +3329,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 		}
 	}
 #ifdef NETIF_F_TSO
-
-	if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
-	    time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ)))
+	if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+	    time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ)))
 		e1000_unmap_and_free_tx_resource(
-			adapter, &adapter->previous_buffer_info);
-
+			adapter, &tx_ring->previous_buffer_info);
 #endif
 	return cleaned;
 }
@@ -2852,13 +3395,14 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
 
 static boolean_t
 #ifdef CONFIG_E1000_NAPI
-e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
-		   int work_to_do)
+e1000_clean_rx_irq(struct e1000_adapter *adapter,
+		   struct e1000_rx_ring *rx_ring,
+		   int *work_done, int work_to_do)
 #else
-e1000_clean_rx_irq(struct e1000_adapter *adapter)
+e1000_clean_rx_irq(struct e1000_adapter *adapter,
+		   struct e1000_rx_ring *rx_ring)
 #endif
 {
-	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_rx_desc *rx_desc;
@@ -2944,6 +3488,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
 		}
 #endif /* CONFIG_E1000_NAPI */
 		netdev->last_rx = jiffies;
+		rx_ring->pkt++;
 
 next_desc:
 		rx_desc->status = 0;
@@ -2953,7 +3498,7 @@ next_desc:
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 	}
 	rx_ring->next_to_clean = i;
-	adapter->alloc_rx_buf(adapter);
+	adapter->alloc_rx_buf(adapter, rx_ring);
 
 	return cleaned;
 }
@@ -2965,13 +3510,14 @@ next_desc:
 
 static boolean_t
 #ifdef CONFIG_E1000_NAPI
-e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done,
-		      int work_to_do)
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+		      struct e1000_rx_ring *rx_ring,
+		      int *work_done, int work_to_do)
 #else
-e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+		      struct e1000_rx_ring *rx_ring)
 #endif
 {
-	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
 	union e1000_rx_desc_packet_split *rx_desc;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -3027,7 +3573,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
 		/* Good Receive */
 		skb_put(skb, length);
 
-		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+		for(j = 0; j < adapter->rx_ps_pages; j++) {
 			if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
 				break;
 
@@ -3048,11 +3594,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
 				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
 		skb->protocol = eth_type_trans(skb, netdev);
 
-#ifdef HAVE_RX_ZERO_COPY
 		if(likely(rx_desc->wb.upper.header_status &
-			  E1000_RXDPS_HDRSTAT_HDRSP))
+			  E1000_RXDPS_HDRSTAT_HDRSP)) {
+			adapter->rx_hdr_split++;
+#ifdef HAVE_RX_ZERO_COPY
 			skb_shinfo(skb)->zero_copy = TRUE;
 #endif
+		}
 #ifdef CONFIG_E1000_NAPI
 		if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
@@ -3071,6 +3619,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
 		}
 #endif /* CONFIG_E1000_NAPI */
 		netdev->last_rx = jiffies;
+		rx_ring->pkt++;
 
 next_desc:
 		rx_desc->wb.middle.status_error &= ~0xFF;
@@ -3081,7 +3630,7 @@ next_desc:
 		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
 	}
 	rx_ring->next_to_clean = i;
-	adapter->alloc_rx_buf(adapter);
+	adapter->alloc_rx_buf(adapter, rx_ring);
 
 	return cleaned;
 }
@@ -3092,9 +3641,9 @@ next_desc:
 **/
 
 static void
-e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
+e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+                       struct e1000_rx_ring *rx_ring)
 {
-	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_rx_desc *rx_desc;
@@ -3178,7 +3727,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 		 * applicable for weak-ordered memory model archs,
 		 * such as IA-64). */
 		wmb();
-		E1000_WRITE_REG(&adapter->hw, RDT, i);
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
 	}
 
 	if(unlikely(++i == rx_ring->count)) i = 0;
@@ -3194,9 +3743,9 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 **/
 
 static void
-e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
+e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+                          struct e1000_rx_ring *rx_ring)
 {
-	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	union e1000_rx_desc_packet_split *rx_desc;
@@ -3215,22 +3764,26 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
 		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
 		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
-			if(unlikely(!ps_page->ps_page[j])) {
-				ps_page->ps_page[j] =
-					alloc_page(GFP_ATOMIC);
-				if(unlikely(!ps_page->ps_page[j]))
-					goto no_buffers;
-				ps_page_dma->ps_page_dma[j] =
-					pci_map_page(pdev,
-						     ps_page->ps_page[j],
-						     0, PAGE_SIZE,
-						     PCI_DMA_FROMDEVICE);
-			}
-			/* Refresh the desc even if buffer_addrs didn't
-			 * change because each write-back erases this info.
-			 */
-			rx_desc->read.buffer_addr[j+1] =
-				cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+			if (j < adapter->rx_ps_pages) {
+				if (likely(!ps_page->ps_page[j])) {
+					ps_page->ps_page[j] =
+						alloc_page(GFP_ATOMIC);
+					if (unlikely(!ps_page->ps_page[j]))
+						goto no_buffers;
+					ps_page_dma->ps_page_dma[j] =
+						pci_map_page(pdev,
+							ps_page->ps_page[j],
+							0, PAGE_SIZE,
+							PCI_DMA_FROMDEVICE);
+				}
+				/* Refresh the desc even if buffer_addrs didn't
+				 * change because each write-back erases
+				 * this info.
+				 */
+				rx_desc->read.buffer_addr[j+1] =
+					cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+			} else
+				rx_desc->read.buffer_addr[j+1] = ~0;
 		}
 
 		skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
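Packet-split receive posts one small header buffer plus up to PS_PAGE_BUFFERS page buffers per descriptor; rx_ps_pages now records how many pages this MTU actually needs, and the unused address slots are written as all-ones so the hardware never DMAs into them. A sketch of one plausible sizing rule for deriving rx_ps_pages (illustrative, not lifted from the driver):

	/* Sketch: payload pages needed per packet-split descriptor,
	 * capped at the descriptor's PS_PAGE_BUFFERS slots. */
	#define PS_PAGE_BUFFERS	4
	#define PAGE_SZ		4096u

	static unsigned int ps_pages_needed(unsigned int max_frame,
	                                    unsigned int hdr_buf)
	{
		unsigned int payload =
			max_frame > hdr_buf ? max_frame - hdr_buf : 0;
		unsigned int pages = (payload + PAGE_SZ - 1) / PAGE_SZ;
		return pages > PS_PAGE_BUFFERS ? PS_PAGE_BUFFERS : pages;
	}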
@@ -3264,7 +3817,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
 		 * descriptors are 32 bytes...so we increment tail
 		 * twice as much.
 		 */
-		E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
+		writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
 	}
 
 	if(unlikely(++i == rx_ring->count)) i = 0;
@@ -3715,6 +4268,12 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 	}
 
 	switch(adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
 	case e1000_82573:
 		swsm = E1000_READ_REG(&adapter->hw, SWSM);
 		E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -3737,6 +4296,7 @@ e1000_resume(struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	uint32_t manc, ret_val, swsm;
+	uint32_t ctrl_ext;
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
@@ -3762,6 +4322,12 @@ e1000_resume(struct pci_dev *pdev)
 	}
 
 	switch(adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
 	case e1000_82573:
 		swsm = E1000_READ_REG(&adapter->hw, SWSM);
 		E1000_WRITE_REG(&adapter->hw, SWSM,
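On 82571/82572 the driver-present handshake with the management firmware lives in CTRL_EXT.DRV_LOAD rather than the SWSM bit used on 82573: cleared on suspend so firmware may take over the port, set again on resume. The read-modify-write in isolation, with the bit position assumed from the E1000_CTRL_EXT_DRV_LOAD name rather than taken from the headers:

	/* Sketch: toggling the driver-loaded handshake bit. */
	#include <stdint.h>
	#include <stdbool.h>

	#define CTRL_EXT_DRV_LOAD (1u << 28)	/* assumed bit position */

	static void drv_load_set(volatile uint32_t *ctrl_ext, bool loaded)
	{
		uint32_t v = *ctrl_ext;
		*ctrl_ext = loaded ? (v | CTRL_EXT_DRV_LOAD)
				   : (v & ~CTRL_EXT_DRV_LOAD);
	}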
@@ -3786,7 +4352,7 @@ e1000_netpoll(struct net_device *netdev)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
-	e1000_clean_tx_irq(adapter);
+	e1000_clean_tx_irq(adapter, adapter->tx_ring);
 	enable_irq(adapter->pdev->irq);
 }
 #endif