Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--  drivers/net/e1000/e1000_main.c  609
1 file changed, 264 insertions(+), 345 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 84dcca3776ee..f39de16e6b97 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -29,6 +29,23 @@
 #include "e1000.h"
 
 /* Change Log
+ * 7.0.33      3-Feb-2006
+ *   o Added another fix for the pass false carrier bit
+ * 7.0.32      24-Jan-2006
+ *   o Need to rebuild with new version number for the pass false carrier
+ *     fix in e1000_hw.c
+ * 7.0.30      18-Jan-2006
+ *   o fixup for tso workaround to disable it for pci-x
+ *   o fix mem leak on 82542
+ *   o fixes for 10 Mb/s connections and incorrect stats
+ * 7.0.28      01/06/2006
+ *   o hardware workaround to only set "speed mode" bit for 1G link.
+ * 7.0.26      12/23/2005
+ *   o wake on lan support modified for device ID 10B5
+ *   o fix dhcp + vlan issue not making it to the iAMT firmware
+ * 7.0.24      12/9/2005
+ *   o New hardware support for the Gigabit NIC embedded in the south bridge
+ *   o Fixes to the recycling logic (skb->tail) from IBM LTC
  * 6.3.9	12/16/2005
  *   o incorporate fix for recycled skbs from IBM LTC
  * 6.3.7	11/18/2005
@@ -46,54 +63,8 @@
  *   rx_buffer_len
  * 6.3.1	9/19/05
  *   o Use adapter->tx_timeout_factor in Tx Hung Detect logic
-       (e1000_clean_tx_irq)
+ *     (e1000_clean_tx_irq)
  *   o Support for 8086:10B5 device (Quad Port)
- * 6.2.14	9/15/05
- *   o In AMT enabled configurations, set/reset DRV_LOAD bit on interface
- *     open/close
- * 6.2.13	9/14/05
- *   o Invoke e1000_check_mng_mode only for 8257x controllers since it
- *     accesses the FWSM that is not supported in other controllers
- * 6.2.12	9/9/05
- *   o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
- *   o set RCTL:SECRC only for controllers newer than 82543.
- *   o When the n/w interface comes down reset DRV_LOAD bit to notify f/w.
- *     This code was moved from e1000_remove to e1000_close
- * 6.2.10	9/6/05
- *   o Fix error in updating RDT in el1000_alloc_rx_buffers[_ps] -- one off.
- *   o Enable fc by default on 82573 controllers (do not read eeprom)
- *   o Fix rx_errors statistic not to include missed_packet_count
- *   o Fix rx_dropped statistic not to include missed_packet_count
-       (Padraig Brady)
- * 6.2.9	8/30/05
- *   o Remove call to update statistics from the controller ib e1000_get_stats
- * 6.2.8	8/30/05
- *   o Improved algorithm for rx buffer allocation/rdt update
- *   o Flow control watermarks relative to rx PBA size
- *   o Simplified 'Tx Hung' detect logic
- * 6.2.7	8/17/05
- *   o Report rx buffer allocation failures and tx timeout counts in stats
- * 6.2.6	8/16/05
- *   o Implement workaround for controller erratum -- linear non-tso packet
- *     following a TSO gets written back prematurely
- * 6.2.5	8/15/05
- *   o Set netdev->tx_queue_len based on link speed/duplex settings.
- *   o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
- *   o Do not power off PHY if SoL/IDER session is active
- * 6.2.4	8/10/05
- *   o Fix loopback test setup/cleanup for 82571/3 controllers
- *   o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
- *     all packets as raw
- *   o Prevent operations that will cause the PHY to be reset if SoL/IDER
- *     sessions are active and log a message
- * 6.2.2	7/21/05
- *   o used fixed size descriptors for all MTU sizes, reduces memory load
- * 6.1.2	4/13/05
- *   o Fixed ethtool diagnostics
- *   o Enabled flow control to take default eeprom settings
- *   o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
- *     calls, one from mii_ioctl and other from within update_stats while
- *     processing MIIREG ioctl.
  */
 
 char e1000_driver_name[] = "e1000";
@@ -103,7 +74,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | |||
103 | #else | 74 | #else |
104 | #define DRIVERNAPI "-NAPI" | 75 | #define DRIVERNAPI "-NAPI" |
105 | #endif | 76 | #endif |
106 | #define DRV_VERSION "6.3.9-k4"DRIVERNAPI | 77 | #define DRV_VERSION "7.0.33-k2"DRIVERNAPI |
107 | char e1000_driver_version[] = DRV_VERSION; | 78 | char e1000_driver_version[] = DRV_VERSION; |
108 | static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; | 79 | static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; |
109 | 80 | ||
@@ -157,32 +128,26 @@ static struct pci_device_id e1000_pci_tbl[] = {
     INTEL_E1000_ETHERNET_DEVICE(0x108A),
     INTEL_E1000_ETHERNET_DEVICE(0x108B),
     INTEL_E1000_ETHERNET_DEVICE(0x108C),
+    INTEL_E1000_ETHERNET_DEVICE(0x1096),
+    INTEL_E1000_ETHERNET_DEVICE(0x1098),
     INTEL_E1000_ETHERNET_DEVICE(0x1099),
     INTEL_E1000_ETHERNET_DEVICE(0x109A),
     INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+    INTEL_E1000_ETHERNET_DEVICE(0x10B9),
     /* required last entry */
     {0,}
 };
 
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
-int e1000_up(struct e1000_adapter *adapter);
-void e1000_down(struct e1000_adapter *adapter);
-void e1000_reset(struct e1000_adapter *adapter);
-int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
-int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                                     struct e1000_tx_ring *txdr);
 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                                     struct e1000_rx_ring *rxdr);
 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                                     struct e1000_tx_ring *tx_ring);
 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                                     struct e1000_rx_ring *rx_ring);
-void e1000_update_stats(struct e1000_adapter *adapter);
 
 /* Local Function Prototypes */
 
@@ -191,9 +156,6 @@ static void e1000_exit_module(void);
 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void __devexit e1000_remove(struct pci_dev *pdev);
 static int e1000_alloc_queues(struct e1000_adapter *adapter);
-#ifdef CONFIG_E1000_MQ
-static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
-#endif
 static int e1000_sw_init(struct e1000_adapter *adapter);
 static int e1000_open(struct net_device *netdev);
 static int e1000_close(struct net_device *netdev);
@@ -241,11 +203,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                            int cmd);
-void e1000_set_ethtool_ops(struct net_device *netdev);
 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 static void e1000_tx_timeout(struct net_device *dev);
-static void e1000_tx_timeout_task(struct net_device *dev);
+static void e1000_reset_task(struct net_device *dev);
 static void e1000_smartspeed(struct e1000_adapter *adapter);
 static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                               struct sk_buff *skb);
@@ -265,14 +226,6 @@ static int e1000_resume(struct pci_dev *pdev);
 static void e1000_netpoll (struct net_device *netdev);
 #endif
 
-#ifdef CONFIG_E1000_MQ
-/* for multiple Rx queues */
-void e1000_rx_schedule(void *data);
-#endif
-
-/* Exported from other modules */
-
-extern void e1000_check_options(struct e1000_adapter *adapter);
 
 static struct pci_driver e1000_driver = {
     .name     = e1000_driver_name,
@@ -380,7 +333,8 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
                    (vid != old_vid) &&
                     !adapter->vlgrp->vlan_devices[old_vid])
                 e1000_vlan_rx_kill_vid(netdev, old_vid);
-        }
+        } else
+            adapter->mng_vlan_id = vid;
     }
 }
 
@@ -502,10 +456,6 @@ e1000_up(struct e1000_adapter *adapter)
             return err;
     }
 
-#ifdef CONFIG_E1000_MQ
-    e1000_setup_queue_mapping(adapter);
-#endif
-
     adapter->tx_queue_len = netdev->tx_queue_len;
 
     mod_timer(&adapter->watchdog_timer, jiffies);
@@ -526,9 +476,7 @@ e1000_down(struct e1000_adapter *adapter)
             e1000_check_mng_mode(&adapter->hw);
 
     e1000_irq_disable(adapter);
-#ifdef CONFIG_E1000_MQ
-    while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
-#endif
+
     free_irq(adapter->pdev->irq, netdev);
 #ifdef CONFIG_PCI_MSI
     if (adapter->hw.mac_type > e1000_82547_rev_2 &&
@@ -587,6 +535,7 @@ e1000_reset(struct e1000_adapter *adapter)
         break;
     case e1000_82571:
     case e1000_82572:
+    case e1000_80003es2lan:
         pba = E1000_PBA_38K;
         break;
     case e1000_82573:
@@ -619,7 +568,10 @@ e1000_reset(struct e1000_adapter *adapter)
 
     adapter->hw.fc_high_water = fc_high_water_mark;
     adapter->hw.fc_low_water = fc_high_water_mark - 8;
-    adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
+    if (adapter->hw.mac_type == e1000_80003es2lan)
+        adapter->hw.fc_pause_time = 0xFFFF;
+    else
+        adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
     adapter->hw.fc_send_xon = 1;
     adapter->hw.fc = adapter->hw.original_fc;
 
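The hunk above gives the 80003ES2LAN MAC the maximum flow-control pause time (0xFFFF, in units of 512 bit times) while every other part keeps the default. A minimal user-space sketch of the selection, with the default value assumed from e1000_hw.h:

    #include <stdint.h>

    #define E1000_FC_PAUSE_TIME 0x0680  /* assumed default, ~858 usec */

    /* es2lan gets the maximum pause time as a hardware workaround;
     * everything else keeps the default. */
    static uint16_t fc_pause_time(int is_80003es2lan)
    {
        return is_80003es2lan ? 0xFFFF : E1000_FC_PAUSE_TIME;
    }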
@@ -663,6 +615,7 @@ e1000_probe(struct pci_dev *pdev,
     unsigned long mmio_start, mmio_len;
 
     static int cards_found = 0;
+    static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
     int i, err, pci_using_dac;
     uint16_t eeprom_data;
     uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
@@ -755,6 +708,15 @@ e1000_probe(struct pci_dev *pdev,
     if ((err = e1000_check_phy_reset_block(&adapter->hw)))
         DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
+    /* if ksp3, indicate if it's port a being setup */
+    if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
+        e1000_ksp3_port_a == 0)
+        adapter->ksp3_port_a = 1;
+    e1000_ksp3_port_a++;
+    /* Reset for multiple KP3 adapters */
+    if (e1000_ksp3_port_a == 4)
+        e1000_ksp3_port_a = 0;
+
     if (adapter->hw.mac_type >= e1000_82543) {
         netdev->features = NETIF_F_SG |
                    NETIF_F_HW_CSUM |
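The probe logic above identifies "port A" of a quad-port 82546GB (KSP3) board purely by probe order: the first function of every group of four is assumed to be port A, encoded by a static counter that wraps at 4. A standalone sketch of that counter (names hypothetical):

    #include <stdio.h>

    static int ksp3_counter;

    /* First function out of each group of four is treated as port A;
     * assumes the four ports of one board probe consecutively. */
    static int probe_is_port_a(void)
    {
        int is_a = (ksp3_counter == 0);
        if (++ksp3_counter == 4)
            ksp3_counter = 0;
        return is_a;
    }

    int main(void)
    {
        for (int fn = 0; fn < 8; fn++)
            printf("function %d: port a = %d\n", fn, probe_is_port_a());
        return 0;
    }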
@@ -826,8 +788,8 @@
     adapter->phy_info_timer.function = &e1000_update_phy_info;
     adapter->phy_info_timer.data = (unsigned long) adapter;
 
-    INIT_WORK(&adapter->tx_timeout_task,
-        (void (*)(void *))e1000_tx_timeout_task, netdev);
+    INIT_WORK(&adapter->reset_task,
+        (void (*)(void *))e1000_reset_task, netdev);
 
     /* we're going to reset, so assume we have no link for now */
 
@@ -854,6 +816,7 @@
     case e1000_82546:
     case e1000_82546_rev_3:
     case e1000_82571:
+    case e1000_80003es2lan:
         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
             e1000_read_eeprom(&adapter->hw,
                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
@@ -972,10 +935,6 @@ e1000_remove(struct pci_dev *pdev)
     iounmap(adapter->hw.hw_addr);
     pci_release_regions(pdev);
 
-#ifdef CONFIG_E1000_MQ
-    free_percpu(adapter->cpu_netdev);
-    free_percpu(adapter->cpu_tx_ring);
-#endif
     free_netdev(netdev);
 
     pci_disable_device(pdev);
@@ -1056,40 +1015,8 @@ e1000_sw_init(struct e1000_adapter *adapter)
         hw->master_slave = E1000_MASTER_SLAVE;
     }
 
-#ifdef CONFIG_E1000_MQ
-    /* Number of supported queues */
-    switch (hw->mac_type) {
-    case e1000_82571:
-    case e1000_82572:
-        /* These controllers support 2 tx queues, but with a single
-         * qdisc implementation, multiple tx queues aren't quite as
-         * interesting.  If we can find a logical way of mapping
-         * flows to a queue, then perhaps we can up the num_tx_queue
-         * count back to its default.  Until then, we run the risk of
-         * terrible performance due to SACK overload. */
-        adapter->num_tx_queues = 1;
-        adapter->num_rx_queues = 2;
-        break;
-    default:
-        adapter->num_tx_queues = 1;
-        adapter->num_rx_queues = 1;
-        break;
-    }
-    adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
-    adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
-    DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
-        adapter->num_rx_queues,
-        ((adapter->num_rx_queues == 1)
-         ? ((num_online_cpus() > 1)
-            ? "(due to unsupported feature in current adapter)"
-            : "(due to unsupported system configuration)")
-         : ""));
-    DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
-        adapter->num_tx_queues);
-#else
     adapter->num_tx_queues = 1;
     adapter->num_rx_queues = 1;
-#endif
 
     if (e1000_alloc_queues(adapter)) {
         DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
@@ -1152,51 +1079,9 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
     memset(adapter->polling_netdev, 0, size);
 #endif
 
-#ifdef CONFIG_E1000_MQ
-    adapter->rx_sched_call_data.func = e1000_rx_schedule;
-    adapter->rx_sched_call_data.info = adapter->netdev;
-
-    adapter->cpu_netdev = alloc_percpu(struct net_device *);
-    adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
-#endif
-
     return E1000_SUCCESS;
 }
 
-#ifdef CONFIG_E1000_MQ
-static void __devinit
-e1000_setup_queue_mapping(struct e1000_adapter *adapter)
-{
-    int i, cpu;
-
-    adapter->rx_sched_call_data.func = e1000_rx_schedule;
-    adapter->rx_sched_call_data.info = adapter->netdev;
-    cpus_clear(adapter->rx_sched_call_data.cpumask);
-
-    adapter->cpu_netdev = alloc_percpu(struct net_device *);
-    adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
-
-    lock_cpu_hotplug();
-    i = 0;
-    for_each_online_cpu(cpu) {
-        *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
-        /* This is incomplete because we'd like to assign separate
-         * physical cpus to these netdev polling structures and
-         * avoid saturating a subset of cpus.
-         */
-        if (i < adapter->num_rx_queues) {
-            *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
-            adapter->rx_ring[i].cpu = cpu;
-            cpu_set(cpu, adapter->cpumask);
-        } else
-            *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
-
-        i++;
-    }
-    unlock_cpu_hotplug();
-}
-#endif
-
 /**
  * e1000_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -1435,18 +1320,6 @@ e1000_configure_tx(struct e1000_adapter *adapter)
     /* Setup the HW Tx Head and Tail descriptor pointers */
 
     switch (adapter->num_tx_queues) {
-    case 2:
-        tdba = adapter->tx_ring[1].dma;
-        tdlen = adapter->tx_ring[1].count *
-            sizeof(struct e1000_tx_desc);
-        E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
-        E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
-        E1000_WRITE_REG(hw, TDLEN1, tdlen);
-        E1000_WRITE_REG(hw, TDH1, 0);
-        E1000_WRITE_REG(hw, TDT1, 0);
-        adapter->tx_ring[1].tdh = E1000_TDH1;
-        adapter->tx_ring[1].tdt = E1000_TDT1;
-        /* Fall Through */
     case 1:
     default:
         tdba = adapter->tx_ring[0].dma;
@@ -1477,6 +1350,10 @@ e1000_configure_tx(struct e1000_adapter *adapter)
         ipgr1 = DEFAULT_82542_TIPG_IPGR1;
         ipgr2 = DEFAULT_82542_TIPG_IPGR2;
         break;
+    case e1000_80003es2lan:
+        ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+        ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
+        break;
     default:
         ipgr1 = DEFAULT_82543_TIPG_IPGR1;
         ipgr2 = DEFAULT_82543_TIPG_IPGR2;
@@ -1497,10 +1374,13 @@ e1000_configure_tx(struct e1000_adapter *adapter)
     tctl = E1000_READ_REG(hw, TCTL);
 
     tctl &= ~E1000_TCTL_CT;
-    tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
+    tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
         (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 
-    E1000_WRITE_REG(hw, TCTL, tctl);
+#ifdef DISABLE_MULR
+    /* disable Multiple Reads for debugging */
+    tctl &= ~E1000_TCTL_MULR;
+#endif
 
     if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
         tarc = E1000_READ_REG(hw, TARC0);
@@ -1513,6 +1393,15 @@ e1000_configure_tx(struct e1000_adapter *adapter)
         else
             tarc |= (1 << 28);
         E1000_WRITE_REG(hw, TARC1, tarc);
+    } else if (hw->mac_type == e1000_80003es2lan) {
+        tarc = E1000_READ_REG(hw, TARC0);
+        tarc |= 1;
+        if (hw->media_type == e1000_media_type_internal_serdes)
+            tarc |= (1 << 20);
+        E1000_WRITE_REG(hw, TARC0, tarc);
+        tarc = E1000_READ_REG(hw, TARC1);
+        tarc |= 1;
+        E1000_WRITE_REG(hw, TARC1, tarc);
     }
 
     e1000_config_collision_dist(hw);
@@ -1531,6 +1420,9 @@ e1000_configure_tx(struct e1000_adapter *adapter)
     if (hw->mac_type == e1000_82544 &&
         hw->bus_type == e1000_bus_type_pcix)
         adapter->pcix_82544 = 1;
+
+    E1000_WRITE_REG(hw, TCTL, tctl);
+
 }
 
 /**
@@ -1790,12 +1682,9 @@ e1000_configure_rx(struct e1000_adapter *adapter)
     uint64_t rdba;
     struct e1000_hw *hw = &adapter->hw;
     uint32_t rdlen, rctl, rxcsum, ctrl_ext;
-#ifdef CONFIG_E1000_MQ
-    uint32_t reta, mrqc;
-    int i;
-#endif
 
     if (adapter->rx_ps_pages) {
+        /* this is a 32 byte descriptor */
         rdlen = adapter->rx_ring[0].count *
             sizeof(union e1000_rx_desc_packet_split);
         adapter->clean_rx = e1000_clean_rx_irq_ps;
@@ -1837,18 +1726,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
     /* Setup the HW Rx Head and Tail Descriptor Pointers and
      * the Base and Length of the Rx Descriptor Ring */
     switch (adapter->num_rx_queues) {
-#ifdef CONFIG_E1000_MQ
-    case 2:
-        rdba = adapter->rx_ring[1].dma;
-        E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
-        E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
-        E1000_WRITE_REG(hw, RDLEN1, rdlen);
-        E1000_WRITE_REG(hw, RDH1, 0);
-        E1000_WRITE_REG(hw, RDT1, 0);
-        adapter->rx_ring[1].rdh = E1000_RDH1;
-        adapter->rx_ring[1].rdt = E1000_RDT1;
-        /* Fall Through */
-#endif
     case 1:
     default:
         rdba = adapter->rx_ring[0].dma;
@@ -1862,46 +1739,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
         break;
     }
 
-#ifdef CONFIG_E1000_MQ
-    if (adapter->num_rx_queues > 1) {
-        uint32_t random[10];
-
-        get_random_bytes(&random[0], 40);
-
-        if (hw->mac_type <= e1000_82572) {
-            E1000_WRITE_REG(hw, RSSIR, 0);
-            E1000_WRITE_REG(hw, RSSIM, 0);
-        }
-
-        switch (adapter->num_rx_queues) {
-        case 2:
-        default:
-            reta = 0x00800080;
-            mrqc = E1000_MRQC_ENABLE_RSS_2Q;
-            break;
-        }
-
-        /* Fill out redirection table */
-        for (i = 0; i < 32; i++)
-            E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
-        /* Fill out hash function seeds */
-        for (i = 0; i < 10; i++)
-            E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
-
-        mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-             E1000_MRQC_RSS_FIELD_IPV4_TCP);
-        E1000_WRITE_REG(hw, MRQC, mrqc);
-    }
-
-    /* Multiqueue and packet checksumming are mutually exclusive. */
-    if (hw->mac_type >= e1000_82571) {
-        rxcsum = E1000_READ_REG(hw, RXCSUM);
-        rxcsum |= E1000_RXCSUM_PCSD;
-        E1000_WRITE_REG(hw, RXCSUM, rxcsum);
-    }
-
-#else
-
     /* Enable 82543 Receive Checksum Offload for TCP and UDP */
     if (hw->mac_type >= e1000_82543) {
         rxcsum = E1000_READ_REG(hw, RXCSUM);
@@ -1920,7 +1757,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
         }
         E1000_WRITE_REG(hw, RXCSUM, rxcsum);
     }
-#endif /* CONFIG_E1000_MQ */
 
     if (hw->mac_type == e1000_82573)
         E1000_WRITE_REG(hw, ERT, 0x0100);
@@ -2392,7 +2228,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
     struct e1000_tx_ring *txdr = adapter->tx_ring;
-    uint32_t link;
+    uint32_t link, tctl;
 
     e1000_check_for_link(&adapter->hw);
     if (adapter->hw.mac_type == e1000_82573) {
@@ -2418,20 +2254,61 @@
                    adapter->link_duplex == FULL_DUPLEX ?
                    "Full Duplex" : "Half Duplex");
 
-            /* tweak tx_queue_len according to speed/duplex */
+            /* tweak tx_queue_len according to speed/duplex
+             * and adjust the timeout factor */
             netdev->tx_queue_len = adapter->tx_queue_len;
             adapter->tx_timeout_factor = 1;
-            if (adapter->link_duplex == HALF_DUPLEX) {
+            adapter->txb2b = 1;
+            switch (adapter->link_speed) {
+            case SPEED_10:
+                adapter->txb2b = 0;
+                netdev->tx_queue_len = 10;
+                adapter->tx_timeout_factor = 8;
+                break;
+            case SPEED_100:
+                adapter->txb2b = 0;
+                netdev->tx_queue_len = 100;
+                /* maybe add some timeout factor ? */
+                break;
+            }
+
+            if ((adapter->hw.mac_type == e1000_82571 ||
+                 adapter->hw.mac_type == e1000_82572) &&
+                adapter->txb2b == 0) {
+#define SPEED_MODE_BIT (1 << 21)
+                uint32_t tarc0;
+                tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
+                tarc0 &= ~SPEED_MODE_BIT;
+                E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
+            }
+
+#ifdef NETIF_F_TSO
+            /* disable TSO for pcie and 10/100 speeds, to avoid
+             * some hardware issues */
+            if (!adapter->tso_force &&
+                adapter->hw.bus_type == e1000_bus_type_pci_express){
                 switch (adapter->link_speed) {
                 case SPEED_10:
-                    netdev->tx_queue_len = 10;
-                    adapter->tx_timeout_factor = 8;
-                    break;
                 case SPEED_100:
-                    netdev->tx_queue_len = 100;
+                    DPRINTK(PROBE,INFO,
+                    "10/100 speed: disabling TSO\n");
+                    netdev->features &= ~NETIF_F_TSO;
+                    break;
+                case SPEED_1000:
+                    netdev->features |= NETIF_F_TSO;
+                    break;
+                default:
+                    /* oops */
                     break;
                 }
             }
+#endif
+
+            /* enable transmits in the hardware, need to do this
+             * after setting TARC0 */
+            tctl = E1000_READ_REG(&adapter->hw, TCTL);
+            tctl |= E1000_TCTL_EN;
+            E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
 
             netif_carrier_on(netdev);
             netif_wake_queue(netdev);
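The rewritten watchdog block keys three settings off the negotiated speed: the qdisc queue length, the Tx-hang timeout factor, and a back-to-back-transmit flag (txb2b) that gates the 82571/2 "speed mode" workaround and, on PCI Express parts, the TSO disable. A condensed user-space sketch of the mapping (SPEED_* values as in the kernel; struct and function names hypothetical):

    #include <stdint.h>

    #define SPEED_10   10
    #define SPEED_100  100
    #define SPEED_1000 1000

    struct link_tuning {
        unsigned int tx_queue_len;      /* qdisc length */
        unsigned int tx_timeout_factor; /* scales the Tx hang timeout */
        int txb2b;                      /* back-to-back transmits OK? */
    };

    /* Slower links get a shorter queue and (at 10 Mb/s) a longer hang
     * timeout; only gigabit keeps back-to-back transmits enabled.
     * default_qlen is the value saved at e1000_up() time. */
    static struct link_tuning tune_for_speed(int speed,
                                             unsigned int default_qlen)
    {
        struct link_tuning t = { default_qlen, 1, 1 };

        switch (speed) {
        case SPEED_10:
            t.txb2b = 0;
            t.tx_queue_len = 10;
            t.tx_timeout_factor = 8;
            break;
        case SPEED_100:
            t.txb2b = 0;
            t.tx_queue_len = 100;
            break;
        }
        return t;
    }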
@@ -2446,6 +2323,16 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
             netif_carrier_off(netdev);
             netif_stop_queue(netdev);
             mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
+
+            /* 80003ES2LAN workaround--
+             * For packet buffer work-around on link down event;
+             * disable receives in the ISR and
+             * reset device here in the watchdog
+             */
+            if (adapter->hw.mac_type == e1000_80003es2lan) {
+                /* reset device */
+                schedule_work(&adapter->reset_task);
+            }
         }
 
         e1000_smartspeed(adapter);
@@ -2465,16 +2352,14 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 
     e1000_update_adaptive(&adapter->hw);
 
-#ifdef CONFIG_E1000_MQ
-    txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
-#endif
     if (!netif_carrier_ok(netdev)) {
         if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
             /* We've lost link, so the controller stops DMA,
              * but we've got queued Tx work that's never going
              * to get done, so reset controller to flush Tx.
              * (Do the reset outside of interrupt context). */
-            schedule_work(&adapter->tx_timeout_task);
+            adapter->tx_timeout_count++;
+            schedule_work(&adapter->reset_task);
         }
     }
 
@@ -2649,9 +2534,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
         /* Workaround for Controller erratum --
          * descriptor for non-tso packet in a linear SKB that follows a
          * tso gets written back prematurely before the data is fully
-         * DMAd to the controller */
+         * DMA'd to the controller */
         if (!skb->data_len && tx_ring->last_tx_tso &&
             !skb_shinfo(skb)->tso_size) {
             tx_ring->last_tx_tso = 0;
             size -= 4;
         }
@@ -2840,7 +2725,7 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
               E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
             return 0;
     }
-    if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
+    if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
         struct ethhdr *eth = (struct ethhdr *) skb->data;
         if ((htons(ETH_P_IP) == eth->h_proto)) {
             const struct iphdr *ip =
@@ -2881,11 +2766,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
     unsigned int f;
     len -= skb->data_len;
 
-#ifdef CONFIG_E1000_MQ
-    tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
-#else
     tx_ring = adapter->tx_ring;
-#endif
 
     if (unlikely(skb->len <= 0)) {
         dev_kfree_skb_any(skb);
@@ -2905,21 +2786,29 @@
     max_per_txd = min(mss << 2, max_per_txd);
     max_txd_pwr = fls(max_per_txd) - 1;
 
-    /* TSO Workaround for 82571/2 Controllers -- if skb->data
+    /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
      * points to just header, pull a few bytes of payload from
      * frags into skb->data */
     hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
-    if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
-        (adapter->hw.mac_type == e1000_82571 ||
-         adapter->hw.mac_type == e1000_82572)) {
-        unsigned int pull_size;
-        pull_size = min((unsigned int)4, skb->data_len);
-        if (!__pskb_pull_tail(skb, pull_size)) {
-            printk(KERN_ERR "__pskb_pull_tail failed.\n");
-            dev_kfree_skb_any(skb);
-            return NETDEV_TX_OK;
+    if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
+        switch (adapter->hw.mac_type) {
+            unsigned int pull_size;
+        case e1000_82571:
+        case e1000_82572:
+        case e1000_82573:
+            pull_size = min((unsigned int)4, skb->data_len);
+            if (!__pskb_pull_tail(skb, pull_size)) {
+                printk(KERN_ERR
+                       "__pskb_pull_tail failed.\n");
+                dev_kfree_skb_any(skb);
+                return NETDEV_TX_OK;
+            }
+            len = skb->len - skb->data_len;
+            break;
+        default:
+            /* do nothing */
+            break;
         }
-        len = skb->len - skb->data_len;
     }
     }
 
@@ -2935,7 +2824,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 #ifdef NETIF_F_TSO
     /* Controller Erratum workaround */
     if (!skb->data_len && tx_ring->last_tx_tso &&
-        !skb_shinfo(skb)->tso_size)
+        !skb_shinfo(skb)->tso_size)
         count++;
 #endif
 
@@ -2958,7 +2847,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
     if (adapter->pcix_82544)
         count += nr_frags;
 
-    if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
+
+    if (adapter->hw.tx_pkt_filtering &&
+        (adapter->hw.mac_type == e1000_82573))
         e1000_transfer_dhcp_info(adapter, skb);
 
     local_irq_save(flags);
@@ -3036,15 +2927,15 @@ e1000_tx_timeout(struct net_device *netdev)
     struct e1000_adapter *adapter = netdev_priv(netdev);
 
     /* Do the reset outside of interrupt context */
-    schedule_work(&adapter->tx_timeout_task);
+    adapter->tx_timeout_count++;
+    schedule_work(&adapter->reset_task);
 }
 
 static void
-e1000_tx_timeout_task(struct net_device *netdev)
+e1000_reset_task(struct net_device *netdev)
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
 
-    adapter->tx_timeout_count++;
     e1000_down(adapter);
     e1000_up(adapter);
 }
@@ -3079,6 +2970,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
     int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+    uint16_t eeprom_data = 0;
 
     if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
         (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3090,14 +2982,28 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
     switch (adapter->hw.mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
-    case e1000_82573:
         if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
             DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
             return -EINVAL;
         }
         break;
+    case e1000_82573:
+        /* only enable jumbo frames if ASPM is disabled completely
+         * this means both bits must be zero in 0x1A bits 3:2 */
+        e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
+                          &eeprom_data);
+        if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
+            if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+                DPRINTK(PROBE, ERR,
+                    "Jumbo Frames not supported.\n");
+                return -EINVAL;
+            }
+            break;
+        }
+        /* fall through to get support */
     case e1000_82571:
     case e1000_82572:
+    case e1000_80003es2lan:
 #define MAX_STD_JUMBO_FRAME_SIZE 9234
         if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
             DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
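The new 82573 case only falls through to jumbo-frame support when EEPROM word 0x1A reports ASPM fully disabled (bits 3:2 clear); otherwise the part is limited to standard frames, while 82571/2 and es2lan cap out at 9234 bytes. The policy as a predicate, with the usual 1518-byte standard maximum assumed:

    #include <stdbool.h>

    #define MAXIMUM_ETHERNET_FRAME_SIZE 1518  /* assumed standard frame */
    #define MAX_STD_JUMBO_FRAME_SIZE    9234

    /* aspm_bits stands in for the EEPROM read of word 0x1A bits 3:2. */
    static bool jumbo_frame_ok(int max_frame, bool is_82573,
                               unsigned int aspm_bits)
    {
        if (is_82573 && aspm_bits)
            return max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE;
        return max_frame <= MAX_STD_JUMBO_FRAME_SIZE;
    }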
@@ -3251,11 +3157,15 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
     /* Rx Errors */
 
+    /* RLEC on some newer hardware can be incorrect so build
+     * our own version based on RUC and ROC */
     adapter->net_stats.rx_errors = adapter->stats.rxerrc +
         adapter->stats.crcerrs + adapter->stats.algnerrc +
-        adapter->stats.rlec + adapter->stats.cexterr;
+        adapter->stats.ruc + adapter->stats.roc +
+        adapter->stats.cexterr;
     adapter->net_stats.rx_dropped = 0;
-    adapter->net_stats.rx_length_errors = adapter->stats.rlec;
+    adapter->net_stats.rx_length_errors = adapter->stats.ruc +
+        adapter->stats.roc;
     adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
     adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
     adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
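Since RLEC (receive length error count) can over-count on newer MACs, the hunk above substitutes RUC + ROC (undersize + oversize) wherever a length-error figure is needed. The resulting aggregation as a self-contained sketch:

    #include <stdint.h>

    struct rx_hw_counters {
        uint64_t rxerrc;   /* receive error count */
        uint64_t crcerrs;  /* CRC errors */
        uint64_t algnerrc; /* alignment errors */
        uint64_t ruc;      /* receive undersize count */
        uint64_t roc;      /* receive oversize count */
        uint64_t cexterr;  /* carrier extension errors */
    };

    static uint64_t rx_errors(const struct rx_hw_counters *c)
    {
        return c->rxerrc + c->crcerrs + c->algnerrc +
               c->ruc + c->roc + c->cexterr;
    }

    static uint64_t rx_length_errors(const struct rx_hw_counters *c)
    {
        return c->ruc + c->roc;
    }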
@@ -3288,29 +3198,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
     spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
-#ifdef CONFIG_E1000_MQ
-void
-e1000_rx_schedule(void *data)
-{
-    struct net_device *poll_dev, *netdev = data;
-    struct e1000_adapter *adapter = netdev->priv;
-    int this_cpu = get_cpu();
-
-    poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
-    if (poll_dev == NULL) {
-        put_cpu();
-        return;
-    }
-
-    if (likely(netif_rx_schedule_prep(poll_dev)))
-        __netif_rx_schedule(poll_dev);
-    else
-        e1000_irq_enable(adapter);
-
-    put_cpu();
-}
-#endif
-
 /**
  * e1000_intr - Interrupt Handler
  * @irq: interrupt number
@@ -3324,7 +3211,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
     struct net_device *netdev = data;
     struct e1000_adapter *adapter = netdev_priv(netdev);
     struct e1000_hw *hw = &adapter->hw;
-    uint32_t icr = E1000_READ_REG(hw, ICR);
+    uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
 #ifndef CONFIG_E1000_NAPI
     int i;
 #else
@@ -3346,6 +3233,17 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 
     if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
         hw->get_link_status = 1;
+        /* 80003ES2LAN workaround--
+         * For packet buffer work-around on link down event;
+         * disable receives here in the ISR and
+         * reset adapter in watchdog
+         */
+        if (netif_carrier_ok(netdev) &&
+            (adapter->hw.mac_type == e1000_80003es2lan)) {
+            /* disable receives */
+            rctl = E1000_READ_REG(hw, RCTL);
+            E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+        }
         mod_timer(&adapter->watchdog_timer, jiffies);
     }
 
@@ -3355,26 +3253,11 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
         E1000_WRITE_REG(hw, IMC, ~0);
         E1000_WRITE_FLUSH(hw);
     }
-#ifdef CONFIG_E1000_MQ
-    if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
-        /* We must setup the cpumask once count == 0 since
-         * each cpu bit is cleared when the work is done. */
-        adapter->rx_sched_call_data.cpumask = adapter->cpumask;
-        atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
-        atomic_set(&adapter->rx_sched_call_data.count,
-               adapter->num_rx_queues);
-        smp_call_async_mask(&adapter->rx_sched_call_data);
-    } else {
-        printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
-    }
-#else /* if !CONFIG_E1000_MQ */
     if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
         __netif_rx_schedule(&adapter->polling_netdev[0]);
     else
         e1000_irq_enable(adapter);
-#endif /* CONFIG_E1000_MQ */
-
-#else /* if !CONFIG_E1000_NAPI */
+#else
     /* Writing IMC and IMS is needed for 82547.
      * Due to Hub Link bus being occupied, an interrupt
      * de-assertion message is not able to be sent.
@@ -3398,7 +3281,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
     if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
         e1000_irq_enable(adapter);
 
-#endif /* CONFIG_E1000_NAPI */
+#endif
 
     return IRQ_HANDLED;
 }
@@ -3474,6 +3357,9 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
     struct e1000_tx_desc *tx_desc, *eop_desc;
     struct e1000_buffer *buffer_info;
     unsigned int i, eop;
+#ifdef CONFIG_E1000_NAPI
+    unsigned int count = 0;
+#endif
     boolean_t cleaned = FALSE;
 
     i = tx_ring->next_to_clean;
@@ -3486,21 +3372,20 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
             buffer_info = &tx_ring->buffer_info[i];
             cleaned = (i == eop);
 
-#ifdef CONFIG_E1000_MQ
-            tx_ring->tx_stats.bytes += buffer_info->length;
-#endif
             e1000_unmap_and_free_tx_resource(adapter, buffer_info);
             memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
 
             if (unlikely(++i == tx_ring->count)) i = 0;
         }
 
-#ifdef CONFIG_E1000_MQ
-        tx_ring->tx_stats.packets++;
-#endif
 
         eop = tx_ring->buffer_info[i].next_to_watch;
         eop_desc = E1000_TX_DESC(*tx_ring, eop);
+#ifdef CONFIG_E1000_NAPI
+#define E1000_TX_WEIGHT 64
+        /* weight of a sort for tx, to avoid endless transmit cleanup */
+        if (count++ == E1000_TX_WEIGHT) break;
+#endif
     }
 
     tx_ring->next_to_clean = i;
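E1000_TX_WEIGHT bounds how many descriptors a single NAPI pass may reclaim, so a busy transmitter cannot pin the CPU in transmit cleanup. A runnable sketch of the same bounded-reclaim pattern over a simulated ring:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 256
    #define TX_WEIGHT 64    /* mirrors E1000_TX_WEIGHT */

    static bool done[RING_SIZE];    /* descriptor "done" bits */
    static unsigned int next_to_clean;

    /* Reclaim at most TX_WEIGHT completed descriptors per call and
     * return how many were reclaimed; a caller would re-poll later
     * if work remains, just as NAPI does. */
    static unsigned int clean_tx_bounded(void)
    {
        unsigned int count = 0;

        while (done[next_to_clean] && count < TX_WEIGHT) {
            done[next_to_clean] = false;
            next_to_clean = (next_to_clean + 1) % RING_SIZE;
            count++;
        }
        return count;
    }

    int main(void)
    {
        for (int i = 0; i < RING_SIZE; i++)
            done[i] = true;    /* pretend everything completed at once */
        printf("first pass reclaimed %u\n", clean_tx_bounded());   /* 64 */
        printf("second pass reclaimed %u\n", clean_tx_bounded());  /* 64 */
        return 0;
    }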
@@ -3519,7 +3404,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
         adapter->detect_tx_hung = FALSE;
         if (tx_ring->buffer_info[eop].dma &&
             time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
-                   adapter->tx_timeout_factor * HZ)
+                   (adapter->tx_timeout_factor * HZ))
             && !(E1000_READ_REG(&adapter->hw, STATUS) &
              E1000_STATUS_TXOFF)) {
 
@@ -3644,10 +3529,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
         skb = buffer_info->skb;
         buffer_info->skb = NULL;
 
+        prefetch(skb->data - NET_IP_ALIGN);
+
         if (++i == rx_ring->count) i = 0;
         next_rxd = E1000_RX_DESC(*rx_ring, i);
+        prefetch(next_rxd);
+
         next_buffer = &rx_ring->buffer_info[i];
         next_skb = next_buffer->skb;
+        prefetch(next_skb->data - NET_IP_ALIGN);
 
         cleaned = TRUE;
         cleaned_count++;
@@ -3733,10 +3623,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
         }
 #endif /* CONFIG_E1000_NAPI */
         netdev->last_rx = jiffies;
-#ifdef CONFIG_E1000_MQ
-        rx_ring->rx_stats.packets++;
-        rx_ring->rx_stats.bytes += length;
-#endif
 
 next_desc:
         rx_desc->status = 0;
@@ -3747,6 +3633,7 @@ next_desc:
             cleaned_count = 0;
         }
 
+        /* use prefetched values */
         rx_desc = next_rxd;
         buffer_info = next_buffer;
     }
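The prefetch() calls added in this function overlap memory latency with packet processing: while one packet is handled, the next descriptor and the headers of the current and next skb are pulled toward the cache (NET_IP_ALIGN is subtracted because the buffer really starts two bytes before the IP-aligned data pointer). A user-space illustration of the pattern, using the compiler builtin the kernel helper commonly expands to:

    #include <stddef.h>

    struct desc { const unsigned char *data; };

    /* Walk a descriptor ring, prefetching the next descriptor and its
     * buffer one iteration ahead of use. */
    static unsigned long process_ring(const struct desc *ring, size_t n)
    {
        unsigned long sum = 0;

        for (size_t i = 0; i < n; i++) {
            if (i + 1 < n) {
                __builtin_prefetch(&ring[i + 1]);      /* descriptor */
                __builtin_prefetch(ring[i + 1].data);  /* packet bytes */
            }
            sum += ring[i].data[0];  /* stand-in for real packet work */
        }
        return sum;
    }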
@@ -3789,9 +3676,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3789 | i = rx_ring->next_to_clean; | 3676 | i = rx_ring->next_to_clean; |
3790 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 3677 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
3791 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | 3678 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); |
3792 | buffer_info = &rx_ring->buffer_info[i]; | ||
3793 | 3679 | ||
3794 | while (staterr & E1000_RXD_STAT_DD) { | 3680 | while (staterr & E1000_RXD_STAT_DD) { |
3681 | buffer_info = &rx_ring->buffer_info[i]; | ||
3795 | ps_page = &rx_ring->ps_page[i]; | 3682 | ps_page = &rx_ring->ps_page[i]; |
3796 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 3683 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
3797 | #ifdef CONFIG_E1000_NAPI | 3684 | #ifdef CONFIG_E1000_NAPI |
@@ -3801,10 +3688,16 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3801 | #endif | 3688 | #endif |
3802 | skb = buffer_info->skb; | 3689 | skb = buffer_info->skb; |
3803 | 3690 | ||
3691 | /* in the packet split case this is header only */ | ||
3692 | prefetch(skb->data - NET_IP_ALIGN); | ||
3693 | |||
3804 | if (++i == rx_ring->count) i = 0; | 3694 | if (++i == rx_ring->count) i = 0; |
3805 | next_rxd = E1000_RX_DESC_PS(*rx_ring, i); | 3695 | next_rxd = E1000_RX_DESC_PS(*rx_ring, i); |
3696 | prefetch(next_rxd); | ||
3697 | |||
3806 | next_buffer = &rx_ring->buffer_info[i]; | 3698 | next_buffer = &rx_ring->buffer_info[i]; |
3807 | next_skb = next_buffer->skb; | 3699 | next_skb = next_buffer->skb; |
3700 | prefetch(next_skb->data - NET_IP_ALIGN); | ||
3808 | 3701 | ||
3809 | cleaned = TRUE; | 3702 | cleaned = TRUE; |
3810 | cleaned_count++; | 3703 | cleaned_count++; |
@@ -3836,23 +3729,49 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3836 | /* Good Receive */ | 3729 | /* Good Receive */ |
3837 | skb_put(skb, length); | 3730 | skb_put(skb, length); |
3838 | 3731 | ||
3732 | { | ||
3733 | /* this looks ugly, but it seems compiler issues make it | ||
3734 | more efficient than reusing j */ | ||
3735 | int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); | ||
3736 | |||
3737 | /* page alloc/put takes too long and effects small packet | ||
3738 | * throughput, so unsplit small packets and save the alloc/put*/ | ||
3739 | if (l1 && ((length + l1) < E1000_CB_LENGTH)) { | ||
3740 | u8 *vaddr; | ||
3741 | /* there is no documentation about how to call | ||
3742 | * kmap_atomic, so we can't hold the mapping | ||
3743 | * very long */ | ||
3744 | pci_dma_sync_single_for_cpu(pdev, | ||
3745 | ps_page_dma->ps_page_dma[0], | ||
3746 | PAGE_SIZE, | ||
3747 | PCI_DMA_FROMDEVICE); | ||
3748 | vaddr = kmap_atomic(ps_page->ps_page[0], | ||
3749 | KM_SKB_DATA_SOFTIRQ); | ||
3750 | memcpy(skb->tail, vaddr, l1); | ||
3751 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); | ||
3752 | pci_dma_sync_single_for_device(pdev, | ||
3753 | ps_page_dma->ps_page_dma[0], | ||
3754 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
3755 | skb_put(skb, l1); | ||
3756 | length += l1; | ||
3757 | goto copydone; | ||
3758 | } /* if */ | ||
3759 | } | ||
3760 | |||
3839 | for (j = 0; j < adapter->rx_ps_pages; j++) { | 3761 | for (j = 0; j < adapter->rx_ps_pages; j++) { |
3840 | if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) | 3762 | if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) |
3841 | break; | 3763 | break; |
3842 | |||
3843 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], | 3764 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], |
3844 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | 3765 | PAGE_SIZE, PCI_DMA_FROMDEVICE); |
3845 | ps_page_dma->ps_page_dma[j] = 0; | 3766 | ps_page_dma->ps_page_dma[j] = 0; |
3846 | skb_shinfo(skb)->frags[j].page = | 3767 | skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0, |
3847 | ps_page->ps_page[j]; | 3768 | length); |
3848 | ps_page->ps_page[j] = NULL; | 3769 | ps_page->ps_page[j] = NULL; |
3849 | skb_shinfo(skb)->frags[j].page_offset = 0; | ||
3850 | skb_shinfo(skb)->frags[j].size = length; | ||
3851 | skb_shinfo(skb)->nr_frags++; | ||
3852 | skb->len += length; | 3770 | skb->len += length; |
3853 | skb->data_len += length; | 3771 | skb->data_len += length; |
3854 | } | 3772 | } |
3855 | 3773 | ||
3774 | copydone: | ||
3856 | e1000_rx_checksum(adapter, staterr, | 3775 | e1000_rx_checksum(adapter, staterr, |
3857 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); | 3776 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); |
3858 | skb->protocol = eth_type_trans(skb, netdev); | 3777 | skb->protocol = eth_type_trans(skb, netdev); |
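
The new block between skb_put() and the page loop is a copybreak: when the single data fragment is small enough that header plus data stay under E1000_CB_LENGTH, the fragment is copied into the skb's linear area and the page is left mapped for reuse, saving a page alloc/put per small frame. A condensed sketch of the pattern under the same assumptions as the hunk (E1000_CB_LENGTH and the kmap/DMA-sync calls are the driver's; the helper name and return convention are illustrative):

/* Sketch: pull a small fragment into the linear area instead of
 * attaching the page.  Returns 1 if the copy was done and the page
 * can stay in place; skb->len is assumed to hold the header bytes
 * already skb_put() into the buffer. */
static int sketch_rx_copybreak(struct sk_buff *skb, struct page *page,
                               dma_addr_t dma, int len,
                               struct pci_dev *pdev)
{
        u8 *vaddr;

        if (!len || (skb->len + len) >= E1000_CB_LENGTH)
                return 0;                      /* too big: take the page */

        pci_dma_sync_single_for_cpu(pdev, dma, PAGE_SIZE,
                                    PCI_DMA_FROMDEVICE);
        vaddr = kmap_atomic(page, KM_SKB_DATA_SOFTIRQ);
        memcpy(skb->tail, vaddr, len);         /* append after the header */
        kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
        pci_dma_sync_single_for_device(pdev, dma, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
        skb_put(skb, len);                     /* account the copied bytes */
        return 1;
}

For frames that do take the page path, the four open-coded skb_shinfo() frag assignments collapse into one skb_fill_page_desc() call, which sets the frag's page, offset and size and bumps nr_frags itself.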
@@ -3878,10 +3797,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3878 | } | 3797 | } |
3879 | #endif /* CONFIG_E1000_NAPI */ | 3798 | #endif /* CONFIG_E1000_NAPI */ |
3880 | netdev->last_rx = jiffies; | 3799 | netdev->last_rx = jiffies; |
3881 | #ifdef CONFIG_E1000_MQ | ||
3882 | rx_ring->rx_stats.packets++; | ||
3883 | rx_ring->rx_stats.bytes += length; | ||
3884 | #endif | ||
3885 | 3800 | ||
3886 | next_desc: | 3801 | next_desc: |
3887 | rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); | 3802 | rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); |
@@ -3893,6 +3808,7 @@ next_desc: | |||
3893 | cleaned_count = 0; | 3808 | cleaned_count = 0; |
3894 | } | 3809 | } |
3895 | 3810 | ||
3811 | /* use prefetched values */ | ||
3896 | rx_desc = next_rxd; | 3812 | rx_desc = next_rxd; |
3897 | buffer_info = next_buffer; | 3813 | buffer_info = next_buffer; |
3898 | 3814 | ||
@@ -3936,7 +3852,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
3936 | goto map_skb; | 3852 | goto map_skb; |
3937 | } | 3853 | } |
3938 | 3854 | ||
3939 | |||
3940 | if (unlikely(!skb)) { | 3855 | if (unlikely(!skb)) { |
3941 | /* Better luck next round */ | 3856 | /* Better luck next round */ |
3942 | adapter->alloc_rx_buff_failed++; | 3857 | adapter->alloc_rx_buff_failed++; |
@@ -4242,7 +4157,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4242 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4157 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4243 | return -EIO; | 4158 | return -EIO; |
4244 | } | 4159 | } |
4245 | if (adapter->hw.phy_type == e1000_phy_m88) { | 4160 | if (adapter->hw.media_type == e1000_media_type_copper) {
4246 | switch (data->reg_num) { | 4161 | switch (data->reg_num) { |
4247 | case PHY_CTRL: | 4162 | case PHY_CTRL: |
4248 | if (mii_reg & MII_CR_POWER_DOWN) | 4163 | if (mii_reg & MII_CR_POWER_DOWN) |
@@ -4258,8 +4173,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4258 | else | 4173 | else |
4259 | spddplx = SPEED_10; | 4174 | spddplx = SPEED_10; |
4260 | spddplx += (mii_reg & 0x100) | 4175 | spddplx += (mii_reg & 0x100) |
4261 | ? FULL_DUPLEX : | 4176 | ? DUPLEX_FULL : |
4262 | HALF_DUPLEX; | 4177 | DUPLEX_HALF; |
4263 | retval = e1000_set_spd_dplx(adapter, | 4178 | retval = e1000_set_spd_dplx(adapter, |
4264 | spddplx); | 4179 | spddplx); |
4265 | if (retval) { | 4180 | if (retval) { |
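
This ioctl path decodes a forced speed/duplex setting out of the raw MII control word, bit 6 and bit 13 together selecting 10/100/1000 and bit 8 (0x100) selecting duplex, now expressed with the generic DUPLEX_FULL/DUPLEX_HALF constants instead of the driver-local names. A hedged sketch of the same decode as a standalone helper (the function name is illustrative; SPEED_x and DUPLEX_x come from <linux/ethtool.h>):

/* Sketch: recover a combined speed+duplex code from an MII control
 * register value, mirroring the ioctl logic above. */
static int sketch_mii_cr_to_spddplx(u16 mii_reg)
{
        int spddplx;

        if (mii_reg & 0x40)              /* speed-select MSB: 1000 Mb/s */
                spddplx = SPEED_1000;
        else if (mii_reg & 0x2000)       /* speed-select LSB: 100 Mb/s */
                spddplx = SPEED_100;
        else
                spddplx = SPEED_10;
        spddplx += (mii_reg & 0x100) ? DUPLEX_FULL : DUPLEX_HALF;
        return spddplx;
}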
@@ -4489,8 +4404,8 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) | |||
4489 | } | 4404 | } |
4490 | 4405 | ||
4491 | #ifdef CONFIG_PM | 4406 | #ifdef CONFIG_PM |
4492 | /* these functions save and restore 16 or 64 dwords (64-256 bytes) of config | 4407 | /* Save/restore 16 or 64 dwords of PCI config space depending on which |
4493 | * space versus the 64 bytes that pci_[save|restore]_state handle | 4408 | * bus we're on (PCI(X) vs. PCI-E) |
4494 | */ | 4409 | */ |
4495 | #define PCIE_CONFIG_SPACE_LEN 256 | 4410 | #define PCIE_CONFIG_SPACE_LEN 256 |
4496 | #define PCI_CONFIG_SPACE_LEN 64 | 4411 | #define PCI_CONFIG_SPACE_LEN 64 |
@@ -4500,6 +4415,7 @@ e1000_pci_save_state(struct e1000_adapter *adapter) | |||
4500 | struct pci_dev *dev = adapter->pdev; | 4415 | struct pci_dev *dev = adapter->pdev; |
4501 | int size; | 4416 | int size; |
4502 | int i; | 4417 | int i; |
4418 | |||
4503 | if (adapter->hw.mac_type >= e1000_82571) | 4419 | if (adapter->hw.mac_type >= e1000_82571) |
4504 | size = PCIE_CONFIG_SPACE_LEN; | 4420 | size = PCIE_CONFIG_SPACE_LEN; |
4505 | else | 4421 | else |
@@ -4523,8 +4439,10 @@ e1000_pci_restore_state(struct e1000_adapter *adapter) | |||
4523 | struct pci_dev *dev = adapter->pdev; | 4439 | struct pci_dev *dev = adapter->pdev; |
4524 | int size; | 4440 | int size; |
4525 | int i; | 4441 | int i; |
4442 | |||
4526 | if (adapter->config_space == NULL) | 4443 | if (adapter->config_space == NULL) |
4527 | return; | 4444 | return; |
4445 | |||
4528 | if (adapter->hw.mac_type >= e1000_82571) | 4446 | if (adapter->hw.mac_type >= e1000_82571) |
4529 | size = PCIE_CONFIG_SPACE_LEN; | 4447 | size = PCIE_CONFIG_SPACE_LEN; |
4530 | else | 4448 | else |
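
These helpers exist because pci_save_state() of this era only covered the first 64 bytes of config space, while PCI Express functions expose 256 bytes; the driver sizes the copy by controller generation and walks it one dword at a time. A minimal sketch of the save side under the hunk's own constants (the WARN_ON and helper name are illustrative additions; adapter->config_space is assumed to be a uint32_t array pointer):

/* Sketch: capture 64 bytes (PCI/PCI-X) or 256 bytes (PCIe) of
 * config space into adapter->config_space for restore at resume. */
static int sketch_pci_save_state(struct e1000_adapter *adapter)
{
        struct pci_dev *dev = adapter->pdev;
        int size, i;

        size = (adapter->hw.mac_type >= e1000_82571) ?
                PCIE_CONFIG_SPACE_LEN : PCI_CONFIG_SPACE_LEN;

        WARN_ON(adapter->config_space != NULL);
        adapter->config_space = kmalloc(size, GFP_KERNEL);
        if (!adapter->config_space)
                return -ENOMEM;
        for (i = 0; i < (size / 4); i++)
                pci_read_config_dword(dev, i * 4,
                                      &adapter->config_space[i]);
        return 0;
}

The restore side mirrors this with pci_write_config_dword() over the same range, then frees and NULLs config_space, which is why e1000_pci_restore_state() bails out early when the pointer is NULL.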
@@ -4552,8 +4470,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4552 | e1000_down(adapter); | 4470 | e1000_down(adapter); |
4553 | 4471 | ||
4554 | #ifdef CONFIG_PM | 4472 | #ifdef CONFIG_PM |
4555 | /* implement our own version of pci_save_state(pdev) because pci | 4473 | /* Implement our own version of pci_save_state(pdev) because |
4556 | * express adapters have larger 256 byte config spaces */ | 4474 | * PCI Express adapters have 256-byte config spaces. */ |
4557 | retval = e1000_pci_save_state(adapter); | 4475 | retval = e1000_pci_save_state(adapter); |
4558 | if (retval) | 4476 | if (retval) |
4559 | return retval; | 4477 | return retval; |
@@ -4610,7 +4528,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4610 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); | 4528 | retval = pci_enable_wake(pdev, PCI_D3hot, 0); |
4611 | if (retval) | 4529 | if (retval) |
4612 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | 4530 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); |
4613 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ | 4531 | retval = pci_enable_wake(pdev, PCI_D3cold, 0); |
4614 | if (retval) | 4532 | if (retval) |
4615 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | 4533 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); |
4616 | } | 4534 | } |
@@ -4626,7 +4544,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4626 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); | 4544 | DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); |
4627 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); | 4545 | retval = pci_enable_wake(pdev, PCI_D3cold, 1); |
4628 | if (retval) | 4546 | if (retval) |
4629 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | 4547 | DPRINTK(PROBE, ERR, |
4548 | "Error enabling D3 cold wake\n"); | ||
4630 | } | 4549 | } |
4631 | } | 4550 | } |
4632 | 4551 | ||
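
Each pci_enable_wake() return is now checked, and the stale "4 == D3 cold" note is gone since PCI_D3cold is self-describing. Wake-up has to be armed (or disarmed) for D3hot and D3cold separately, as the suspend path above does for both the WoL and non-WoL branches. A condensed sketch of the pattern, substituting dev_err() for the driver's DPRINTK macro purely for brevity:

/* Sketch: arm or disarm wake for both D3 states, logging but not
 * aborting on failure, matching the suspend logic above. */
static void sketch_set_d3_wake(struct pci_dev *pdev, int enable)
{
        if (pci_enable_wake(pdev, PCI_D3hot, enable))
                dev_err(&pdev->dev, "Error %sabling D3 wake\n",
                        enable ? "en" : "dis");
        if (pci_enable_wake(pdev, PCI_D3cold, enable))
                dev_err(&pdev->dev, "Error %sabling D3 cold wake\n",
                        enable ? "en" : "dis");
}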