Diffstat (limited to 'drivers/net/e1000')
 drivers/net/e1000/e1000.h         |  51
 drivers/net/e1000/e1000_ethtool.c | 167
 drivers/net/e1000/e1000_hw.c      |  53
 drivers/net/e1000/e1000_hw.h      |  25
 drivers/net/e1000/e1000_main.c    | 814
 drivers/net/e1000/e1000_param.c   |  14
 6 files changed, 768 insertions(+), 356 deletions(-)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index e02e9ba2e18b..27c77306193b 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -72,10 +72,6 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
-#ifdef CONFIG_E1000_MQ
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#endif
 
 #define BAR_0		0
 #define BAR_1		1
@@ -87,6 +83,10 @@
 struct e1000_adapter;
 
 #include "e1000_hw.h"
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
 
 #ifdef DBG
 #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,6 +169,13 @@ struct e1000_buffer {
 	uint16_t next_to_watch;
 };
 
+#ifdef CONFIG_E1000_MQ
+struct e1000_queue_stats {
+	uint64_t packets;
+	uint64_t bytes;
+};
+#endif
+
 struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
 struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
 
@@ -191,10 +198,12 @@ struct e1000_tx_ring {
 	spinlock_t tx_lock;
 	uint16_t tdh;
 	uint16_t tdt;
-	uint64_t pkt;
 
 	boolean_t last_tx_tso;
 
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats tx_stats;
+#endif
 };
 
 struct e1000_rx_ring {
@@ -216,9 +225,17 @@ struct e1000_rx_ring {
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
 
+	struct sk_buff *rx_skb_top;
+	struct sk_buff *rx_skb_prev;
+
+	/* cpu for rx queue */
+	int cpu;
+
 	uint16_t rdh;
 	uint16_t rdt;
-	uint64_t pkt;
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats rx_stats;
+#endif
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -251,6 +268,9 @@ struct e1000_adapter {
 	uint16_t link_speed;
 	uint16_t link_duplex;
 	spinlock_t stats_lock;
+#ifdef CONFIG_E1000_NAPI
+	spinlock_t tx_queue_lock;
+#endif
 	atomic_t irq_sem;
 	struct work_struct tx_timeout_task;
 	struct work_struct watchdog_task;
@@ -264,6 +284,7 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_MQ
 	struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
 #endif
+	unsigned long tx_queue_len;
 	uint32_t txd_cmd;
 	uint32_t tx_int_delay;
 	uint32_t tx_abs_int_delay;
@@ -271,9 +292,11 @@ struct e1000_adapter {
 	uint64_t gotcl_old;
 	uint64_t tpt_old;
 	uint64_t colc_old;
+	uint32_t tx_timeout_count;
 	uint32_t tx_fifo_head;
 	uint32_t tx_head_addr;
 	uint32_t tx_fifo_size;
+	uint8_t tx_timeout_factor;
 	atomic_t tx_fifo_stall;
 	boolean_t pcix_82544;
 	boolean_t detect_tx_hung;
@@ -281,14 +304,15 @@ struct e1000_adapter {
 	/* RX */
 #ifdef CONFIG_E1000_NAPI
 	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
 			       struct e1000_rx_ring *rx_ring,
 			       int *work_done, int work_to_do);
 #else
 	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
 			       struct e1000_rx_ring *rx_ring);
 #endif
 	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
-			      struct e1000_rx_ring *rx_ring);
+			      struct e1000_rx_ring *rx_ring,
+			      int cleaned_count);
 	struct e1000_rx_ring *rx_ring;      /* One per active queue */
 #ifdef CONFIG_E1000_NAPI
 	struct net_device *polling_netdev;  /* One per active queue */
@@ -296,13 +320,15 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_MQ
 	struct net_device **cpu_netdev;     /* per-cpu */
 	struct call_async_data_struct rx_sched_call_data;
-	int cpu_for_queue[4];
+	cpumask_t cpumask;
 #endif
-	int num_queues;
+	int num_tx_queues;
+	int num_rx_queues;
 
 	uint64_t hw_csum_err;
 	uint64_t hw_csum_good;
 	uint64_t rx_hdr_split;
+	uint32_t alloc_rx_buff_failed;
 	uint32_t rx_int_delay;
 	uint32_t rx_abs_int_delay;
 	boolean_t rx_csum;
@@ -330,6 +356,7 @@ struct e1000_adapter {
 	struct e1000_rx_ring test_rx_ring;
 
 
+	u32 *config_space;
 	int msg_enable;
 #ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c88f1a3c1b1d..d252297e4db0 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
80 { "tx_deferred_ok", E1000_STAT(stats.dc) }, 80 { "tx_deferred_ok", E1000_STAT(stats.dc) },
81 { "tx_single_coll_ok", E1000_STAT(stats.scc) }, 81 { "tx_single_coll_ok", E1000_STAT(stats.scc) },
82 { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, 82 { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
83 { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
83 { "rx_long_length_errors", E1000_STAT(stats.roc) }, 84 { "rx_long_length_errors", E1000_STAT(stats.roc) },
84 { "rx_short_length_errors", E1000_STAT(stats.ruc) }, 85 { "rx_short_length_errors", E1000_STAT(stats.ruc) },
85 { "rx_align_errors", E1000_STAT(stats.algnerrc) }, 86 { "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -93,9 +94,20 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
93 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, 94 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
94 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, 95 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
95 { "rx_header_split", E1000_STAT(rx_hdr_split) }, 96 { "rx_header_split", E1000_STAT(rx_hdr_split) },
97 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
96}; 98};
97#define E1000_STATS_LEN \ 99
100#ifdef CONFIG_E1000_MQ
101#define E1000_QUEUE_STATS_LEN \
102 (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
103 ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
104 * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
105#else
106#define E1000_QUEUE_STATS_LEN 0
107#endif
108#define E1000_GLOBAL_STATS_LEN \
98 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) 109 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
110#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
99static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { 111static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
100 "Register test (offline)", "Eeprom test (offline)", 112 "Register test (offline)", "Eeprom test (offline)",
101 "Interrupt test (offline)", "Loopback test (offline)", 113 "Interrupt test (offline)", "Loopback test (offline)",
@@ -183,7 +195,15 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	if(ecmd->autoneg == AUTONEG_ENABLE) {
+	/* When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed */
+	if (e1000_check_phy_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot change link characteristics "
+			"when SoL/IDER is active.\n");
+		return -EINVAL;
+	}
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->autoneg = 1;
 		if(hw->media_type == e1000_media_type_fiber)
 			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
@@ -567,21 +587,21 @@ e1000_get_drvinfo(struct net_device *netdev,
 
 	strncpy(drvinfo->driver,  e1000_driver_name, 32);
 	strncpy(drvinfo->version, e1000_driver_version, 32);
 
-	/* EEPROM image version # is reported as firware version # for
+	/* EEPROM image version # is reported as firmware version # for
 	 * 8257{1|2|3} controllers */
 	e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
 	switch (adapter->hw.mac_type) {
 	case e1000_82571:
 	case e1000_82572:
 	case e1000_82573:
 		sprintf(firmware_version, "%d.%d-%d",
 			(eeprom_data & 0xF000) >> 12,
 			(eeprom_data & 0x0FF0) >> 4,
 			eeprom_data & 0x000F);
 		break;
 	default:
-		sprintf(firmware_version, "n/a");
+		sprintf(firmware_version, "N/A");
 	}
 
 	strncpy(drvinfo->fw_version, firmware_version, 32);
@@ -623,8 +643,8 @@ e1000_set_ringparam(struct net_device *netdev,
 	struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
 	int i, err, tx_ring_size, rx_ring_size;
 
-	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
-	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
 
 	if (netif_running(adapter->netdev))
 		e1000_down(adapter);
@@ -663,10 +683,10 @@ e1000_set_ringparam(struct net_device *netdev,
 				  E1000_MAX_TXD : E1000_MAX_82544_TXD));
 	E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
-	for (i = 0; i < adapter->num_queues; i++) {
+	for (i = 0; i < adapter->num_tx_queues; i++)
 		txdr[i].count = txdr->count;
+	for (i = 0; i < adapter->num_rx_queues; i++)
 		rxdr[i].count = rxdr->count;
-	}
 
 	if(netif_running(adapter->netdev)) {
 		/* Try to get new resources before deleting old */
@@ -979,18 +999,17 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
 		}
 	}
 
-	if(txdr->desc) {
+	if (txdr->desc) {
 		pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
 		txdr->desc = NULL;
 	}
-	if(rxdr->desc) {
+	if (rxdr->desc) {
 		pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
 		rxdr->desc = NULL;
 	}
 
 	kfree(txdr->buffer_info);
 	txdr->buffer_info = NULL;
-
 	kfree(rxdr->buffer_info);
 	rxdr->buffer_info = NULL;
 
@@ -1327,11 +1346,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_setup_loopback_test(struct e1000_adapter *adapter)
 {
-	uint32_t rctl;
 	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rctl;
 
 	if (hw->media_type == e1000_media_type_fiber ||
 	    hw->media_type == e1000_media_type_internal_serdes) {
 		switch (hw->mac_type) {
 		case e1000_82545:
 		case e1000_82546:
@@ -1362,25 +1381,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
 static void
 e1000_loopback_cleanup(struct e1000_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	uint32_t rctl;
 	uint16_t phy_reg;
-	struct e1000_hw *hw = &adapter->hw;
 
-	rctl = E1000_READ_REG(&adapter->hw, RCTL);
+	rctl = E1000_READ_REG(hw, RCTL);
 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+	E1000_WRITE_REG(hw, RCTL, rctl);
 
 	switch (hw->mac_type) {
 	case e1000_82571:
 	case e1000_82572:
 		if (hw->media_type == e1000_media_type_fiber ||
-		    hw->media_type == e1000_media_type_internal_serdes){
+		    hw->media_type == e1000_media_type_internal_serdes) {
 #define E1000_SERDES_LB_OFF 0x400
 			E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
 			msec_delay(10);
 			break;
 		}
-		/* fall thru for Cu adapters */
+		/* Fall Through */
 	case e1000_82545:
 	case e1000_82546:
 	case e1000_82545_rev_3:
@@ -1401,7 +1420,7 @@ static void
 e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
 	memset(skb->data, 0xFF, frame_size);
-	frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+	frame_size &= ~1;
 	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
 	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
 	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
@@ -1410,7 +1429,7 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 static int
 e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
-	frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+	frame_size &= ~1;
 	if(*(skb->data + 3) == 0xFF) {
 		if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
 		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
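
Note: both helpers above work on the same frame layout, and frame_size &= ~1
simply rounds odd sizes down to even. A self-contained sketch of the pattern
(plain C over a hypothetical buffer, not driver code):

	#include <string.h>

	/* Sketch: build the loopback pattern, then verify it the same
	 * way e1000_check_lbtest_frame() does. */
	static int lbtest_pattern_ok(unsigned char *buf, unsigned int n)
	{
		memset(buf, 0xFF, n);                 /* first half: 0xFF  */
		n &= ~1;                              /* force even size   */
		memset(&buf[n / 2], 0xAA, n / 2 - 1); /* second half: 0xAA */
		buf[n / 2 + 10] = 0xBE;               /* marker bytes the  */
		buf[n / 2 + 12] = 0xAF;               /* checker looks for */
		return buf[3] == 0xFF &&
		       buf[n / 2 + 10] == 0xBE && buf[n / 2 + 12] == 0xAF;
	}
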
@@ -1488,14 +1507,25 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 static int
 e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
 {
-	if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
-	if((*data = e1000_setup_loopback_test(adapter)))
-		goto err_loopback_setup;
+	/* PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active */
+	if (e1000_check_phy_reset_block(&adapter->hw)) {
+		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+			"when SoL/IDER is active.\n");
+		*data = 0;
+		goto out;
+	}
+
+	if ((*data = e1000_setup_desc_rings(adapter)))
+		goto out;
+	if ((*data = e1000_setup_loopback_test(adapter)))
+		goto err_loopback;
 	*data = e1000_run_loopback_test(adapter);
 	e1000_loopback_cleanup(adapter);
-err_loopback_setup:
-	e1000_free_desc_rings(adapter);
+
 err_loopback:
+	e1000_free_desc_rings(adapter);
+out:
 	return *data;
 }
 
@@ -1617,6 +1647,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
 	case E1000_DEV_ID_82546EB_FIBER:
 	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
 		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
 			wol->supported = 0;
@@ -1660,6 +1691,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
 	case E1000_DEV_ID_82546EB_FIBER:
 	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber */
 		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
 			return wol->wolopts ? -EOPNOTSUPP : 0;
@@ -1721,21 +1753,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
 		mod_timer(&adapter->blink_timer, jiffies);
 		msleep_interruptible(data * 1000);
 		del_timer_sync(&adapter->blink_timer);
-	}
-	else if(adapter->hw.mac_type < e1000_82573) {
-		E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
+	} else if (adapter->hw.mac_type < e1000_82573) {
+		E1000_WRITE_REG(&adapter->hw, LEDCTL,
+			(E1000_LEDCTL_LED2_BLINK_RATE |
 			E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
 			(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
 			(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
 			(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
 		msleep_interruptible(data * 1000);
-	}
-	else {
-		E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
+	} else {
+		E1000_WRITE_REG(&adapter->hw, LEDCTL,
+			(E1000_LEDCTL_LED2_BLINK_RATE |
 			E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
 			(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
 			(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
 			(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
 		msleep_interruptible(data * 1000);
 	}
 
@@ -1768,19 +1800,43 @@ e1000_get_ethtool_stats(struct net_device *netdev,
 		      struct ethtool_stats *stats, uint64_t *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_E1000_MQ
+	uint64_t *queue_stat;
+	int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
+	int j, k;
+#endif
 	int i;
 
 	e1000_update_stats(adapter);
-	for(i = 0; i < E1000_STATS_LEN; i++) {
+	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
 		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
 			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
 	}
+#ifdef CONFIG_E1000_MQ
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
+		for (k = 0; k < stat_count; k++)
+			data[i + k] = queue_stat[k];
+		i += k;
+	}
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
+		for (k = 0; k < stat_count; k++)
+			data[i + k] = queue_stat[k];
+		i += k;
+	}
+#endif
+/*	BUG_ON(i != E1000_STATS_LEN); */
 }
 
 static void
 e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 {
+#ifdef CONFIG_E1000_MQ
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+#endif
+	uint8_t *p = data;
 	int i;
 
 	switch(stringset) {
@@ -1789,11 +1845,26 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 			E1000_TEST_LEN*ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		for (i=0; i < E1000_STATS_LEN; i++) {
-			memcpy(data + i * ETH_GSTRING_LEN,
-			e1000_gstrings_stats[i].stat_string,
-			ETH_GSTRING_LEN);
+		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, e1000_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+#ifdef CONFIG_E1000_MQ
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			sprintf(p, "tx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
 		}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			sprintf(p, "rx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+#endif
+/*		BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
 	}
 }
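
Note: the strings written here must line up one-to-one with the values filled
in by e1000_get_ethtool_stats() above: all global stats first, then a
packets/bytes pair per Tx queue, then per Rx queue. Assuming 1 Tx and 2 Rx
queues, the tail of `ethtool -S ethX` output would read (illustrative only):

	tx_queue_0_packets   tx_queue_0_bytes
	rx_queue_0_packets   rx_queue_0_bytes
	rx_queue_1_packets   rx_queue_1_bytes
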
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 136fc031e4ad..2437d362ff63 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -318,6 +318,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82546GB_SERDES:
 	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 		hw->mac_type = e1000_82546_rev_3;
 		break;
 	case E1000_DEV_ID_82541EI:
@@ -639,6 +641,7 @@ e1000_init_hw(struct e1000_hw *hw)
 	uint16_t cmd_mmrbc;
 	uint16_t stat_mmrbc;
 	uint32_t mta_size;
+	uint32_t ctrl_ext;
 
 	DEBUGFUNC("e1000_init_hw");
 
@@ -735,7 +738,6 @@ e1000_init_hw(struct e1000_hw *hw)
 		break;
 	case e1000_82571:
 	case e1000_82572:
-		ctrl |= (1 << 22);
 	case e1000_82573:
 		ctrl |= E1000_TXDCTL_COUNT_DESC;
 		break;
@@ -775,6 +777,15 @@ e1000_init_hw(struct e1000_hw *hw)
 	 */
 	e1000_clear_hw_cntrs(hw);
 
+	if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+	    hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+		/* Relaxed ordering must be disabled to avoid a parity
+		 * error crash in a PCI slot. */
+		ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+	}
+
 	return ret_val;
 }
 
@@ -838,6 +849,11 @@ e1000_setup_link(struct e1000_hw *hw)
 
 	DEBUGFUNC("e1000_setup_link");
 
+	/* In the case of the phy reset being blocked, we already have a link.
+	 * We do not have to set it up again. */
+	if (e1000_check_phy_reset_block(hw))
+		return E1000_SUCCESS;
+
 	/* Read and store word 0x0F of the EEPROM. This word contains bits
 	 * that determine the hardware's default PAUSE (flow control) mode,
 	 * a bit that determines whether the HW defaults to enabling or
@@ -1929,14 +1945,19 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
 void
 e1000_config_collision_dist(struct e1000_hw *hw)
 {
-	uint32_t tctl;
+	uint32_t tctl, coll_dist;
 
 	DEBUGFUNC("e1000_config_collision_dist");
 
+	if (hw->mac_type < e1000_82543)
+		coll_dist = E1000_COLLISION_DISTANCE_82542;
+	else
+		coll_dist = E1000_COLLISION_DISTANCE;
+
 	tctl = E1000_READ_REG(hw, TCTL);
 
 	tctl &= ~E1000_TCTL_COLD;
-	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+	tctl |= coll_dist << E1000_COLD_SHIFT;
 
 	E1000_WRITE_REG(hw, TCTL, tctl);
 	E1000_WRITE_FLUSH(hw);
@@ -2982,6 +3003,8 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
 
 	if (hw->mac_type < e1000_82571)
 		msec_delay(10);
+	else
+		udelay(100);
 
 	E1000_WRITE_REG(hw, CTRL, ctrl);
 	E1000_WRITE_FLUSH(hw);
@@ -3881,14 +3904,16 @@ e1000_read_eeprom(struct e1000_hw *hw,
 		return -E1000_ERR_EEPROM;
 	}
 
-	/* FLASH reads without acquiring the semaphore are safe in 82573-based
-	 * controllers.
-	 */
-	if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
-	    (hw->mac_type != e1000_82573)) {
-		/* Prepare the EEPROM for reading */
-		if(e1000_acquire_eeprom(hw) != E1000_SUCCESS)
-			return -E1000_ERR_EEPROM;
+	/* FLASH reads without acquiring the semaphore are safe */
+	if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
+	    hw->eeprom.use_eerd == FALSE) {
+		switch (hw->mac_type) {
+		default:
+			/* Prepare the EEPROM for reading */
+			if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+				return -E1000_ERR_EEPROM;
+			break;
+		}
 	}
 
 	if(eeprom->use_eerd == TRUE) {
@@ -6720,6 +6745,12 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
 		break;
 	}
 
+	/* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
+	 * Need to wait for PHY configuration completion before accessing NVM
+	 * and PHY. */
+	if (hw->mac_type == e1000_82573)
+		msec_delay(25);
+
 	return E1000_SUCCESS;
 }
 
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 7caa35748cea..0b8f6f2b774b 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -439,6 +439,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
 #define E1000_DEV_ID_82546GB_FIBER       0x107A
 #define E1000_DEV_ID_82546GB_SERDES      0x107B
 #define E1000_DEV_ID_82546GB_PCIE        0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
 #define E1000_DEV_ID_82547EI             0x1019
 #define E1000_DEV_ID_82571EB_COPPER      0x105E
 #define E1000_DEV_ID_82571EB_FIBER       0x105F
@@ -449,6 +450,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
 #define E1000_DEV_ID_82573E              0x108B
 #define E1000_DEV_ID_82573E_IAMT         0x108C
 #define E1000_DEV_ID_82573L              0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
 
 
 #define NODE_ADDRESS_SIZE 6
@@ -1497,6 +1499,7 @@ struct e1000_hw {
 #define E1000_CTRL_EXT_EE_RST   0x00002000 /* Reinitialize from EEPROM */
 #define E1000_CTRL_EXT_IPS      0x00004000 /* Invert Power State */
 #define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS   0x00020000 /* Relaxed Ordering disable */
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
 #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
 #define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
@@ -1954,6 +1957,23 @@ struct e1000_host_command_info {
 
 #define E1000_MDALIGN          4096
 
+/* PCI-Ex registers */
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP    | \
+			     E1000_GCR_RXDSCW_NO_SNOOP | \
+			     E1000_GCR_RXDSCR_NO_SNOOP | \
+			     E1000_GCR_TXD_NO_SNOOP    | \
+			     E1000_GCR_TXDSCW_NO_SNOOP | \
+			     E1000_GCR_TXDSCR_NO_SNOOP)
+
 #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
 /* Function Active and Power State to MNG */
 #define E1000_FACTPS_FUNC0_POWER_STATE_MASK         0x00000003
@@ -2077,7 +2097,10 @@ struct e1000_host_command_info {
 /* Collision related configuration parameters */
 #define E1000_COLLISION_THRESHOLD       15
 #define E1000_CT_SHIFT                  4
-#define E1000_COLLISION_DISTANCE        64
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLLISION_DISTANCE_82542  64
 #define E1000_FDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
 #define E1000_HDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
 #define E1000_COLD_SHIFT                12
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 438a931fd55d..d0a5d1656c5f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -43,7 +43,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -97,7 +97,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
 	INTEL_E1000_ETHERNET_DEVICE(0x108B),
 	INTEL_E1000_ETHERNET_DEVICE(0x108C),
+	INTEL_E1000_ETHERNET_DEVICE(0x1099),
 	INTEL_E1000_ETHERNET_DEVICE(0x109A),
+	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
 	/* required last entry */
 	{0,}
 };
@@ -171,9 +173,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				       struct e1000_rx_ring *rx_ring);
 #endif
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-				   struct e1000_rx_ring *rx_ring);
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count);
 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-				      struct e1000_rx_ring *rx_ring);
+				      struct e1000_rx_ring *rx_ring,
+				      int cleaned_count);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 			   int cmd);
@@ -319,7 +323,75 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
 		}
 	}
 }
 
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+
+static inline void
+e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+
+	/* Let firmware take over control of h/w */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+
+static inline void
+e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+	/* Let firmware know the driver has taken over */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
 int
 e1000_up(struct e1000_adapter *adapter)
 {
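
Note: these two helpers centralize the {CTRL_EXT|SWSM}:DRV_LOAD handshake that
the rest of this patch removes from probe/remove. A sketch of the intended
pairing, mirroring the call sites changed later in the patch (not new code):

	/* probe: take control unless the 82573 f/w is AMT-managed */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);
	/* open/close: the AMT case takes/releases control there instead */
	/* remove: unconditional release (redundant for AMT, but benign) */
	e1000_release_hw_control(adapter);
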
@@ -343,8 +415,14 @@ e1000_up(struct e1000_adapter *adapter)
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
 	e1000_configure_rx(adapter);
-	for (i = 0; i < adapter->num_queues; i++)
-		adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
+	/* call E1000_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+		adapter->alloc_rx_buf(adapter, ring,
+				      E1000_DESC_UNUSED(ring));
+	}
 
 #ifdef CONFIG_PCI_MSI
 	if(adapter->hw.mac_type > e1000_82547_rev_2) {
@@ -364,6 +442,12 @@ e1000_up(struct e1000_adapter *adapter)
 		return err;
 	}
 
+#ifdef CONFIG_E1000_MQ
+	e1000_setup_queue_mapping(adapter);
+#endif
+
+	adapter->tx_queue_len = netdev->tx_queue_len;
+
 	mod_timer(&adapter->watchdog_timer, jiffies);
 
 #ifdef CONFIG_E1000_NAPI
@@ -378,6 +462,8 @@ void
 e1000_down(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+				     e1000_check_mng_mode(&adapter->hw);
 
 	e1000_irq_disable(adapter);
 #ifdef CONFIG_E1000_MQ
@@ -396,6 +482,7 @@ e1000_down(struct e1000_adapter *adapter)
 #ifdef CONFIG_E1000_NAPI
 	netif_poll_disable(netdev);
 #endif
+	netdev->tx_queue_len = adapter->tx_queue_len;
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
 	netif_carrier_off(netdev);
@@ -405,12 +492,16 @@ e1000_down(struct e1000_adapter *adapter)
 	e1000_clean_all_tx_rings(adapter);
 	e1000_clean_all_rx_rings(adapter);
 
-	/* If WoL is not enabled and management mode is not IAMT
-	 * Power down the PHY so no link is implied when interface is down */
-	if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+	/* Power down the PHY so no link is implied when interface is down *
+	 * The PHY cannot be powered down if any of the following is TRUE *
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
+	if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
 	   adapter->hw.media_type == e1000_media_type_copper &&
-	   !e1000_check_mng_mode(&adapter->hw) &&
-	   !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
+	   !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+	   !mng_mode_enabled &&
+	   !e1000_check_phy_reset_block(&adapter->hw)) {
 		uint16_t mii_reg;
 		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
 		mii_reg |= MII_CR_POWER_DOWN;
@@ -422,10 +513,8 @@ e1000_down(struct e1000_adapter *adapter)
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
 	uint32_t pba, manc;
 	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
-	uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
 
 	/* Repartition Pba for greater than 9k mtu
 	 * To take effect CTRL.RST is required.
@@ -449,15 +538,8 @@ e1000_reset(struct e1000_adapter *adapter)
 	}
 
 	if((adapter->hw.mac_type != e1000_82573) &&
-	   (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
+	   (adapter->netdev->mtu > E1000_RXBUFFER_8192))
 		pba -= 8; /* allocate more FIFO for Tx */
-		/* send an XOFF when there is enough space in the
-		 * Rx FIFO to hold one extra full size Rx packet
-		 */
-		fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
-			ETHERNET_FCS_SIZE + 1;
-		fc_low_water_mark = fc_high_water_mark + 8;
-	}
 
 
 	if(adapter->hw.mac_type == e1000_82547) {
@@ -471,10 +553,12 @@ e1000_reset(struct e1000_adapter *adapter)
 	E1000_WRITE_REG(&adapter->hw, PBA, pba);
 
 	/* flow control settings */
-	adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
-				    fc_high_water_mark;
-	adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
-				   fc_low_water_mark;
+	/* Set the FC high water mark to 90% of the FIFO size.
+	 * Required to clear last 3 LSB */
+	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+
+	adapter->hw.fc_high_water = fc_high_water_mark;
+	adapter->hw.fc_low_water = fc_high_water_mark - 8;
 	adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
 	adapter->hw.fc_send_xon = 1;
 	adapter->hw.fc = adapter->hw.original_fc;
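
Note: PBA holds the Rx FIFO size in KB, so (pba * 9216) / 10 is 90% of
pba * 1024 bytes, and the & 0xFFF8 mask rounds down to the 8-byte granularity
the flow-control registers work in. A minimal worked example, assuming
pba = 48 (a 48 KB Rx FIFO):

	#include <stdint.h>

	/* Sketch of the watermark arithmetic used above. */
	static uint16_t fc_high_water(uint32_t pba)
	{
		/* pba = 48: 48 * 9216 = 442368; / 10 = 44236; & 0xFFF8
		 * rounds down to 44232 -- just under 90% of 49152 bytes */
		return ((pba * 9216) / 10) & 0xFFF8;
	}
	/* low water is high - 8, i.e. 44224 for this example */
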
@@ -517,8 +601,6 @@ e1000_probe(struct pci_dev *pdev,
 	struct net_device *netdev;
 	struct e1000_adapter *adapter;
 	unsigned long mmio_start, mmio_len;
-	uint32_t ctrl_ext;
-	uint32_t swsm;
 
 	static int cards_found = 0;
 	int i, err, pci_using_dac;
@@ -712,8 +794,7 @@ e1000_probe(struct pci_dev *pdev,
 	case e1000_82546:
 	case e1000_82546_rev_3:
 	case e1000_82571:
-		if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
-		   && (adapter->hw.media_type == e1000_media_type_copper)) {
+		if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
 			e1000_read_eeprom(&adapter->hw,
 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
 			break;
@@ -727,25 +808,36 @@ e1000_probe(struct pci_dev *pdev,
 	if(eeprom_data & eeprom_apme_mask)
 		adapter->wol |= E1000_WUFC_MAG;
 
+	/* print bus type/speed/width info */
+	{
+	struct e1000_hw *hw = &adapter->hw;
+	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+		 "32-bit"));
+	}
+
+	for (i = 0; i < 6; i++)
+		printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
 
-	/* Let firmware know the driver has taken over */
-	switch(adapter->hw.mac_type) {
-	case e1000_82571:
-	case e1000_82572:
-		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-		break;
-	case e1000_82573:
-		swsm = E1000_READ_REG(&adapter->hw, SWSM);
-		E1000_WRITE_REG(&adapter->hw, SWSM,
-				swsm | E1000_SWSM_DRV_LOAD);
-		break;
-	default:
-		break;
-	}
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (adapter->hw.mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
 
 	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev)))
@@ -782,8 +874,7 @@ e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t ctrl_ext;
-	uint32_t manc, swsm;
+	uint32_t manc;
 #ifdef CONFIG_E1000_NAPI
 	int i;
 #endif
@@ -799,26 +890,13 @@ e1000_remove(struct pci_dev *pdev)
 		}
 	}
 
-	switch(adapter->hw.mac_type) {
-	case e1000_82571:
-	case e1000_82572:
-		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-		break;
-	case e1000_82573:
-		swsm = E1000_READ_REG(&adapter->hw, SWSM);
-		E1000_WRITE_REG(&adapter->hw, SWSM,
-				swsm & ~E1000_SWSM_DRV_LOAD);
-		break;
-
-	default:
-		break;
-	}
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
 
 	unregister_netdev(netdev);
 #ifdef CONFIG_E1000_NAPI
-	for (i = 0; i < adapter->num_queues; i++)
+	for (i = 0; i < adapter->num_rx_queues; i++)
 		__dev_put(&adapter->polling_netdev[i]);
 #endif
 
@@ -923,15 +1001,34 @@ e1000_sw_init(struct e1000_adapter *adapter)
 	switch (hw->mac_type) {
 	case e1000_82571:
 	case e1000_82572:
-		adapter->num_queues = 2;
+		/* These controllers support 2 tx queues, but with a single
+		 * qdisc implementation, multiple tx queues aren't quite as
+		 * interesting.  If we can find a logical way of mapping
+		 * flows to a queue, then perhaps we can up the num_tx_queue
+		 * count back to its default.  Until then, we run the risk of
+		 * terrible performance due to SACK overload. */
+		adapter->num_tx_queues = 1;
+		adapter->num_rx_queues = 2;
 		break;
 	default:
-		adapter->num_queues = 1;
+		adapter->num_tx_queues = 1;
+		adapter->num_rx_queues = 1;
 		break;
 	}
-	adapter->num_queues = min(adapter->num_queues, num_online_cpus());
+	adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
+	adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
+	DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
+		adapter->num_rx_queues,
+		((adapter->num_rx_queues == 1)
+		 ? ((num_online_cpus() > 1)
+			? "(due to unsupported feature in current adapter)"
+			: "(due to unsupported system configuration)")
+		 : ""));
+	DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
+		adapter->num_tx_queues);
 #else
-	adapter->num_queues = 1;
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
 #endif
 
 	if (e1000_alloc_queues(adapter)) {
@@ -940,17 +1037,14 @@ e1000_sw_init(struct e1000_adapter *adapter)
 	}
 
 #ifdef CONFIG_E1000_NAPI
-	for (i = 0; i < adapter->num_queues; i++) {
+	for (i = 0; i < adapter->num_rx_queues; i++) {
 		adapter->polling_netdev[i].priv = adapter;
 		adapter->polling_netdev[i].poll = &e1000_clean;
 		adapter->polling_netdev[i].weight = 64;
 		dev_hold(&adapter->polling_netdev[i]);
 		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
 	}
+	spin_lock_init(&adapter->tx_queue_lock);
 #endif
-
-#ifdef CONFIG_E1000_MQ
-	e1000_setup_queue_mapping(adapter);
-#endif
 
 	atomic_set(&adapter->irq_sem, 1);
@@ -973,13 +1067,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
 {
 	int size;
 
-	size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+	size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
 	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
 	if (!adapter->tx_ring)
 		return -ENOMEM;
 	memset(adapter->tx_ring, 0, size);
 
-	size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+	size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
 	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
 	if (!adapter->rx_ring) {
 		kfree(adapter->tx_ring);
@@ -988,7 +1082,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
 	memset(adapter->rx_ring, 0, size);
 
 #ifdef CONFIG_E1000_NAPI
-	size = sizeof(struct net_device) * adapter->num_queues;
+	size = sizeof(struct net_device) * adapter->num_rx_queues;
 	adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
 	if (!adapter->polling_netdev) {
 		kfree(adapter->tx_ring);
@@ -998,6 +1092,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
 	memset(adapter->polling_netdev, 0, size);
 #endif
 
+#ifdef CONFIG_E1000_MQ
+	adapter->rx_sched_call_data.func = e1000_rx_schedule;
+	adapter->rx_sched_call_data.info = adapter->netdev;
+
+	adapter->cpu_netdev = alloc_percpu(struct net_device *);
+	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+#endif
+
 	return E1000_SUCCESS;
 }
1003 1105
@@ -1017,14 +1119,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter)
1017 lock_cpu_hotplug(); 1119 lock_cpu_hotplug();
1018 i = 0; 1120 i = 0;
1019 for_each_online_cpu(cpu) { 1121 for_each_online_cpu(cpu) {
1020 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues]; 1122 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
1021 /* This is incomplete because we'd like to assign separate 1123 /* This is incomplete because we'd like to assign separate
1022 * physical cpus to these netdev polling structures and 1124 * physical cpus to these netdev polling structures and
1023 * avoid saturating a subset of cpus. 1125 * avoid saturating a subset of cpus.
1024 */ 1126 */
1025 if (i < adapter->num_queues) { 1127 if (i < adapter->num_rx_queues) {
1026 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i]; 1128 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
1027 adapter->cpu_for_queue[i] = cpu; 1129 adapter->rx_ring[i].cpu = cpu;
1130 cpu_set(cpu, adapter->cpumask);
1028 } else 1131 } else
1029 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL; 1132 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
1030 1133
@@ -1071,6 +1174,12 @@ e1000_open(struct net_device *netdev)
 		e1000_update_mng_vlan(adapter);
 	}
 
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now open */
+	if (adapter->hw.mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
 	return E1000_SUCCESS;
 
 err_up:
@@ -1109,6 +1218,13 @@ e1000_close(struct net_device *netdev)
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 	}
+
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now closed */
+	if (adapter->hw.mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_release_hw_control(adapter);
+
 	return 0;
 }
 
@@ -1229,7 +1345,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
 {
 	int i, err = 0;
 
-	for (i = 0; i < adapter->num_queues; i++) {
+	for (i = 0; i < adapter->num_tx_queues; i++) {
 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
 		if (err) {
 			DPRINTK(PROBE, ERR,
@@ -1254,10 +1370,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 	uint64_t tdba;
 	struct e1000_hw *hw = &adapter->hw;
 	uint32_t tdlen, tctl, tipg, tarc;
+	uint32_t ipgr1, ipgr2;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 
-	switch (adapter->num_queues) {
+	switch (adapter->num_tx_queues) {
 	case 2:
 		tdba = adapter->tx_ring[1].dma;
 		tdlen = adapter->tx_ring[1].count *
@@ -1287,22 +1404,26 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1287 1404
1288 /* Set the default values for the Tx Inter Packet Gap timer */ 1405 /* Set the default values for the Tx Inter Packet Gap timer */
1289 1406
1407 if (hw->media_type == e1000_media_type_fiber ||
1408 hw->media_type == e1000_media_type_internal_serdes)
1409 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1410 else
1411 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1412
1290 switch (hw->mac_type) { 1413 switch (hw->mac_type) {
1291 case e1000_82542_rev2_0: 1414 case e1000_82542_rev2_0:
1292 case e1000_82542_rev2_1: 1415 case e1000_82542_rev2_1:
1293 tipg = DEFAULT_82542_TIPG_IPGT; 1416 tipg = DEFAULT_82542_TIPG_IPGT;
1294 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 1417 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1295 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 1418 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1296 break; 1419 break;
1297 default: 1420 default:
1298 if (hw->media_type == e1000_media_type_fiber || 1421 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1299 hw->media_type == e1000_media_type_internal_serdes) 1422 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1300 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1423 break;
1301 else
1302 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1303 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1304 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1305 } 1424 }
1425 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1426 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1306 E1000_WRITE_REG(hw, TIPG, tipg); 1427 E1000_WRITE_REG(hw, TIPG, tipg);
1307 1428
1308 /* Set the Tx Interrupt Delay register */ 1429 /* Set the Tx Interrupt Delay register */
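
The reworked TIPG logic above picks the IPGT field by media type first, then ORs the IPGR1/IPGR2 fields in once after the switch instead of duplicating the shifts in every case. A sketch of the register packing, assuming the 10- and 20-bit field shifts and the copper defaults from e1000_hw.h (the values here are illustrative, not authoritative):

#include <stdio.h>
#include <stdint.h>

#define TIPG_IPGR1_SHIFT 10	/* assumed, per e1000_hw.h */
#define TIPG_IPGR2_SHIFT 20	/* assumed, per e1000_hw.h */

int main(void)
{
	uint32_t ipgt = 8, ipgr1 = 8, ipgr2 = 6;	/* illustrative copper defaults */
	uint32_t tipg = ipgt;

	/* the three fields are independent, so they can be ORed in after the switch */
	tipg |= ipgr1 << TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << TIPG_IPGR2_SHIFT;
	printf("TIPG = 0x%08x\n", (unsigned)tipg);
	return 0;
}
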
@@ -1454,6 +1575,8 @@ setup_rx_desc_die:
1454 1575
1455 rxdr->next_to_clean = 0; 1576 rxdr->next_to_clean = 0;
1456 rxdr->next_to_use = 0; 1577 rxdr->next_to_use = 0;
1578 rxdr->rx_skb_top = NULL;
1579 rxdr->rx_skb_prev = NULL;
1457 1580
1458 return 0; 1581 return 0;
1459} 1582}
@@ -1475,7 +1598,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1475{ 1598{
1476 int i, err = 0; 1599 int i, err = 0;
1477 1600
1478 for (i = 0; i < adapter->num_queues; i++) { 1601 for (i = 0; i < adapter->num_rx_queues; i++) {
1479 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1602 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1480 if (err) { 1603 if (err) {
1481 DPRINTK(PROBE, ERR, 1604 DPRINTK(PROBE, ERR,
@@ -1510,7 +1633,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1510 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1633 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1511 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); 1634 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1512 1635
1513 if(adapter->hw.tbi_compatibility_on == 1) 1636 if (adapter->hw.mac_type > e1000_82543)
1637 rctl |= E1000_RCTL_SECRC;
1638
1639 if (adapter->hw.tbi_compatibility_on == 1)
1514 rctl |= E1000_RCTL_SBP; 1640 rctl |= E1000_RCTL_SBP;
1515 else 1641 else
1516 rctl &= ~E1000_RCTL_SBP; 1642 rctl &= ~E1000_RCTL_SBP;
@@ -1638,16 +1764,21 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1638 } 1764 }
1639 1765
1640 if (hw->mac_type >= e1000_82571) { 1766 if (hw->mac_type >= e1000_82571) {
1641 /* Reset delay timers after every interrupt */
1642 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 1767 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
1768 /* Reset delay timers after every interrupt */
1643 ctrl_ext |= E1000_CTRL_EXT_CANC; 1769 ctrl_ext |= E1000_CTRL_EXT_CANC;
1770#ifdef CONFIG_E1000_NAPI
1771 /* Auto-Mask interrupts upon ICR read. */
1772 ctrl_ext |= E1000_CTRL_EXT_IAME;
1773#endif
1644 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); 1774 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
1775 E1000_WRITE_REG(hw, IAM, ~0);
1645 E1000_WRITE_FLUSH(hw); 1776 E1000_WRITE_FLUSH(hw);
1646 } 1777 }
1647 1778
1648 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1779 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1649 * the Base and Length of the Rx Descriptor Ring */ 1780 * the Base and Length of the Rx Descriptor Ring */
1650 switch (adapter->num_queues) { 1781 switch (adapter->num_rx_queues) {
1651#ifdef CONFIG_E1000_MQ 1782#ifdef CONFIG_E1000_MQ
1652 case 2: 1783 case 2:
1653 rdba = adapter->rx_ring[1].dma; 1784 rdba = adapter->rx_ring[1].dma;
@@ -1674,7 +1805,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1674 } 1805 }
1675 1806
1676#ifdef CONFIG_E1000_MQ 1807#ifdef CONFIG_E1000_MQ
1677 if (adapter->num_queues > 1) { 1808 if (adapter->num_rx_queues > 1) {
1678 uint32_t random[10]; 1809 uint32_t random[10];
1679 1810
1680 get_random_bytes(&random[0], 40); 1811 get_random_bytes(&random[0], 40);
@@ -1684,7 +1815,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1684 E1000_WRITE_REG(hw, RSSIM, 0); 1815 E1000_WRITE_REG(hw, RSSIM, 0);
1685 } 1816 }
1686 1817
1687 switch (adapter->num_queues) { 1818 switch (adapter->num_rx_queues) {
1688 case 2: 1819 case 2:
1689 default: 1820 default:
1690 reta = 0x00800080; 1821 reta = 0x00800080;
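
For the two-queue case above, each byte of the RETA redirection table steers one hash bucket, and (by the convention assumed here) bit 7 of a byte selects queue 1, so writing the repeating word 0x00800080 alternates buckets between the two queues. A sketch of how the first four buckets resolve:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reta_word = 0x00800080;	/* as written for the 2-queue case */
	int bucket;

	for (bucket = 0; bucket < 4; bucket++) {
		uint8_t entry = (reta_word >> (bucket * 8)) & 0xff;
		/* assumed convention: bit 7 of a RETA byte selects queue 1 */
		printf("bucket %d -> rx queue %d\n", bucket, (entry & 0x80) ? 1 : 0);
	}
	return 0;
}
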
@@ -1776,7 +1907,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1776{ 1907{
1777 int i; 1908 int i;
1778 1909
1779 for (i = 0; i < adapter->num_queues; i++) 1910 for (i = 0; i < adapter->num_tx_queues; i++)
1780 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1911 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1781} 1912}
1782 1913
@@ -1789,12 +1920,10 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1789 buffer_info->dma, 1920 buffer_info->dma,
1790 buffer_info->length, 1921 buffer_info->length,
1791 PCI_DMA_TODEVICE); 1922 PCI_DMA_TODEVICE);
1792 buffer_info->dma = 0;
1793 } 1923 }
1794 if(buffer_info->skb) { 1924 if (buffer_info->skb)
1795 dev_kfree_skb_any(buffer_info->skb); 1925 dev_kfree_skb_any(buffer_info->skb);
1796 buffer_info->skb = NULL; 1926 memset(buffer_info, 0, sizeof(struct e1000_buffer));
1797 }
1798} 1927}
1799 1928
1800/** 1929/**
@@ -1843,7 +1972,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
1843{ 1972{
1844 int i; 1973 int i;
1845 1974
1846 for (i = 0; i < adapter->num_queues; i++) 1975 for (i = 0; i < adapter->num_tx_queues; i++)
1847 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1976 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1848} 1977}
1849 1978
@@ -1887,7 +2016,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
1887{ 2016{
1888 int i; 2017 int i;
1889 2018
1890 for (i = 0; i < adapter->num_queues; i++) 2019 for (i = 0; i < adapter->num_rx_queues; i++)
1891 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2020 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
1892} 2021}
1893 2022
@@ -1913,8 +2042,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
1913 for(i = 0; i < rx_ring->count; i++) { 2042 for(i = 0; i < rx_ring->count; i++) {
1914 buffer_info = &rx_ring->buffer_info[i]; 2043 buffer_info = &rx_ring->buffer_info[i];
1915 if(buffer_info->skb) { 2044 if(buffer_info->skb) {
1916 ps_page = &rx_ring->ps_page[i];
1917 ps_page_dma = &rx_ring->ps_page_dma[i];
1918 pci_unmap_single(pdev, 2045 pci_unmap_single(pdev,
1919 buffer_info->dma, 2046 buffer_info->dma,
1920 buffer_info->length, 2047 buffer_info->length,
@@ -1922,19 +2049,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
1922 2049
1923 dev_kfree_skb(buffer_info->skb); 2050 dev_kfree_skb(buffer_info->skb);
1924 buffer_info->skb = NULL; 2051 buffer_info->skb = NULL;
1925 2052 }
1926 for(j = 0; j < adapter->rx_ps_pages; j++) { 2053 ps_page = &rx_ring->ps_page[i];
1927 if(!ps_page->ps_page[j]) break; 2054 ps_page_dma = &rx_ring->ps_page_dma[i];
1928 pci_unmap_single(pdev, 2055 for (j = 0; j < adapter->rx_ps_pages; j++) {
1929 ps_page_dma->ps_page_dma[j], 2056 if (!ps_page->ps_page[j]) break;
1930 PAGE_SIZE, PCI_DMA_FROMDEVICE); 2057 pci_unmap_page(pdev,
1931 ps_page_dma->ps_page_dma[j] = 0; 2058 ps_page_dma->ps_page_dma[j],
1932 put_page(ps_page->ps_page[j]); 2059 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1933 ps_page->ps_page[j] = NULL; 2060 ps_page_dma->ps_page_dma[j] = 0;
1934 } 2061 put_page(ps_page->ps_page[j]);
2062 ps_page->ps_page[j] = NULL;
1935 } 2063 }
1936 } 2064 }
1937 2065
 2066 /* there may also be some cached data in our adapter */
2067 if (rx_ring->rx_skb_top) {
2068 dev_kfree_skb(rx_ring->rx_skb_top);
2069
2070 /* rx_skb_prev will be wiped out by rx_skb_top */
2071 rx_ring->rx_skb_top = NULL;
2072 rx_ring->rx_skb_prev = NULL;
2073 }
2074
2075
1938 size = sizeof(struct e1000_buffer) * rx_ring->count; 2076 size = sizeof(struct e1000_buffer) * rx_ring->count;
1939 memset(rx_ring->buffer_info, 0, size); 2077 memset(rx_ring->buffer_info, 0, size);
1940 size = sizeof(struct e1000_ps_page) * rx_ring->count; 2078 size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -1963,7 +2101,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
1963{ 2101{
1964 int i; 2102 int i;
1965 2103
1966 for (i = 0; i < adapter->num_queues; i++) 2104 for (i = 0; i < adapter->num_rx_queues; i++)
1967 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2105 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1968} 2106}
1969 2107
@@ -2005,7 +2143,9 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
2005 2143
2006 if(netif_running(netdev)) { 2144 if(netif_running(netdev)) {
2007 e1000_configure_rx(adapter); 2145 e1000_configure_rx(adapter);
2008 e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]); 2146 /* No need to loop, because 82542 supports only 1 queue */
 2147 adapter->alloc_rx_buf(adapter, &adapter->rx_ring[0],
 2148 E1000_DESC_UNUSED(&adapter->rx_ring[0]));
2009 } 2149 }
2010} 2150}
2011 2151
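
The 82542 path above now asks alloc_rx_buf for exactly E1000_DESC_UNUSED(ring) buffers rather than refilling blindly. The macro is plain ring arithmetic: the free slots between next_to_clean and next_to_use, minus the one slot kept empty so head and tail never collide. A standalone sketch of that arithmetic:

#include <stdio.h>

/* mirrors the E1000_DESC_UNUSED() logic in e1000.h */
static int desc_unused(int count, int next_to_clean, int next_to_use)
{
	return ((next_to_clean > next_to_use) ? 0 : count) +
	       next_to_clean - next_to_use - 1;
}

int main(void)
{
	/* a 256-entry ring, with the clean pointer behind and ahead of use */
	printf("%d\n", desc_unused(256, 10, 200));	/* prints 65 */
	printf("%d\n", desc_unused(256, 200, 10));	/* prints 189 */
	return 0;
}
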
@@ -2204,7 +2344,7 @@ static void
2204e1000_watchdog_task(struct e1000_adapter *adapter) 2344e1000_watchdog_task(struct e1000_adapter *adapter)
2205{ 2345{
2206 struct net_device *netdev = adapter->netdev; 2346 struct net_device *netdev = adapter->netdev;
2207 struct e1000_tx_ring *txdr = &adapter->tx_ring[0]; 2347 struct e1000_tx_ring *txdr = adapter->tx_ring;
2208 uint32_t link; 2348 uint32_t link;
2209 2349
2210 e1000_check_for_link(&adapter->hw); 2350 e1000_check_for_link(&adapter->hw);
@@ -2231,6 +2371,21 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2231 adapter->link_duplex == FULL_DUPLEX ? 2371 adapter->link_duplex == FULL_DUPLEX ?
2232 "Full Duplex" : "Half Duplex"); 2372 "Full Duplex" : "Half Duplex");
2233 2373
2374 /* tweak tx_queue_len according to speed/duplex */
2375 netdev->tx_queue_len = adapter->tx_queue_len;
2376 adapter->tx_timeout_factor = 1;
2377 if (adapter->link_duplex == HALF_DUPLEX) {
2378 switch (adapter->link_speed) {
2379 case SPEED_10:
2380 netdev->tx_queue_len = 10;
2381 adapter->tx_timeout_factor = 8;
2382 break;
2383 case SPEED_100:
2384 netdev->tx_queue_len = 100;
2385 break;
2386 }
2387 }
2388
2234 netif_carrier_on(netdev); 2389 netif_carrier_on(netdev);
2235 netif_wake_queue(netdev); 2390 netif_wake_queue(netdev);
2236 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); 2391 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
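
The watchdog change above shortens the qdisc queue and stretches the Tx-hang timeout on slow half-duplex links, where collisions and backoff make completions legitimately slow. A sketch of the mapping, with an illustrative default queue length:

#include <stdio.h>

int main(void)
{
	unsigned long base_queue_len = 1000;	/* illustrative default */
	int speed = 10, half_duplex = 1;	/* negotiated link state */
	unsigned long queue_len = base_queue_len;
	int timeout_factor = 1;

	if (half_duplex) {
		switch (speed) {
		case 10:			/* 10 Mb/s half duplex */
			queue_len = 10;
			timeout_factor = 8;	/* hang check waits 8x longer */
			break;
		case 100:			/* 100 Mb/s half duplex */
			queue_len = 100;
			break;
		}
	}
	printf("tx_queue_len=%lu timeout_factor=%d\n", queue_len, timeout_factor);
	return 0;
}
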
@@ -2263,7 +2418,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
2263 2418
2264 e1000_update_adaptive(&adapter->hw); 2419 e1000_update_adaptive(&adapter->hw);
2265 2420
2266 if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) { 2421#ifdef CONFIG_E1000_MQ
2422 txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
2423#endif
2424 if (!netif_carrier_ok(netdev)) {
2267 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2425 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2268 /* We've lost link, so the controller stops DMA, 2426 /* We've lost link, so the controller stops DMA,
2269 * but we've got queued Tx work that's never going 2427 * but we've got queued Tx work that's never going
@@ -2314,6 +2472,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2314{ 2472{
2315#ifdef NETIF_F_TSO 2473#ifdef NETIF_F_TSO
2316 struct e1000_context_desc *context_desc; 2474 struct e1000_context_desc *context_desc;
2475 struct e1000_buffer *buffer_info;
2317 unsigned int i; 2476 unsigned int i;
2318 uint32_t cmd_length = 0; 2477 uint32_t cmd_length = 0;
2319 uint16_t ipcse = 0, tucse, mss; 2478 uint16_t ipcse = 0, tucse, mss;
@@ -2363,6 +2522,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2363 2522
2364 i = tx_ring->next_to_use; 2523 i = tx_ring->next_to_use;
2365 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2524 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2525 buffer_info = &tx_ring->buffer_info[i];
2366 2526
2367 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2527 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2368 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2528 context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -2374,14 +2534,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2374 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2534 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2375 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2535 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2376 2536
2537 buffer_info->time_stamp = jiffies;
2538
2377 if (++i == tx_ring->count) i = 0; 2539 if (++i == tx_ring->count) i = 0;
2378 tx_ring->next_to_use = i; 2540 tx_ring->next_to_use = i;
2379 2541
2380 return 1; 2542 return TRUE;
2381 } 2543 }
2382#endif 2544#endif
2383 2545
2384 return 0; 2546 return FALSE;
2385} 2547}
2386 2548
2387static inline boolean_t 2549static inline boolean_t
@@ -2389,6 +2551,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2389 struct sk_buff *skb) 2551 struct sk_buff *skb)
2390{ 2552{
2391 struct e1000_context_desc *context_desc; 2553 struct e1000_context_desc *context_desc;
2554 struct e1000_buffer *buffer_info;
2392 unsigned int i; 2555 unsigned int i;
2393 uint8_t css; 2556 uint8_t css;
2394 2557
@@ -2396,6 +2559,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2396 css = skb->h.raw - skb->data; 2559 css = skb->h.raw - skb->data;
2397 2560
2398 i = tx_ring->next_to_use; 2561 i = tx_ring->next_to_use;
2562 buffer_info = &tx_ring->buffer_info[i];
2399 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2563 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2400 2564
2401 context_desc->upper_setup.tcp_fields.tucss = css; 2565 context_desc->upper_setup.tcp_fields.tucss = css;
@@ -2404,6 +2568,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2404 context_desc->tcp_seg_setup.data = 0; 2568 context_desc->tcp_seg_setup.data = 0;
2405 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); 2569 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2406 2570
2571 buffer_info->time_stamp = jiffies;
2572
2407 if (unlikely(++i == tx_ring->count)) i = 0; 2573 if (unlikely(++i == tx_ring->count)) i = 0;
2408 tx_ring->next_to_use = i; 2574 tx_ring->next_to_use = i;
2409 2575
@@ -2688,11 +2854,30 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2688 * overrun the FIFO, adjust the max buffer len if mss 2854 * overrun the FIFO, adjust the max buffer len if mss
2689 * drops. */ 2855 * drops. */
2690 if(mss) { 2856 if(mss) {
2857 uint8_t hdr_len;
2691 max_per_txd = min(mss << 2, max_per_txd); 2858 max_per_txd = min(mss << 2, max_per_txd);
2692 max_txd_pwr = fls(max_per_txd) - 1; 2859 max_txd_pwr = fls(max_per_txd) - 1;
2860
2861 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2862 * points to just header, pull a few bytes of payload from
2863 * frags into skb->data */
2864 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2865 if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
2866 (adapter->hw.mac_type == e1000_82571 ||
2867 adapter->hw.mac_type == e1000_82572)) {
2868 unsigned int pull_size;
2869 pull_size = min((unsigned int)4, skb->data_len);
2870 if (!__pskb_pull_tail(skb, pull_size)) {
2871 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2872 dev_kfree_skb_any(skb);
2873 return -EFAULT;
2874 }
2875 len = skb->len - skb->data_len;
2876 }
2693 } 2877 }
2694 2878
2695 if((mss) || (skb->ip_summed == CHECKSUM_HW)) 2879 if((mss) || (skb->ip_summed == CHECKSUM_HW))
2880 /* reserve a descriptor for the offload context */
2696 count++; 2881 count++;
2697 count++; 2882 count++;
2698#else 2883#else
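
The relocated TSO workaround above detects a header-only linear area by comparing hdr_len with skb->len - skb->data_len, which is exactly the number of bytes resident in skb->data; equality means all payload lives in frags, so a few payload bytes get pulled in for the 82571/82572 DMA engines. A userspace sketch of the test, with made-up lengths:

#include <stdio.h>

int main(void)
{
	/* hypothetical TSO skb: 54-byte headers linear, 2866 bytes in frags */
	unsigned int len = 2920, data_len = 2866;
	unsigned int hdr_len = 14 + 20 + 20;	/* eth + ip + tcp (doff = 5) */

	unsigned int linear = len - data_len;
	if (data_len && hdr_len == linear)
		/* 82571/82572 workaround: pull a few payload bytes into
		 * the linear area so skb->data is not header-only */
		printf("pull min(4, %u) bytes into skb->data\n", data_len);
	else
		printf("no pull needed (linear=%u hdr_len=%u)\n", linear, hdr_len);
	return 0;
}
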
@@ -2726,27 +2911,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2726 if(adapter->pcix_82544) 2911 if(adapter->pcix_82544)
2727 count += nr_frags; 2912 count += nr_frags;
2728 2913
2729#ifdef NETIF_F_TSO
2730 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2731 * points to just header, pull a few bytes of payload from
2732 * frags into skb->data */
2733 if (skb_shinfo(skb)->tso_size) {
2734 uint8_t hdr_len;
2735 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2736 if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
2737 (adapter->hw.mac_type == e1000_82571 ||
2738 adapter->hw.mac_type == e1000_82572)) {
2739 unsigned int pull_size;
2740 pull_size = min((unsigned int)4, skb->data_len);
2741 if (!__pskb_pull_tail(skb, pull_size)) {
2742 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2743 dev_kfree_skb_any(skb);
2744 return -EFAULT;
2745 }
2746 }
2747 }
2748#endif
2749
2750 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2914 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2751 e1000_transfer_dhcp_info(adapter, skb); 2915 e1000_transfer_dhcp_info(adapter, skb);
2752 2916
@@ -2833,6 +2997,7 @@ e1000_tx_timeout_task(struct net_device *netdev)
2833{ 2997{
2834 struct e1000_adapter *adapter = netdev_priv(netdev); 2998 struct e1000_adapter *adapter = netdev_priv(netdev);
2835 2999
3000 adapter->tx_timeout_count++;
2836 e1000_down(adapter); 3001 e1000_down(adapter);
2837 e1000_up(adapter); 3002 e1000_up(adapter);
2838} 3003}
@@ -2850,7 +3015,7 @@ e1000_get_stats(struct net_device *netdev)
2850{ 3015{
2851 struct e1000_adapter *adapter = netdev_priv(netdev); 3016 struct e1000_adapter *adapter = netdev_priv(netdev);
2852 3017
2853 e1000_update_stats(adapter); 3018 /* only return the current stats */
2854 return &adapter->net_stats; 3019 return &adapter->net_stats;
2855} 3020}
2856 3021
@@ -2871,50 +3036,51 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
2871 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3036 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
2872 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3037 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2873 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3038 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
2874 return -EINVAL;
2875 }
2876
2877#define MAX_STD_JUMBO_FRAME_SIZE 9234
2878 /* might want this to be bigger enum check... */
2879 /* 82571 controllers limit jumbo frame size to 10500 bytes */
2880 if ((adapter->hw.mac_type == e1000_82571 ||
2881 adapter->hw.mac_type == e1000_82572) &&
2882 max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2883 DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
2884 "on 82571 and 82572 controllers.\n");
2885 return -EINVAL; 3039 return -EINVAL;
2886 } 3040 }
2887 3041
2888 if(adapter->hw.mac_type == e1000_82573 && 3042 /* Adapter-specific max frame size limits. */
2889 max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { 3043 switch (adapter->hw.mac_type) {
2890 DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 3044 case e1000_82542_rev2_0:
2891 "on 82573\n"); 3045 case e1000_82542_rev2_1:
2892 return -EINVAL; 3046 case e1000_82573:
2893 } 3047 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2894 3048 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
2895 if(adapter->hw.mac_type > e1000_82547_rev_2) { 3049 return -EINVAL;
2896 adapter->rx_buffer_len = max_frame; 3050 }
2897 E1000_ROUNDUP(adapter->rx_buffer_len, 1024); 3051 break;
2898 } else { 3052 case e1000_82571:
2899 if(unlikely((adapter->hw.mac_type < e1000_82543) && 3053 case e1000_82572:
2900 (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { 3054#define MAX_STD_JUMBO_FRAME_SIZE 9234
2901 DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 3055 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2902 "on 82542\n"); 3056 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
2903 return -EINVAL; 3057 return -EINVAL;
2904
2905 } else {
2906 if(max_frame <= E1000_RXBUFFER_2048) {
2907 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2908 } else if(max_frame <= E1000_RXBUFFER_4096) {
2909 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
2910 } else if(max_frame <= E1000_RXBUFFER_8192) {
2911 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
2912 } else if(max_frame <= E1000_RXBUFFER_16384) {
2913 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
2914 }
2915 } 3058 }
3059 break;
3060 default:
3061 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3062 break;
2916 } 3063 }
2917 3064
3065 /* since the driver code now supports splitting a packet across
3066 * multiple descriptors, most of the fifo related limitations on
3067 * jumbo frame traffic have gone away.
3068 * simply use 2k descriptors for everything.
3069 *
3070 * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3071 * means we reserve 2 more, this pushes us to allocate from the next
3072 * larger slab size
3073 * i.e. RXBUFFER_2048 --> size-4096 slab */
3074
3075 /* recent hardware supports 1KB granularity */
3076 if (adapter->hw.mac_type > e1000_82547_rev_2) {
3077 adapter->rx_buffer_len =
3078 ((max_frame < E1000_RXBUFFER_2048) ?
3079 max_frame : E1000_RXBUFFER_2048);
3080 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
3081 } else
3082 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3083
2918 netdev->mtu = new_mtu; 3084 netdev->mtu = new_mtu;
2919 3085
2920 if(netif_running(netdev)) { 3086 if(netif_running(netdev)) {
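
The rewritten sizing above replaces the old per-size buffer ladder: the frame size is capped at the 2 KB buffer and, on MACs newer than 82547 rev 2, rounded up to the hardware's 1 KB granularity, since multi-descriptor receives now reassemble anything larger. A sketch of the rule (E1000_ROUNDUP rounds its first argument up to a multiple of its second):

#include <stdio.h>

/* round up to a multiple of 'unit', as E1000_ROUNDUP does */
static unsigned int roundup_to(unsigned int v, unsigned int unit)
{
	return ((v + unit - 1) / unit) * unit;
}

int main(void)
{
	unsigned int max_frames[] = { 594, 1522, 9234 };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int mf = max_frames[i];
		/* cap at the 2 KB buffer, then round to 1 KB granularity */
		unsigned int len = roundup_to(mf < 2048 ? mf : 2048, 1024);
		printf("max_frame %u -> rx_buffer_len %u\n", mf, len);
	}
	return 0;
}
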
@@ -3037,12 +3203,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
3037 3203
3038 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3204 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3039 adapter->stats.crcerrs + adapter->stats.algnerrc + 3205 adapter->stats.crcerrs + adapter->stats.algnerrc +
3040 adapter->stats.rlec + adapter->stats.mpc + 3206 adapter->stats.rlec + adapter->stats.cexterr;
3041 adapter->stats.cexterr; 3207 adapter->net_stats.rx_dropped = 0;
3042 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 3208 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
3043 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3209 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3044 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3210 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3045 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
3046 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3211 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3047 3212
3048 /* Tx Errors */ 3213 /* Tx Errors */
@@ -3110,12 +3275,24 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3110 struct e1000_adapter *adapter = netdev_priv(netdev); 3275 struct e1000_adapter *adapter = netdev_priv(netdev);
3111 struct e1000_hw *hw = &adapter->hw; 3276 struct e1000_hw *hw = &adapter->hw;
3112 uint32_t icr = E1000_READ_REG(hw, ICR); 3277 uint32_t icr = E1000_READ_REG(hw, ICR);
3113#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI) 3278#ifndef CONFIG_E1000_NAPI
3114 int i; 3279 int i;
3280#else
3281 /* Interrupt Auto-Mask...upon reading ICR,
3282 * interrupts are masked. No need for the
3283 * IMC write, but it does mean we should
3284 * account for it ASAP. */
3285 if (likely(hw->mac_type >= e1000_82571))
3286 atomic_inc(&adapter->irq_sem);
3115#endif 3287#endif
3116 3288
3117 if(unlikely(!icr)) 3289 if (unlikely(!icr)) {
3290#ifdef CONFIG_E1000_NAPI
3291 if (hw->mac_type >= e1000_82571)
3292 e1000_irq_enable(adapter);
3293#endif
3118 return IRQ_NONE; /* Not our interrupt */ 3294 return IRQ_NONE; /* Not our interrupt */
3295 }
3119 3296
3120 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3297 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3121 hw->get_link_status = 1; 3298 hw->get_link_status = 1;
@@ -3123,19 +3300,19 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
3123 } 3300 }
3124 3301
3125#ifdef CONFIG_E1000_NAPI 3302#ifdef CONFIG_E1000_NAPI
3126 atomic_inc(&adapter->irq_sem); 3303 if (unlikely(hw->mac_type < e1000_82571)) {
3127 E1000_WRITE_REG(hw, IMC, ~0); 3304 atomic_inc(&adapter->irq_sem);
3128 E1000_WRITE_FLUSH(hw); 3305 E1000_WRITE_REG(hw, IMC, ~0);
3306 E1000_WRITE_FLUSH(hw);
3307 }
3129#ifdef CONFIG_E1000_MQ 3308#ifdef CONFIG_E1000_MQ
3130 if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { 3309 if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
3131 cpu_set(adapter->cpu_for_queue[0], 3310 /* We must setup the cpumask once count == 0 since
3132 adapter->rx_sched_call_data.cpumask); 3311 * each cpu bit is cleared when the work is done. */
3133 for (i = 1; i < adapter->num_queues; i++) { 3312 adapter->rx_sched_call_data.cpumask = adapter->cpumask;
3134 cpu_set(adapter->cpu_for_queue[i], 3313 atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
3135 adapter->rx_sched_call_data.cpumask); 3314 atomic_set(&adapter->rx_sched_call_data.count,
3136 atomic_inc(&adapter->irq_sem); 3315 adapter->num_rx_queues);
3137 }
3138 atomic_set(&adapter->rx_sched_call_data.count, i);
3139 smp_call_async_mask(&adapter->rx_sched_call_data); 3316 smp_call_async_mask(&adapter->rx_sched_call_data);
3140 } else { 3317 } else {
3141 printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); 3318 printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
@@ -3187,7 +3364,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3187{ 3364{
3188 struct e1000_adapter *adapter; 3365 struct e1000_adapter *adapter;
3189 int work_to_do = min(*budget, poll_dev->quota); 3366 int work_to_do = min(*budget, poll_dev->quota);
3190 int tx_cleaned, i = 0, work_done = 0; 3367 int tx_cleaned = 0, i = 0, work_done = 0;
3191 3368
3192 /* Must NOT use netdev_priv macro here. */ 3369 /* Must NOT use netdev_priv macro here. */
3193 adapter = poll_dev->priv; 3370 adapter = poll_dev->priv;
@@ -3198,11 +3375,23 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3198 3375
3199 while (poll_dev != &adapter->polling_netdev[i]) { 3376 while (poll_dev != &adapter->polling_netdev[i]) {
3200 i++; 3377 i++;
3201 if (unlikely(i == adapter->num_queues)) 3378 if (unlikely(i == adapter->num_rx_queues))
3202 BUG(); 3379 BUG();
3203 } 3380 }
3204 3381
3205 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); 3382 if (likely(adapter->num_tx_queues == 1)) {
3383 /* e1000_clean is called per-cpu. This lock protects
3384 * tx_ring[0] from being cleaned by multiple cpus
3385 * simultaneously. A failure obtaining the lock means
3386 * tx_ring[0] is currently being cleaned anyway. */
3387 if (spin_trylock(&adapter->tx_queue_lock)) {
3388 tx_cleaned = e1000_clean_tx_irq(adapter,
3389 &adapter->tx_ring[0]);
3390 spin_unlock(&adapter->tx_queue_lock);
3391 }
3392 } else
3393 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
3394
3206 adapter->clean_rx(adapter, &adapter->rx_ring[i], 3395 adapter->clean_rx(adapter, &adapter->rx_ring[i],
3207 &work_done, work_to_do); 3396 &work_done, work_to_do);
3208 3397
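
The comment in the hunk above is the whole idiom: with NAPI, e1000_clean may run concurrently on several CPUs, but a single Tx ring needs only one reaper, and a failed trylock is proof that another CPU already holds that job. A sketch of the same pattern with POSIX threads, pthread_mutex_trylock standing in for spin_trylock (the two calls here run sequentially, so both succeed; contention only arises across threads):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void poll_once(int cpu)
{
	/* failure is fine: the ring is being cleaned by another cpu */
	if (pthread_mutex_trylock(&tx_queue_lock) == 0) {
		printf("cpu %d cleans tx_ring[0]\n", cpu);
		pthread_mutex_unlock(&tx_queue_lock);
	} else {
		printf("cpu %d skips: ring already being cleaned\n", cpu);
	}
}

int main(void)
{
	poll_once(0);
	poll_once(1);
	return 0;
}
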
@@ -3247,17 +3436,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3247 buffer_info = &tx_ring->buffer_info[i]; 3436 buffer_info = &tx_ring->buffer_info[i];
3248 cleaned = (i == eop); 3437 cleaned = (i == eop);
3249 3438
3439#ifdef CONFIG_E1000_MQ
3440 tx_ring->tx_stats.bytes += buffer_info->length;
3441#endif
3250 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3442 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3251 3443 memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
3252 tx_desc->buffer_addr = 0;
3253 tx_desc->lower.data = 0;
3254 tx_desc->upper.data = 0;
3255 3444
3256 if(unlikely(++i == tx_ring->count)) i = 0; 3445 if(unlikely(++i == tx_ring->count)) i = 0;
3257 } 3446 }
3258 3447
3259 tx_ring->pkt++; 3448#ifdef CONFIG_E1000_MQ
3260 3449 tx_ring->tx_stats.packets++;
3450#endif
3451
3261 eop = tx_ring->buffer_info[i].next_to_watch; 3452 eop = tx_ring->buffer_info[i].next_to_watch;
3262 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3453 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3263 } 3454 }
@@ -3276,32 +3467,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3276 /* Detect a transmit hang in hardware, this serializes the 3467 /* Detect a transmit hang in hardware, this serializes the
3277 * check with the clearing of time_stamp and movement of i */ 3468 * check with the clearing of time_stamp and movement of i */
3278 adapter->detect_tx_hung = FALSE; 3469 adapter->detect_tx_hung = FALSE;
3279 if (tx_ring->buffer_info[i].dma && 3470 if (tx_ring->buffer_info[eop].dma &&
3280 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) 3471 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3472 adapter->tx_timeout_factor * HZ)
3281 && !(E1000_READ_REG(&adapter->hw, STATUS) & 3473 && !(E1000_READ_REG(&adapter->hw, STATUS) &
3282 E1000_STATUS_TXOFF)) { 3474 E1000_STATUS_TXOFF)) {
3283 3475
3284 /* detected Tx unit hang */ 3476 /* detected Tx unit hang */
3285 i = tx_ring->next_to_clean;
3286 eop = tx_ring->buffer_info[i].next_to_watch;
3287 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3288 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 3477 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
3478 " Tx Queue <%lu>\n"
3289 " TDH <%x>\n" 3479 " TDH <%x>\n"
3290 " TDT <%x>\n" 3480 " TDT <%x>\n"
3291 " next_to_use <%x>\n" 3481 " next_to_use <%x>\n"
3292 " next_to_clean <%x>\n" 3482 " next_to_clean <%x>\n"
3293 "buffer_info[next_to_clean]\n" 3483 "buffer_info[next_to_clean]\n"
3294 " dma <%llx>\n"
3295 " time_stamp <%lx>\n" 3484 " time_stamp <%lx>\n"
3296 " next_to_watch <%x>\n" 3485 " next_to_watch <%x>\n"
3297 " jiffies <%lx>\n" 3486 " jiffies <%lx>\n"
3298 " next_to_watch.status <%x>\n", 3487 " next_to_watch.status <%x>\n",
 3488 (unsigned long)(tx_ring -
 3489 adapter->tx_ring),
3299 readl(adapter->hw.hw_addr + tx_ring->tdh), 3490 readl(adapter->hw.hw_addr + tx_ring->tdh),
3300 readl(adapter->hw.hw_addr + tx_ring->tdt), 3491 readl(adapter->hw.hw_addr + tx_ring->tdt),
3301 tx_ring->next_to_use, 3492 tx_ring->next_to_use,
3302 i, 3493 tx_ring->next_to_clean,
3303 (unsigned long long)tx_ring->buffer_info[i].dma, 3494 tx_ring->buffer_info[eop].time_stamp,
3304 tx_ring->buffer_info[i].time_stamp,
3305 eop, 3495 eop,
3306 jiffies, 3496 jiffies,
3307 eop_desc->upper.fields.status); 3497 eop_desc->upper.fields.status);
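
The hang report above derives the queue label from a pointer difference. C pointer subtraction is already scaled by the element size, so the bare difference is the queue index; dividing it by sizeof(struct e1000_tx_ring) again would always print queue 0. A two-line demonstration with a stand-in struct:

#include <stdio.h>

struct ring { char pad[448]; };	/* stand-in for struct e1000_tx_ring */

int main(void)
{
	struct ring rings[2];
	struct ring *r = &rings[1];

	/* pointer subtraction counts elements, not bytes */
	printf("queue index = %ld\n", (long)(r - rings));
	return 0;
}
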
@@ -3386,20 +3576,23 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3386 uint32_t length; 3576 uint32_t length;
3387 uint8_t last_byte; 3577 uint8_t last_byte;
3388 unsigned int i; 3578 unsigned int i;
3389 boolean_t cleaned = FALSE; 3579 int cleaned_count = 0;
3580 boolean_t cleaned = FALSE, multi_descriptor = FALSE;
3390 3581
3391 i = rx_ring->next_to_clean; 3582 i = rx_ring->next_to_clean;
3392 rx_desc = E1000_RX_DESC(*rx_ring, i); 3583 rx_desc = E1000_RX_DESC(*rx_ring, i);
3393 3584
3394 while(rx_desc->status & E1000_RXD_STAT_DD) { 3585 while(rx_desc->status & E1000_RXD_STAT_DD) {
3395 buffer_info = &rx_ring->buffer_info[i]; 3586 buffer_info = &rx_ring->buffer_info[i];
 3587 uint8_t status;
3396#ifdef CONFIG_E1000_NAPI 3588#ifdef CONFIG_E1000_NAPI
3397 if(*work_done >= work_to_do) 3589 if(*work_done >= work_to_do)
3398 break; 3590 break;
3399 (*work_done)++; 3591 (*work_done)++;
3400#endif 3592#endif
3593 status = rx_desc->status;
3401 cleaned = TRUE; 3594 cleaned = TRUE;
3402 3595 cleaned_count++;
3403 pci_unmap_single(pdev, 3596 pci_unmap_single(pdev,
3404 buffer_info->dma, 3597 buffer_info->dma,
3405 buffer_info->length, 3598 buffer_info->length,
@@ -3433,18 +3626,40 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3433 } 3626 }
3434 } 3627 }
3435 3628
3436 /* Good Receive */ 3629 /* code added for copybreak, this should improve
3437 skb_put(skb, length - ETHERNET_FCS_SIZE); 3630 * performance for small packets with large amounts
3631 * of reassembly being done in the stack */
3632#define E1000_CB_LENGTH 256
3633 if ((length < E1000_CB_LENGTH) &&
3634 !rx_ring->rx_skb_top &&
3635 /* or maybe (status & E1000_RXD_STAT_EOP) && */
3636 !multi_descriptor) {
3637 struct sk_buff *new_skb =
3638 dev_alloc_skb(length + NET_IP_ALIGN);
3639 if (new_skb) {
3640 skb_reserve(new_skb, NET_IP_ALIGN);
3641 new_skb->dev = netdev;
3642 memcpy(new_skb->data - NET_IP_ALIGN,
3643 skb->data - NET_IP_ALIGN,
3644 length + NET_IP_ALIGN);
3645 /* save the skb in buffer_info as good */
3646 buffer_info->skb = skb;
3647 skb = new_skb;
3648 skb_put(skb, length);
3649 }
3650 }
3651
3652 /* end copybreak code */
3438 3653
3439 /* Receive Checksum Offload */ 3654 /* Receive Checksum Offload */
3440 e1000_rx_checksum(adapter, 3655 e1000_rx_checksum(adapter,
3441 (uint32_t)(rx_desc->status) | 3656 (uint32_t)(status) |
3442 ((uint32_t)(rx_desc->errors) << 24), 3657 ((uint32_t)(rx_desc->errors) << 24),
3443 rx_desc->csum, skb); 3658 rx_desc->csum, skb);
3444 skb->protocol = eth_type_trans(skb, netdev); 3659 skb->protocol = eth_type_trans(skb, netdev);
3445#ifdef CONFIG_E1000_NAPI 3660#ifdef CONFIG_E1000_NAPI
3446 if(unlikely(adapter->vlgrp && 3661 if(unlikely(adapter->vlgrp &&
3447 (rx_desc->status & E1000_RXD_STAT_VP))) { 3662 (status & E1000_RXD_STAT_VP))) {
3448 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3663 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3449 le16_to_cpu(rx_desc->special) & 3664 le16_to_cpu(rx_desc->special) &
3450 E1000_RXD_SPC_VLAN_MASK); 3665 E1000_RXD_SPC_VLAN_MASK);
@@ -3462,17 +3677,26 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3462 } 3677 }
3463#endif /* CONFIG_E1000_NAPI */ 3678#endif /* CONFIG_E1000_NAPI */
3464 netdev->last_rx = jiffies; 3679 netdev->last_rx = jiffies;
3465 rx_ring->pkt++; 3680#ifdef CONFIG_E1000_MQ
3681 rx_ring->rx_stats.packets++;
3682 rx_ring->rx_stats.bytes += length;
3683#endif
3466 3684
3467next_desc: 3685next_desc:
3468 rx_desc->status = 0; 3686 rx_desc->status = 0;
3469 buffer_info->skb = NULL;
3470 if(unlikely(++i == rx_ring->count)) i = 0;
3471 3687
3472 rx_desc = E1000_RX_DESC(*rx_ring, i); 3688 /* return some buffers to hardware, one at a time is too slow */
3689 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3690 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3691 cleaned_count = 0;
3692 }
3693
3473 } 3694 }
3474 rx_ring->next_to_clean = i; 3695 rx_ring->next_to_clean = i;
3475 adapter->alloc_rx_buf(adapter, rx_ring); 3696
3697 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3698 if (cleaned_count)
3699 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3476 3700
3477 return cleaned; 3701 return cleaned;
3478} 3702}
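
The copybreak block in the hunk above trades one memcpy for better allocator and cache behavior: frames shorter than E1000_CB_LENGTH (256 bytes) are copied into a right-sized skb while the original full-size buffer is parked in buffer_info->skb for immediate reuse. A userspace sketch of the decision, with malloc/memcpy standing in for dev_alloc_skb:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CB_LENGTH 256	/* mirrors E1000_CB_LENGTH */

int main(void)
{
	char *rx_buf = malloc(2048);	/* full-size receive buffer */
	size_t length = 60;		/* a small frame, e.g. a TCP ACK */

	if (!rx_buf)
		return 1;
	memset(rx_buf, 0xab, length);

	if (length < CB_LENGTH) {
		/* copy the small frame out, keep rx_buf for the next packet */
		char *small = malloc(length);
		if (small) {
			memcpy(small, rx_buf, length);
			printf("copied %zu bytes; 2 KB buffer recycled\n", length);
			free(small);
		}
	} else {
		printf("large frame: pass the 2 KB buffer up the stack\n");
	}
	free(rx_buf);
	return 0;
}
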
@@ -3501,6 +3725,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3501 struct sk_buff *skb; 3725 struct sk_buff *skb;
3502 unsigned int i, j; 3726 unsigned int i, j;
3503 uint32_t length, staterr; 3727 uint32_t length, staterr;
3728 int cleaned_count = 0;
3504 boolean_t cleaned = FALSE; 3729 boolean_t cleaned = FALSE;
3505 3730
3506 i = rx_ring->next_to_clean; 3731 i = rx_ring->next_to_clean;
@@ -3517,6 +3742,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3517 (*work_done)++; 3742 (*work_done)++;
3518#endif 3743#endif
3519 cleaned = TRUE; 3744 cleaned = TRUE;
3745 cleaned_count++;
3520 pci_unmap_single(pdev, buffer_info->dma, 3746 pci_unmap_single(pdev, buffer_info->dma,
3521 buffer_info->length, 3747 buffer_info->length,
3522 PCI_DMA_FROMDEVICE); 3748 PCI_DMA_FROMDEVICE);
@@ -3593,18 +3819,28 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3593 } 3819 }
3594#endif /* CONFIG_E1000_NAPI */ 3820#endif /* CONFIG_E1000_NAPI */
3595 netdev->last_rx = jiffies; 3821 netdev->last_rx = jiffies;
3596 rx_ring->pkt++; 3822#ifdef CONFIG_E1000_MQ
3823 rx_ring->rx_stats.packets++;
3824 rx_ring->rx_stats.bytes += length;
3825#endif
3597 3826
3598next_desc: 3827next_desc:
3599 rx_desc->wb.middle.status_error &= ~0xFF; 3828 rx_desc->wb.middle.status_error &= ~0xFF;
3600 buffer_info->skb = NULL; 3829 buffer_info->skb = NULL;
3601 if(unlikely(++i == rx_ring->count)) i = 0;
3602 3830
3603 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3831 /* return some buffers to hardware, one at a time is too slow */
3832 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3833 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3834 cleaned_count = 0;
3835 }
3836
3604 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3837 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3605 } 3838 }
3606 rx_ring->next_to_clean = i; 3839 rx_ring->next_to_clean = i;
3607 adapter->alloc_rx_buf(adapter, rx_ring); 3840
3841 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3842 if (cleaned_count)
3843 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3608 3844
3609 return cleaned; 3845 return cleaned;
3610} 3846}
@@ -3616,7 +3852,8 @@ next_desc:
3616 3852
3617static void 3853static void
3618e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 3854e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3619 struct e1000_rx_ring *rx_ring) 3855 struct e1000_rx_ring *rx_ring,
3856 int cleaned_count)
3620{ 3857{
3621 struct net_device *netdev = adapter->netdev; 3858 struct net_device *netdev = adapter->netdev;
3622 struct pci_dev *pdev = adapter->pdev; 3859 struct pci_dev *pdev = adapter->pdev;
@@ -3629,11 +3866,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3629 i = rx_ring->next_to_use; 3866 i = rx_ring->next_to_use;
3630 buffer_info = &rx_ring->buffer_info[i]; 3867 buffer_info = &rx_ring->buffer_info[i];
3631 3868
3632 while(!buffer_info->skb) { 3869 while (cleaned_count--) {
3633 skb = dev_alloc_skb(bufsz); 3870 if (!(skb = buffer_info->skb))
3871 skb = dev_alloc_skb(bufsz);
3872 else {
3873 skb_trim(skb, 0);
3874 goto map_skb;
3875 }
3876
3634 3877
3635 if(unlikely(!skb)) { 3878 if(unlikely(!skb)) {
3636 /* Better luck next round */ 3879 /* Better luck next round */
3880 adapter->alloc_rx_buff_failed++;
3637 break; 3881 break;
3638 } 3882 }
3639 3883
@@ -3670,6 +3914,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3670 3914
3671 buffer_info->skb = skb; 3915 buffer_info->skb = skb;
3672 buffer_info->length = adapter->rx_buffer_len; 3916 buffer_info->length = adapter->rx_buffer_len;
3917map_skb:
3673 buffer_info->dma = pci_map_single(pdev, 3918 buffer_info->dma = pci_map_single(pdev,
3674 skb->data, 3919 skb->data,
3675 adapter->rx_buffer_len, 3920 adapter->rx_buffer_len,
@@ -3718,7 +3963,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3718 3963
3719static void 3964static void
3720e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, 3965e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3721 struct e1000_rx_ring *rx_ring) 3966 struct e1000_rx_ring *rx_ring,
3967 int cleaned_count)
3722{ 3968{
3723 struct net_device *netdev = adapter->netdev; 3969 struct net_device *netdev = adapter->netdev;
3724 struct pci_dev *pdev = adapter->pdev; 3970 struct pci_dev *pdev = adapter->pdev;
@@ -3734,7 +3980,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3734 ps_page = &rx_ring->ps_page[i]; 3980 ps_page = &rx_ring->ps_page[i];
3735 ps_page_dma = &rx_ring->ps_page_dma[i]; 3981 ps_page_dma = &rx_ring->ps_page_dma[i];
3736 3982
3737 while(!buffer_info->skb) { 3983 while (cleaned_count--) {
3738 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3984 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3739 3985
3740 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 3986 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
@@ -4106,8 +4352,12 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4106 4352
4107 if((adapter->hw.mng_cookie.status & 4353 if((adapter->hw.mng_cookie.status &
4108 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 4354 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4109 (vid == adapter->mng_vlan_id)) 4355 (vid == adapter->mng_vlan_id)) {
4356 /* release control to f/w */
4357 e1000_release_hw_control(adapter);
4110 return; 4358 return;
4359 }
4360
4111 /* remove VID from filter table */ 4361 /* remove VID from filter table */
4112 index = (vid >> 5) & 0x7F; 4362 index = (vid >> 5) & 0x7F;
4113 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); 4363 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -4173,8 +4423,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4173{ 4423{
4174 struct net_device *netdev = pci_get_drvdata(pdev); 4424 struct net_device *netdev = pci_get_drvdata(pdev);
4175 struct e1000_adapter *adapter = netdev_priv(netdev); 4425 struct e1000_adapter *adapter = netdev_priv(netdev);
4176 uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm; 4426 uint32_t ctrl, ctrl_ext, rctl, manc, status;
4177 uint32_t wufc = adapter->wol; 4427 uint32_t wufc = adapter->wol;
4428 int retval = 0;
4178 4429
4179 netif_device_detach(netdev); 4430 netif_device_detach(netdev);
4180 4431
@@ -4220,13 +4471,21 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4220 4471
4221 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); 4472 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
4222 E1000_WRITE_REG(&adapter->hw, WUFC, wufc); 4473 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
4223 pci_enable_wake(pdev, 3, 1); 4474 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
4224 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ 4475 if (retval)
4476 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4477 retval = pci_enable_wake(pdev, PCI_D3cold, 1);
4478 if (retval)
4479 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4225 } else { 4480 } else {
4226 E1000_WRITE_REG(&adapter->hw, WUC, 0); 4481 E1000_WRITE_REG(&adapter->hw, WUC, 0);
4227 E1000_WRITE_REG(&adapter->hw, WUFC, 0); 4482 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
4228 pci_enable_wake(pdev, 3, 0); 4483 retval = pci_enable_wake(pdev, PCI_D3hot, 0);
4229 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ 4484 if (retval)
4485 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4486 retval = pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */
4487 if (retval)
4488 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4230 } 4489 }
4231 4490
4232 pci_save_state(pdev); 4491 pci_save_state(pdev);
@@ -4237,29 +4496,24 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4237 if(manc & E1000_MANC_SMBUS_EN) { 4496 if(manc & E1000_MANC_SMBUS_EN) {
4238 manc |= E1000_MANC_ARP_EN; 4497 manc |= E1000_MANC_ARP_EN;
4239 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4498 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4240 pci_enable_wake(pdev, 3, 1); 4499 retval = pci_enable_wake(pdev, PCI_D3hot, 1);
4241 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ 4500 if (retval)
4501 DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
4502 retval = pci_enable_wake(pdev, PCI_D3cold, 1);
4503 if (retval)
4504 DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
4242 } 4505 }
4243 } 4506 }
4244 4507
4245 switch(adapter->hw.mac_type) { 4508 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4246 case e1000_82571: 4509 * would have already happened in close and is redundant. */
4247 case e1000_82572: 4510 e1000_release_hw_control(adapter);
4248 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
4249 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
4250 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4251 break;
4252 case e1000_82573:
4253 swsm = E1000_READ_REG(&adapter->hw, SWSM);
4254 E1000_WRITE_REG(&adapter->hw, SWSM,
4255 swsm & ~E1000_SWSM_DRV_LOAD);
4256 break;
4257 default:
4258 break;
4259 }
4260 4511
4261 pci_disable_device(pdev); 4512 pci_disable_device(pdev);
4262 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4513
4514 retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
4515 if (retval)
4516 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4263 4517
4264 return 0; 4518 return 0;
4265} 4519}
@@ -4269,16 +4523,21 @@ e1000_resume(struct pci_dev *pdev)
4269{ 4523{
4270 struct net_device *netdev = pci_get_drvdata(pdev); 4524 struct net_device *netdev = pci_get_drvdata(pdev);
4271 struct e1000_adapter *adapter = netdev_priv(netdev); 4525 struct e1000_adapter *adapter = netdev_priv(netdev);
4272 uint32_t manc, ret_val, swsm; 4526 int retval;
4273 uint32_t ctrl_ext; 4527 uint32_t manc, ret_val;
4274 4528
4275 pci_set_power_state(pdev, PCI_D0); 4529 retval = pci_set_power_state(pdev, PCI_D0);
4276 pci_restore_state(pdev); 4530 if (retval)
4531 DPRINTK(PROBE, ERR, "Error in setting power state\n");
4277 ret_val = pci_enable_device(pdev); 4532 ret_val = pci_enable_device(pdev);
4278 pci_set_master(pdev); 4533 pci_set_master(pdev);
4279 4534
4280 pci_enable_wake(pdev, PCI_D3hot, 0); 4535 retval = pci_enable_wake(pdev, PCI_D3hot, 0);
4281 pci_enable_wake(pdev, PCI_D3cold, 0); 4536 if (retval)
 4537 DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
4538 retval = pci_enable_wake(pdev, PCI_D3cold, 0);
4539 if (retval)
 4540 DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
4282 4541
4283 e1000_reset(adapter); 4542 e1000_reset(adapter);
4284 E1000_WRITE_REG(&adapter->hw, WUS, ~0); 4543 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
@@ -4295,21 +4554,13 @@ e1000_resume(struct pci_dev *pdev)
4295 E1000_WRITE_REG(&adapter->hw, MANC, manc); 4554 E1000_WRITE_REG(&adapter->hw, MANC, manc);
4296 } 4555 }
4297 4556
4298 switch(adapter->hw.mac_type) { 4557 /* If the controller is 82573 and f/w is AMT, do not set
4299 case e1000_82571: 4558 * DRV_LOAD until the interface is up. For all other cases,
4300 case e1000_82572: 4559 * let the f/w know that the h/w is now under the control
4301 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); 4560 * of the driver. */
4302 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, 4561 if (adapter->hw.mac_type != e1000_82573 ||
4303 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 4562 !e1000_check_mng_mode(&adapter->hw))
4304 break; 4563 e1000_get_hw_control(adapter);
4305 case e1000_82573:
4306 swsm = E1000_READ_REG(&adapter->hw, SWSM);
4307 E1000_WRITE_REG(&adapter->hw, SWSM,
4308 swsm | E1000_SWSM_DRV_LOAD);
4309 break;
4310 default:
4311 break;
4312 }
4313 4564
4314 return 0; 4565 return 0;
4315} 4566}
@@ -4327,6 +4578,9 @@ e1000_netpoll(struct net_device *netdev)
4327 disable_irq(adapter->pdev->irq); 4578 disable_irq(adapter->pdev->irq);
4328 e1000_intr(adapter->pdev->irq, netdev, NULL); 4579 e1000_intr(adapter->pdev->irq, netdev, NULL);
4329 e1000_clean_tx_irq(adapter, adapter->tx_ring); 4580 e1000_clean_tx_irq(adapter, adapter->tx_ring);
4581#ifndef CONFIG_E1000_NAPI
4582 adapter->clean_rx(adapter, adapter->rx_ring);
4583#endif
4330 enable_irq(adapter->pdev->irq); 4584 enable_irq(adapter->pdev->irq);
4331} 4585}
4332#endif 4586#endif
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index ccbbe5ad8e0f..0a7918c62557 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
177 * 177 *
178 * Valid Range: 100-100000 (0=off, 1=dynamic) 178 * Valid Range: 100-100000 (0=off, 1=dynamic)
179 * 179 *
180 * Default Value: 1 180 * Default Value: 8000
181 */ 181 */
182 182
183E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); 183E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
@@ -320,7 +320,7 @@ e1000_check_options(struct e1000_adapter *adapter)
320 } else { 320 } else {
321 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
322 } 322 }
323 for (i = 0; i < adapter->num_queues; i++) 323 for (i = 0; i < adapter->num_tx_queues; i++)
324 tx_ring[i].count = tx_ring->count; 324 tx_ring[i].count = tx_ring->count;
325 } 325 }
326 { /* Receive Descriptor Count */ 326 { /* Receive Descriptor Count */
@@ -346,7 +346,7 @@ e1000_check_options(struct e1000_adapter *adapter)
346 } else { 346 } else {
347 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
348 } 348 }
349 for (i = 0; i < adapter->num_queues; i++) 349 for (i = 0; i < adapter->num_rx_queues; i++)
350 rx_ring[i].count = rx_ring->count; 350 rx_ring[i].count = rx_ring->count;
351 } 351 }
352 { /* Checksum Offload Enable/Disable */ 352 { /* Checksum Offload Enable/Disable */
@@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter)
388 e1000_validate_option(&fc, &opt, adapter); 388 e1000_validate_option(&fc, &opt, adapter);
389 adapter->hw.fc = adapter->hw.original_fc = fc; 389 adapter->hw.fc = adapter->hw.original_fc = fc;
390 } else { 390 } else {
391 adapter->hw.fc = opt.def; 391 adapter->hw.fc = adapter->hw.original_fc = opt.def;
392 } 392 }
393 } 393 }
394 { /* Transmit Interrupt Delay */ 394 { /* Transmit Interrupt Delay */
@@ -584,6 +584,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
584 .p = dplx_list }} 584 .p = dplx_list }}
585 }; 585 };
586 586
587 if (e1000_check_phy_reset_block(&adapter->hw)) {
588 DPRINTK(PROBE, INFO,
589 "Link active due to SoL/IDER Session. "
590 "Speed/Duplex/AutoNeg parameter ignored.\n");
591 return;
592 }
587 if (num_Duplex > bd) { 593 if (num_Duplex > bd) {
588 dplx = Duplex[bd]; 594 dplx = Duplex[bd];
589 e1000_validate_option(&dplx, &opt, adapter); 595 e1000_validate_option(&dplx, &opt, adapter);