Diffstat (limited to 'drivers/net/e1000/e1000_main.c'):
 drivers/net/e1000/e1000_main.c | 490
 1 file changed, 386 insertions(+), 104 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 03294400bc90..73f3a85fd238 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -27,6 +27,7 @@
 *******************************************************************************/
 
 #include "e1000.h"
+#include <net/ip6_checksum.h>
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -35,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.2.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.3.15-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -103,6 +104,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
         INTEL_E1000_ETHERNET_DEVICE(0x10B9),
         INTEL_E1000_ETHERNET_DEVICE(0x10BA),
         INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+        INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+        INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+        INTEL_E1000_ETHERNET_DEVICE(0x10C5),
         /* required last entry */
         {0,}
 };
@@ -154,6 +158,9 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+#endif
 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                     struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
@@ -285,7 +292,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
 
         flags = IRQF_SHARED;
 #ifdef CONFIG_PCI_MSI
-        if (adapter->hw.mac_type > e1000_82547_rev_2) {
+        if (adapter->hw.mac_type >= e1000_82571) {
                 adapter->have_msi = TRUE;
                 if ((err = pci_enable_msi(adapter->pdev))) {
                         DPRINTK(PROBE, ERR,
@@ -293,8 +300,14 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
                         adapter->have_msi = FALSE;
                 }
         }
-        if (adapter->have_msi)
+        if (adapter->have_msi) {
                 flags &= ~IRQF_SHARED;
+                err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+                                  netdev->name, netdev);
+                if (err)
+                        DPRINTK(PROBE, ERR,
+                               "Unable to allocate interrupt Error: %d\n", err);
+        } else
 #endif
         if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
                                netdev->name, netdev)))
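
The two hunks above give the driver a dedicated MSI path: 82571 and newer parts try pci_enable_msi() and register the slimmer e1000_intr_msi handler, while only the legacy fallback keeps IRQF_SHARED (an MSI vector is never shared, so the "is this our interrupt?" check can be skipped). A minimal standalone sketch of that request pattern, using the stock request_irq()/pci_enable_msi() kernel APIs of this era; the example_* names are illustrative, not driver code:

    /* sketch: prefer MSI, fall back to a shared INTx line */
    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static int example_request_irq(struct pci_dev *pdev, void *ctx,
                                   irq_handler_t msi_handler,
                                   irq_handler_t intx_handler, bool want_msi)
    {
            if (want_msi && !pci_enable_msi(pdev))
                    /* MSI is exclusive to this device: no IRQF_SHARED, and
                     * the handler may assume every interrupt is ours */
                    return request_irq(pdev->irq, msi_handler, 0,
                                       "example", ctx);

            /* legacy INTx may be shared with other devices on the line */
            return request_irq(pdev->irq, intx_handler, IRQF_SHARED,
                               "example", ctx);
    }
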
@@ -375,7 +388,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
  * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that the
  * driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the netowrk i/f is closed.
+ * of the f/w this means that the network i/f is closed.
  *
  **/
 
@@ -416,7 +429,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
  * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that
  * the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the netowrk i/f is open.
+ * of the f/w this means that the network i/f is open.
  *
  **/
 
@@ -426,6 +439,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
         uint32_t ctrl_ext;
         uint32_t swsm;
         uint32_t extcnf;
+
         /* Let firmware know the driver has taken over */
         switch (adapter->hw.mac_type) {
         case e1000_82571:
@@ -601,9 +615,6 @@ void
 e1000_reset(struct e1000_adapter *adapter)
 {
         uint32_t pba, manc;
-#ifdef DISABLE_MULR
-        uint32_t tctl;
-#endif
         uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
 
         /* Repartition Pba for greater than 9k mtu
@@ -670,12 +681,7 @@ e1000_reset(struct e1000_adapter *adapter)
         e1000_reset_hw(&adapter->hw);
         if (adapter->hw.mac_type >= e1000_82544)
                 E1000_WRITE_REG(&adapter->hw, WUC, 0);
-#ifdef DISABLE_MULR
-        /* disable Multiple Reads in Transmit Control Register for debugging */
-        tctl = E1000_READ_REG(hw, TCTL);
-        E1000_WRITE_REG(hw, TCTL, tctl & ~E1000_TCTL_MULR);
 
-#endif
         if (e1000_init_hw(&adapter->hw))
                 DPRINTK(PROBE, ERR, "Hardware Error\n");
         e1000_update_mng_vlan(adapter);
@@ -851,9 +857,9 @@ e1000_probe(struct pci_dev *pdev,
             (adapter->hw.mac_type != e1000_82547))
                 netdev->features |= NETIF_F_TSO;
 
-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
         if (adapter->hw.mac_type > e1000_82547_rev_2)
-                netdev->features |= NETIF_F_TSO_IPV6;
+                netdev->features |= NETIF_F_TSO6;
 #endif
 #endif
         if (pci_using_dac)
@@ -967,6 +973,7 @@ e1000_probe(struct pci_dev *pdev,
                 break;
         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
         case E1000_DEV_ID_82571EB_QUAD_COPPER:
+        case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
                 /* if quad port adapter, disable WoL on all but port A */
                 if (global_quad_port_a != 0)
                         adapter->eeprom_wol = 0;
@@ -1278,12 +1285,10 @@ e1000_open(struct net_device *netdev)
                 return -EBUSY;
 
         /* allocate transmit descriptors */
-
         if ((err = e1000_setup_all_tx_resources(adapter)))
                 goto err_setup_tx;
 
         /* allocate receive descriptors */
-
         if ((err = e1000_setup_all_rx_resources(adapter)))
                 goto err_setup_rx;
 
@@ -1568,6 +1573,8 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 
         if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
                 tarc = E1000_READ_REG(hw, TARC0);
+                /* set the speed mode bit, we'll clear it if we're not at
+                 * gigabit link later */
                 tarc |= (1 << 21);
                 E1000_WRITE_REG(hw, TARC0, tarc);
         } else if (hw->mac_type == e1000_80003es2lan) {
@@ -1582,8 +1589,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
         e1000_config_collision_dist(hw);
 
         /* Setup Transmit Descriptor Settings for eop descriptor */
-        adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
-                E1000_TXD_CMD_IFCS;
+        adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+        /* only set IDE if we are delaying interrupts using the timers */
+        if (adapter->tx_int_delay)
+                adapter->txd_cmd |= E1000_TXD_CMD_IDE;
 
         if (hw->mac_type < e1000_82543)
                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
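
The txd_cmd change above is about latency: E1000_TXD_CMD_IDE (Interrupt Delay Enable) tells the NIC to run the Tx interrupt delay timers before raising an interrupt, so setting it when tx_int_delay is zero only added delay for nothing. A small sketch of the resulting flag assembly; the EX_* values mirror the descriptor layout but are placeholders, the real constants live in e1000_hw.h:

    #include <stdint.h>
    #include <stdio.h>

    #define EX_TXD_CMD_EOP  0x01000000u  /* end of packet */
    #define EX_TXD_CMD_IFCS 0x02000000u  /* insert Ethernet FCS */
    #define EX_TXD_CMD_IDE  0x80000000u  /* enable Tx interrupt delay */

    static uint32_t build_txd_cmd(unsigned int tx_int_delay)
    {
            uint32_t cmd = EX_TXD_CMD_EOP | EX_TXD_CMD_IFCS;

            /* only ask for a delayed interrupt when a delay timer is set */
            if (tx_int_delay)
                    cmd |= EX_TXD_CMD_IDE;
            return cmd;
    }

    int main(void)
    {
            printf("no delay:  0x%08x\n", build_txd_cmd(0));
            printf("delay set: 0x%08x\n", build_txd_cmd(8));
            return 0;
    }
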
@@ -1820,8 +1830,11 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                 /* Configure extra packet-split registers */
                 rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
                 rfctl |= E1000_RFCTL_EXTEN;
-                /* disable IPv6 packet split support */
-                rfctl |= E1000_RFCTL_IPV6_DIS;
+                /* disable packet split support for IPv6 extension headers,
+                 * because some malformed IPv6 headers can hang the RX */
+                rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+                          E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
                 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
 
                 rctl |= E1000_RCTL_DTYP_PS;
@@ -1884,7 +1897,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 
         if (hw->mac_type >= e1000_82540) {
                 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
-                if (adapter->itr > 1)
+                if (adapter->itr_setting != 0)
                         E1000_WRITE_REG(hw, ITR,
                                 1000000000 / (adapter->itr * 256));
         }
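
The ITR write above converts an interrupts-per-second target into the register's 256 ns units: register = 10^9 / (rate * 256). For instance the 4000 ints/s bulk floor becomes 10^9 / (4000 * 256) ≈ 976 units, about 250 us of enforced gap between interrupts. A worked sketch (standalone, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* the ITR register counts the inter-interrupt gap in 256 ns units */
    static uint32_t itr_reg_from_rate(uint32_t ints_per_sec)
    {
            return 1000000000u / (ints_per_sec * 256u);
    }

    int main(void)
    {
            /* the three targets used by e1000_set_itr() later in this patch */
            uint32_t rates[] = { 4000, 20000, 70000 };
            for (int i = 0; i < 3; i++)
                    printf("%5u ints/s -> ITR = %4u (%u ns gap)\n", rates[i],
                           itr_reg_from_rate(rates[i]),
                           itr_reg_from_rate(rates[i]) * 256u);
            return 0;
    }
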
@@ -1894,11 +1907,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
                 /* Reset delay timers after every interrupt */
                 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
 #ifdef CONFIG_E1000_NAPI
-                /* Auto-Mask interrupts upon ICR read. */
+                /* Auto-Mask interrupts upon ICR access */
                 ctrl_ext |= E1000_CTRL_EXT_IAME;
+                E1000_WRITE_REG(hw, IAM, 0xffffffff);
 #endif
                 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
-                E1000_WRITE_REG(hw, IAM, ~0);
                 E1000_WRITE_FLUSH(hw);
         }
 
@@ -1937,6 +1950,12 @@ e1000_configure_rx(struct e1000_adapter *adapter)
                 E1000_WRITE_REG(hw, RXCSUM, rxcsum);
         }
 
+        /* enable early receives on 82573, only takes effect if using > 2048
+         * byte total frame size.  for example only for jumbo frames */
+#define E1000_ERT_2048 0x100
+        if (hw->mac_type == e1000_82573)
+                E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
+
         /* Enable Receives */
         E1000_WRITE_REG(hw, RCTL, rctl);
 }
@@ -1990,10 +2009,13 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                                  buffer_info->dma,
                                  buffer_info->length,
                                  PCI_DMA_TODEVICE);
+                buffer_info->dma = 0;
         }
-        if (buffer_info->skb)
+        if (buffer_info->skb) {
                 dev_kfree_skb_any(buffer_info->skb);
-        memset(buffer_info, 0, sizeof(struct e1000_buffer));
+                buffer_info->skb = NULL;
+        }
+        /* buffer_info must be completely set up in the transmit path */
 }
 
 /**
@@ -2417,6 +2439,7 @@ e1000_watchdog(unsigned long data)
                         DPRINTK(LINK, INFO,
                                 "Gigabit has been disabled, downgrading speed\n");
                 }
+
                 if (adapter->hw.mac_type == e1000_82573) {
                         e1000_enable_tx_pkt_filtering(&adapter->hw);
                         if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2461,13 +2484,12 @@ e1000_watchdog(unsigned long data)
                 if ((adapter->hw.mac_type == e1000_82571 ||
                      adapter->hw.mac_type == e1000_82572) &&
                     txb2b == 0) {
-#define SPEED_MODE_BIT (1 << 21)
                         uint32_t tarc0;
                         tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
-                        tarc0 &= ~SPEED_MODE_BIT;
+                        tarc0 &= ~(1 << 21);
                         E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
                 }
 
 #ifdef NETIF_F_TSO
                 /* disable TSO for pcie and 10/100 speeds, to avoid
                  * some hardware issues */
@@ -2479,9 +2501,15 @@ e1000_watchdog(unsigned long data)
                         DPRINTK(PROBE,INFO,
                                 "10/100 speed: disabling TSO\n");
                         netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+                        netdev->features &= ~NETIF_F_TSO6;
+#endif
                         break;
                 case SPEED_1000:
                         netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+                        netdev->features |= NETIF_F_TSO6;
+#endif
                         break;
                 default:
                         /* oops */
@@ -2548,19 +2576,6 @@ e1000_watchdog(unsigned long data)
                 }
         }
 
-        /* Dynamic mode for Interrupt Throttle Rate (ITR) */
-        if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
-                /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
-                 * asymmetrical Tx or Rx gets ITR=8000; everyone
-                 * else is between 2000-8000. */
-                uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
-                uint32_t dif = (adapter->gotcl > adapter->gorcl ?
-                        adapter->gotcl - adapter->gorcl :
-                        adapter->gorcl - adapter->gotcl) / 10000;
-                uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
-                E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
-        }
-
         /* Cause software interrupt to ensure rx ring is cleaned */
         E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
 
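
For contrast with the per-interrupt e1000_update_itr()/e1000_set_itr() pair added below, the deleted watchdog heuristic above can be worked by hand: with 9 MB transmitted and 1 MB received since the last watchdog tick, goc = (9e6 + 1e6) / 10000 = 1000 and dif = (9e6 - 1e6) / 10000 = 800, so itr = 800 * 6000 / 1000 + 2000 = 6800 ints/s, between the symmetric (2000) and fully one-sided (8000) endpoints. A standalone sketch of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* the watchdog-based heuristic this patch removes */
    static uint32_t old_watchdog_itr(uint32_t gotcl, uint32_t gorcl)
    {
            uint32_t goc = (gotcl + gorcl) / 10000;
            uint32_t dif = (gotcl > gorcl ? gotcl - gorcl
                                          : gorcl - gotcl) / 10000;
            return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
    }

    int main(void)
    {
            /* 9 MB sent vs 1 MB received over the sampling interval */
            printf("itr = %u ints/s\n", old_watchdog_itr(9000000, 1000000));
            return 0;
    }
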
@@ -2576,6 +2591,135 @@ e1000_watchdog(unsigned long data)
         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
 }
 
+enum latency_range {
+        lowest_latency = 0,
+        low_latency = 1,
+        bulk_latency = 2,
+        latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt.  The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern.  Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * this functionality is controlled by the InterruptThrottleRate module
+ * parameter (see e1000_param.c)
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+                                     uint16_t itr_setting,
+                                     int packets,
+                                     int bytes)
+{
+        unsigned int retval = itr_setting;
+        struct e1000_hw *hw = &adapter->hw;
+
+        if (unlikely(hw->mac_type < e1000_82540))
+                goto update_itr_done;
+
+        if (packets == 0)
+                goto update_itr_done;
+
+
+        switch (itr_setting) {
+        case lowest_latency:
+                if ((packets < 5) && (bytes > 512))
+                        retval = low_latency;
+                break;
+        case low_latency:  /* 50 usec aka 20000 ints/s */
+                if (bytes > 10000) {
+                        if ((packets < 10) ||
+                             ((bytes/packets) > 1200))
+                                retval = bulk_latency;
+                        else if ((packets > 35))
+                                retval = lowest_latency;
+                } else if (packets <= 2 && bytes < 512)
+                        retval = lowest_latency;
+                break;
+        case bulk_latency: /* 250 usec aka 4000 ints/s */
+                if (bytes > 25000) {
+                        if (packets > 35)
+                                retval = low_latency;
+                } else {
+                        if (bytes < 6000)
+                                retval = low_latency;
+                }
+                break;
+        }
+
+update_itr_done:
+        return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+        struct e1000_hw *hw = &adapter->hw;
+        uint16_t current_itr;
+        uint32_t new_itr = adapter->itr;
+
+        if (unlikely(hw->mac_type < e1000_82540))
+                return;
+
+        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+        if (unlikely(adapter->link_speed != SPEED_1000)) {
+                current_itr = 0;
+                new_itr = 4000;
+                goto set_itr_now;
+        }
+
+        adapter->tx_itr = e1000_update_itr(adapter,
+                                    adapter->tx_itr,
+                                    adapter->total_tx_packets,
+                                    adapter->total_tx_bytes);
+        adapter->rx_itr = e1000_update_itr(adapter,
+                                    adapter->rx_itr,
+                                    adapter->total_rx_packets,
+                                    adapter->total_rx_bytes);
+
+        current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+        /* conservative mode eliminates the lowest_latency setting */
+        if (current_itr == lowest_latency && (adapter->itr_setting == 3))
+                current_itr = low_latency;
+
+        switch (current_itr) {
+        /* counts and packets in update_itr are dependent on these numbers */
+        case lowest_latency:
+                new_itr = 70000;
+                break;
+        case low_latency:
+                new_itr = 20000; /* aka hwitr = ~200 */
+                break;
+        case bulk_latency:
+                new_itr = 4000;
+                break;
+        default:
+                break;
+        }
+
+set_itr_now:
+        if (new_itr != adapter->itr) {
+                /* this attempts to bias the interrupt rate towards Bulk
+                 * by adding intermediate steps when interrupt rate is
+                 * increasing */
+                new_itr = new_itr > adapter->itr ?
+                             min(adapter->itr + (new_itr >> 2), new_itr) :
+                             new_itr;
+                adapter->itr = new_itr;
+                E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
+        }
+
+        return;
+}
+
 #define E1000_TX_FLAGS_CSUM        0x00000001
 #define E1000_TX_FLAGS_VLAN        0x00000002
 #define E1000_TX_FLAGS_TSO         0x00000004
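
One subtlety in e1000_set_itr() above: downward rate changes (toward bulk) take effect immediately, but upward changes are damped, stepping by a quarter of the target each pass so the driver dwells longer at low interrupt rates. A standalone sketch of that rule (names mine, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

    /* mirror of: new_itr > itr ? min(itr + (new_itr >> 2), new_itr) : new_itr */
    static uint32_t step_itr(uint32_t cur, uint32_t target)
    {
            if (target > cur)
                    return min_u32(cur + (target >> 2), target);
            return target;
    }

    int main(void)
    {
            uint32_t itr = 4000; /* start at bulk */
            for (int i = 1; i <= 5; i++) {
                    itr = step_itr(itr, 70000); /* aim for lowest latency */
                    printf("pass %d: itr = %u\n", i, itr);
            }
            return 0; /* prints 21500, 39000, 56500, 70000, 70000 */
    }
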
@@ -2616,7 +2760,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                                   0);
                 cmd_length = E1000_TXD_CMD_IP;
                 ipcse = skb->h.raw - skb->data - 1;
-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
         } else if (skb->protocol == htons(ETH_P_IPV6)) {
                 skb->nh.ipv6h->payload_len = 0;
                 skb->h.th->check =
@@ -2652,6 +2796,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
                 buffer_info->time_stamp = jiffies;
+                buffer_info->next_to_watch = i;
 
                 if (++i == tx_ring->count) i = 0;
                 tx_ring->next_to_use = i;
@@ -2680,12 +2825,13 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
                 context_desc->upper_setup.tcp_fields.tucss = css;
-                context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+                context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
                 context_desc->upper_setup.tcp_fields.tucse = 0;
                 context_desc->tcp_seg_setup.data = 0;
                 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
 
                 buffer_info->time_stamp = jiffies;
+                buffer_info->next_to_watch = i;
 
                 if (unlikely(++i == tx_ring->count)) i = 0;
                 tx_ring->next_to_use = i;
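
The tucso change tracks a core networking change from this kernel cycle: the checksum-field offset of a partially checksummed skb moved out of the overloaded skb->csum into the dedicated skb->csum_offset field, so the hardware's "write checksum here" byte is now css + skb->csum_offset. What the three context-descriptor fields mean, sketched for TCP over IPv4 with a 20-byte IP header and no VLAN tag (offsets illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct csum_ctx {
            uint8_t  tucss; /* start summing at this byte offset */
            uint8_t  tucso; /* store the 16-bit result at this offset */
            uint16_t tucse; /* stop offset; 0 = sum to end of packet */
    };

    int main(void)
    {
            uint8_t css = 14 + 20;    /* Ethernet + IPv4 header = TCP start */
            uint8_t csum_offset = 16; /* checksum field offset inside TCP */
            struct csum_ctx ctx = {
                    .tucss = css,
                    .tucso = css + csum_offset,
                    .tucse = 0,
            };
            printf("sum bytes %u..end, store result at byte %u\n",
                   ctx.tucss, ctx.tucso);
            return 0;
    }
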
@@ -2754,6 +2900,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                                   size,
                                   PCI_DMA_TODEVICE);
                 buffer_info->time_stamp = jiffies;
+                buffer_info->next_to_watch = i;
 
                 len -= size;
                 offset += size;
@@ -2793,6 +2940,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                                           size,
                                           PCI_DMA_TODEVICE);
                         buffer_info->time_stamp = jiffies;
+                        buffer_info->next_to_watch = i;
 
                         len -= size;
                         offset += size;
@@ -2858,6 +3006,9 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
         tx_ring->next_to_use = i;
         writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+        /* we need this if more than one processor can write to our tail
+         * at a time, it synchronizes IO on IA64/Altix systems */
+        mmiowb();
 }
 
 /**
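
mmiowb() here closes a real ordering hole: on some platforms (the comment's IA64/Altix case) posted MMIO writes from two CPUs can reach the device out of order even when a spinlock serializes the CPUs, so a tail-register write made under a lock must be ordered before the unlock. A minimal sketch of the pattern with the stock spinlock/writel APIs; the ex_* structure is illustrative, not driver code:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    struct ex_ring {
            spinlock_t lock;
            void __iomem *tail_reg;
            unsigned int next_to_use;
    };

    static void ex_post_descriptor(struct ex_ring *ring)
    {
            spin_lock(&ring->lock);
            /* ... fill descriptor, advance ring->next_to_use ... */
            writel(ring->next_to_use, ring->tail_reg);
            /* order the MMIO write before the unlock, so the next lock
             * holder's tail write cannot overtake this one */
            mmiowb();
            spin_unlock(&ring->lock);
    }
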
@@ -2951,6 +3102,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
 
         /* A reprieve! */
         netif_start_queue(netdev);
+        ++adapter->restart_queue;
         return 0;
 }
 
@@ -3009,9 +3161,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                 max_per_txd = min(mss << 2, max_per_txd);
                 max_txd_pwr = fls(max_per_txd) - 1;
 
                 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
                  * points to just header, pull a few bytes of payload from
                  * frags into skb->data */
                 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
                 if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
                         switch (adapter->hw.mac_type) {
@@ -3316,12 +3468,12 @@ e1000_update_stats(struct e1000_adapter *adapter)
         adapter->stats.roc += E1000_READ_REG(hw, ROC);
 
         if (adapter->hw.mac_type != e1000_ich8lan) {
                 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
                 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
                 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
                 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
                 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
                 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
         }
 
         adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
@@ -3352,12 +3504,12 @@ e1000_update_stats(struct e1000_adapter *adapter)
         adapter->stats.tpr += E1000_READ_REG(hw, TPR);
 
         if (adapter->hw.mac_type != e1000_ich8lan) {
                 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
                 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
                 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
                 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
                 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
                 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
         }
 
         adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
@@ -3383,18 +3535,17 @@ e1000_update_stats(struct e1000_adapter *adapter)
         adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
 
         if (adapter->hw.mac_type != e1000_ich8lan) {
                 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
                 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
                 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
                 adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
                 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
                 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
                 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
                 }
         }
 
         /* Fill out the OS statistics structure */
-
         adapter->net_stats.rx_packets = adapter->stats.gprc;
         adapter->net_stats.tx_packets = adapter->stats.gptc;
         adapter->net_stats.rx_bytes = adapter->stats.gorcl;
@@ -3426,7 +3577,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
         /* Tx Dropped needs to be maintained elsewhere */
 
         /* Phy Stats */
-
         if (hw->media_type == e1000_media_type_copper) {
                 if ((adapter->link_speed == SPEED_1000) &&
                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
@@ -3442,6 +3592,95 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
         spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
+#ifdef CONFIG_PCI_MSI
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static
+irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+        struct net_device *netdev = data;
+        struct e1000_adapter *adapter = netdev_priv(netdev);
+        struct e1000_hw *hw = &adapter->hw;
+#ifndef CONFIG_E1000_NAPI
+        int i;
+#endif
+
+        /* this code avoids the read of ICR but has to get 1000 interrupts
+         * at every link change event before it will notice the change */
+        if (++adapter->detect_link >= 1000) {
+                uint32_t icr = E1000_READ_REG(hw, ICR);
+#ifdef CONFIG_E1000_NAPI
+                /* read ICR disables interrupts using IAM, so keep up with our
+                 * enable/disable accounting */
+                atomic_inc(&adapter->irq_sem);
+#endif
+                adapter->detect_link = 0;
+                if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
+                    (icr & E1000_ICR_INT_ASSERTED)) {
+                        hw->get_link_status = 1;
+                        /* 80003ES2LAN workaround--
+                         * For packet buffer work-around on link down event;
+                         * disable receives here in the ISR and
+                         * reset adapter in watchdog
+                         */
+                        if (netif_carrier_ok(netdev) &&
+                            (adapter->hw.mac_type == e1000_80003es2lan)) {
+                                /* disable receives */
+                                uint32_t rctl = E1000_READ_REG(hw, RCTL);
+                                E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+                        }
+                        /* guard against interrupt when we're going down */
+                        if (!test_bit(__E1000_DOWN, &adapter->flags))
+                                mod_timer(&adapter->watchdog_timer,
+                                          jiffies + 1);
+                }
+        } else {
+                E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
+                                                         E1000_ICR_LSC)));
+                /* bummer we have to flush here, but things break otherwise as
+                 * some event appears to be lost or delayed and throughput
+                 * drops.  In almost all tests this flush is un-necessary */
+                E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_NAPI
+                /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
+                 * masked.  No need for the IMC write, but it does mean we
+                 * should account for it ASAP. */
+                atomic_inc(&adapter->irq_sem);
+#endif
+        }
+
+#ifdef CONFIG_E1000_NAPI
+        if (likely(netif_rx_schedule_prep(netdev))) {
+                adapter->total_tx_bytes = 0;
+                adapter->total_tx_packets = 0;
+                adapter->total_rx_bytes = 0;
+                adapter->total_rx_packets = 0;
+                __netif_rx_schedule(netdev);
+        } else
+                e1000_irq_enable(adapter);
+#else
+        adapter->total_tx_bytes = 0;
+        adapter->total_rx_bytes = 0;
+        adapter->total_tx_packets = 0;
+        adapter->total_rx_packets = 0;
+
+        for (i = 0; i < E1000_MAX_INTR; i++)
+                if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+                   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+                        break;
+
+        if (likely(adapter->itr_setting & 3))
+                e1000_set_itr(adapter);
+#endif
+
+        return IRQ_HANDLED;
+}
+#endif
 
 /**
  * e1000_intr - Interrupt Handler
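
The 1000-interrupt counter in e1000_intr_msi() above is an amortization trick: because an MSI vector is never shared, the handler does not need ICR to answer "was this interrupt ours?", so it defers the expensive ICR read (which also auto-masks under NAPI) to every thousandth interrupt, accepting up to roughly a thousand interrupts of lag in link-change detection. The shape of the trick, reduced to a standalone sketch (names illustrative, not driver code):

    #include <stdint.h>

    #define SLOW_CHECK_INTERVAL 1000

    struct ex_irq_ctx {
            unsigned int counter;
    };

    /* slow_read stands in for the costly ICR register read */
    void ex_service_irq(struct ex_irq_ctx *ctx,
                        uint32_t (*slow_read)(void),
                        void (*handle_link_event)(uint32_t))
    {
            if (++ctx->counter >= SLOW_CHECK_INTERVAL) {
                    ctx->counter = 0;
                    handle_link_event(slow_read()); /* rare, slow path */
            }
            /* common path: just clean the rings; MSI guarantees the
             * interrupt came from this device */
    }
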
@@ -3458,7 +3697,17 @@ e1000_intr(int irq, void *data)
         uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
 #ifndef CONFIG_E1000_NAPI
         int i;
-#else
+#endif
+        if (unlikely(!icr))
+                return IRQ_NONE;  /* Not our interrupt */
+
+#ifdef CONFIG_E1000_NAPI
+        /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+         * not set, then the adapter didn't send an interrupt */
+        if (unlikely(hw->mac_type >= e1000_82571 &&
+                     !(icr & E1000_ICR_INT_ASSERTED)))
+                return IRQ_NONE;
+
         /* Interrupt Auto-Mask...upon reading ICR,
          * interrupts are masked.  No need for the
          * IMC write, but it does mean we should
@@ -3467,14 +3716,6 @@ e1000_intr(int irq, void *data)
         atomic_inc(&adapter->irq_sem);
 #endif
 
-        if (unlikely(!icr)) {
-#ifdef CONFIG_E1000_NAPI
-                if (hw->mac_type >= e1000_82571)
-                        e1000_irq_enable(adapter);
-#endif
-                return IRQ_NONE;  /* Not our interrupt */
-        }
-
         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
                 hw->get_link_status = 1;
                 /* 80003ES2LAN workaround--
@@ -3495,13 +3736,20 @@ e1000_intr(int irq, void *data)
 
 #ifdef CONFIG_E1000_NAPI
         if (unlikely(hw->mac_type < e1000_82571)) {
+                /* disable interrupts, without the synchronize_irq bit */
                 atomic_inc(&adapter->irq_sem);
                 E1000_WRITE_REG(hw, IMC, ~0);
                 E1000_WRITE_FLUSH(hw);
         }
-        if (likely(netif_rx_schedule_prep(netdev)))
+        if (likely(netif_rx_schedule_prep(netdev))) {
+                adapter->total_tx_bytes = 0;
+                adapter->total_tx_packets = 0;
+                adapter->total_rx_bytes = 0;
+                adapter->total_rx_packets = 0;
                 __netif_rx_schedule(netdev);
-        else
+        } else
+                /* this really should not happen! if it does it is basically a
+                 * bug, but not a hard error, so enable ints and continue */
                 e1000_irq_enable(adapter);
 #else
         /* Writing IMC and IMS is needed for 82547.
@@ -3519,16 +3767,23 @@ e1000_intr(int irq, void *data)
                 E1000_WRITE_REG(hw, IMC, ~0);
         }
 
+        adapter->total_tx_bytes = 0;
+        adapter->total_rx_bytes = 0;
+        adapter->total_tx_packets = 0;
+        adapter->total_rx_packets = 0;
+
         for (i = 0; i < E1000_MAX_INTR; i++)
                 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
                    !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
                         break;
 
+        if (likely(adapter->itr_setting & 3))
+                e1000_set_itr(adapter);
+
         if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
                 e1000_irq_enable(adapter);
 
 #endif
-
         return IRQ_HANDLED;
 }
 
@@ -3572,6 +3827,8 @@ e1000_clean(struct net_device *poll_dev, int *budget)
         if ((!tx_cleaned && (work_done == 0)) ||
            !netif_running(poll_dev)) {
 quit_polling:
+                if (likely(adapter->itr_setting & 3))
+                        e1000_set_itr(adapter);
                 netif_rx_complete(poll_dev);
                 e1000_irq_enable(adapter);
                 return 0;
@@ -3598,6 +3855,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
         unsigned int count = 0;
 #endif
         boolean_t cleaned = FALSE;
+        unsigned int total_tx_bytes=0, total_tx_packets=0;
 
         i = tx_ring->next_to_clean;
         eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3609,13 +3867,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
                         buffer_info = &tx_ring->buffer_info[i];
                         cleaned = (i == eop);
 
+                        if (cleaned) {
+                                /* this packet count is wrong for TSO but has a
+                                 * tendency to make dynamic ITR change more
+                                 * towards bulk */
+                                total_tx_packets++;
+                                total_tx_bytes += buffer_info->skb->len;
+                        }
                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
-                        memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+                        tx_desc->upper.data = 0;
 
                         if (unlikely(++i == tx_ring->count)) i = 0;
                 }
 
-
                 eop = tx_ring->buffer_info[i].next_to_watch;
                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
 #ifdef CONFIG_E1000_NAPI
@@ -3634,8 +3898,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
                  * sees the new next_to_clean.
                  */
                 smp_mb();
-                if (netif_queue_stopped(netdev))
+                if (netif_queue_stopped(netdev)) {
                         netif_wake_queue(netdev);
+                        ++adapter->restart_queue;
+                }
         }
 
         if (adapter->detect_tx_hung) {
@@ -3673,6 +3939,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
                         netif_stop_queue(netdev);
                 }
         }
+        adapter->total_tx_bytes += total_tx_bytes;
+        adapter->total_tx_packets += total_tx_packets;
         return cleaned;
 }
 
@@ -3752,6 +4020,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
         unsigned int i;
         int cleaned_count = 0;
         boolean_t cleaned = FALSE;
+        unsigned int total_rx_bytes=0, total_rx_packets=0;
 
         i = rx_ring->next_to_clean;
         rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3760,6 +4029,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
         while (rx_desc->status & E1000_RXD_STAT_DD) {
                 struct sk_buff *skb;
                 u8 status;
+
 #ifdef CONFIG_E1000_NAPI
                 if (*work_done >= work_to_do)
                         break;
@@ -3817,6 +4087,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                  * done after the TBI_ACCEPT workaround above */
                 length -= 4;
 
+                /* probably a little skewed due to removing CRC */
+                total_rx_bytes += length;
+                total_rx_packets++;
+
                 /* code added for copybreak, this should improve
                  * performance for small packets with large amounts
                  * of reassembly being done in the stack */
@@ -3832,12 +4106,11 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                 /* save the skb in buffer_info as good */
                                 buffer_info->skb = skb;
                                 skb = new_skb;
-                                skb_put(skb, length);
                         }
-                } else
-                        skb_put(skb, length);
-
+                        /* else just continue with the old one */
+                }
                 /* end copybreak code */
+                skb_put(skb, length);
 
                 /* Receive Checksum Offload */
                 e1000_rx_checksum(adapter,
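
The reworked copybreak block above also straightens the flow so skb_put() runs exactly once, on whichever skb survives. Copybreak itself is a size tradeoff: a frame shorter than the threshold is memcpy'd into a freshly allocated small skb so the full-sized receive buffer can be recycled in place instead of traveling up the stack. The decision, reduced to a sketch (threshold value hypothetical):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    #define EX_COPYBREAK 256 /* illustrative cutoff in bytes */

    /* returns true if the frame was copied and the large RX buffer can be
     * reused immediately for the next packet */
    static bool ex_rx_copybreak(const unsigned char *rx_buf, size_t len,
                                unsigned char *small_buf, size_t small_len)
    {
            if (len > EX_COPYBREAK || len > small_len)
                    return false; /* hand the large buffer up the stack */
            memcpy(small_buf, rx_buf, len); /* cheap for short frames */
            return true;
    }
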
@@ -3886,6 +4159,8 @@ next_desc:
         if (cleaned_count)
                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
+        adapter->total_rx_packets += total_rx_packets;
+        adapter->total_rx_bytes += total_rx_bytes;
         return cleaned;
 }
 
@@ -3915,6 +4190,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
         uint32_t length, staterr;
         int cleaned_count = 0;
         boolean_t cleaned = FALSE;
+        unsigned int total_rx_bytes=0, total_rx_packets=0;
 
         i = rx_ring->next_to_clean;
         rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
@@ -3999,7 +4275,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                 goto copydone;
                         } /* if */
                 }
 
                 for (j = 0; j < adapter->rx_ps_pages; j++) {
                         if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
                                 break;
@@ -4019,6 +4295,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                         pskb_trim(skb, skb->len - 4);
 
 copydone:
+                total_rx_bytes += skb->len;
+                total_rx_packets++;
+
                 e1000_rx_checksum(adapter, staterr,
                         le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
                 skb->protocol = eth_type_trans(skb, netdev);
@@ -4067,6 +4346,8 @@ next_desc:
         if (cleaned_count)
                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
+        adapter->total_rx_packets += total_rx_packets;
+        adapter->total_rx_bytes += total_rx_bytes;
         return cleaned;
 }
 
@@ -4234,7 +4515,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                 }
 
                 skb = netdev_alloc_skb(netdev,
                                        adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
                 if (unlikely(!skb)) {
                         adapter->alloc_rx_buff_failed++;
@@ -4511,7 +4792,6 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
         return E1000_SUCCESS;
 }
 
-
 void
 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
 {
@@ -4534,12 +4814,12 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 
                 if (adapter->hw.mac_type != e1000_ich8lan) {
                         /* enable VLAN receive filtering */
                         rctl = E1000_READ_REG(&adapter->hw, RCTL);
                         rctl |= E1000_RCTL_VFE;
                         rctl &= ~E1000_RCTL_CFIEN;
                         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
                         e1000_update_mng_vlan(adapter);
                 }
         } else {
                 /* disable VLAN tag insert/strip */
@@ -4548,14 +4828,16 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 
                 if (adapter->hw.mac_type != e1000_ich8lan) {
                         /* disable VLAN filtering */
                         rctl = E1000_READ_REG(&adapter->hw, RCTL);
                         rctl &= ~E1000_RCTL_VFE;
                         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-                        if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
-                                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-                                adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-                        }
+                        if (adapter->mng_vlan_id !=
+                            (uint16_t)E1000_MNG_VLAN_NONE) {
+                                e1000_vlan_rx_kill_vid(netdev,
+                                                       adapter->mng_vlan_id);
+                                adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+                        }
                 }
         }
 