author     Jesse Brandeburg <jesse.brandeburg@intel.com>   2006-11-01 11:48:13 -0500
committer  Jeff Garzik <jeff@garzik.org>                   2006-12-02 00:12:00 -0500
commit     835bb1298311f372a3387fb40b952b18d90aa9f8 (patch)
tree       1a58003158f03397212979727c2f5cfa37ebc4f2 /drivers/net/e1000/e1000_main.c
parent     9ac98284428961bd5be285a6cc1f5e6f5b6644aa (diff)
e1000: add dynamic itr modes
Add a new dynamic ITR algorithm with two modes, and make it the default
mode of operation. This greatly reduces latency and increases small-packet
performance, at the "cost" of some CPU utilization. Bulk traffic
throughput is unaffected.
The driver can limit the number of interrupts per second that the
adapter generates for incoming packets. It does this by writing to the
adapter's ITR register a value derived from the maximum number of
interrupts the adapter should generate per second.
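For reference, the register arithmetic used by the patch (visible in the
diff below as E1000_WRITE_REG(hw, ITR, 1000000000 / (adapter->itr * 256)))
boils down to the following sketch; itr_to_reg() is a hypothetical helper
name used here only for illustration:

    #include <stdint.h>

    /* The ITR register holds the minimum gap between interrupts in
     * 256 ns units, so a target of N interrupts per second is
     * programmed as 10^9 / (N * 256).  Illustrative helper only,
     * not part of the driver. */
    static uint32_t itr_to_reg(uint32_t ints_per_sec)
    {
            return 1000000000u / (ints_per_sec * 256u);
    }

    /* Example: InterruptThrottleRate=8000 yields
     * 10^9 / (8000 * 256) = 488, i.e. at most one interrupt
     * every ~125 us. */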
Setting InterruptThrottleRate to a value greater than or equal to 100 will
program the adapter to send out a maximum of that many interrupts per
second, even if more packets have come in. This reduces interrupt
load on the system and can lower CPU utilization under heavy load,
but will increase latency as packets are not processed as quickly.
Previously, the driver's default behaviour assumed a static
InterruptThrottleRate value of 8000, which provides a good fallback
for all traffic types but falls short on small-packet performance and
latency. The hardware, however, can handle many more small packets per
second, and for this reason an adaptive interrupt moderation algorithm
was implemented.
Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in
which it dynamically adjusts the InterruptThrottleRate value based on
the traffic that it receives. After determining the type of incoming
traffic in the last timeframe, it will adjust the InterruptThrottleRate
to an appropriate value for that traffic.
Every interval, the algorithm classifies the incoming traffic into one
of three classes and adjusts the InterruptThrottleRate value to best
suit that traffic type. The classes are: "Bulk traffic", for large
amounts of packets of normal size; "Low latency", for small amounts of
traffic and/or a significant percentage of small packets; and
"Lowest latency", for almost completely small packets or minimal traffic.
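As a rough guide, the classification heuristic introduced by this patch
(see e1000_update_itr() in the diff below) can be condensed to the
following sketch; classify() is an illustrative name, and the thresholds
are the ones from the patch, applied per measurement interval:

    enum latency_range { lowest_latency, low_latency, bulk_latency };

    /* Condensed restatement of e1000_update_itr(): pick the next
     * traffic class from the packet and byte counts of the last
     * interval. */
    static enum latency_range classify(enum latency_range cur,
                                       int packets, int bytes)
    {
            if (packets == 0)
                    return cur;

            switch (cur) {
            case lowest_latency:
                    if (packets < 5 && bytes > 512)
                            return low_latency;     /* few, large packets */
                    break;
            case low_latency:
                    if (bytes > 10000) {
                            if (packets < 10 || bytes / packets > 1200)
                                    return bulk_latency;
                            if (packets > 35)
                                    return lowest_latency;
                    } else if (packets <= 2 && bytes < 512) {
                            return lowest_latency;  /* tiny trickle of traffic */
                    }
                    break;
            case bulk_latency:
                    if (bytes > 25000 ? packets > 35 : bytes < 6000)
                            return low_latency;
                    break;
            }
            return cur;
    }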
In dynamic conservative mode, the InterruptThrottleRate value is
set to 4000 for traffic that falls in class "Bulk traffic". If
traffic falls in the "Low latency" or "Lowest latency" class, the
InterruptThrottleRate is increased stepwise to 20000. This default
mode is suitable for most applications.
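Concretely, the class-to-rate mapping in the patch (the switch in
e1000_set_itr() below) behaves roughly as in this sketch; class_to_rate()
is an illustrative helper, not a driver function, and the 70000 value
applies to mode 1, described in the next paragraph:

    #include <stdint.h>

    enum latency_range { lowest_latency, low_latency, bulk_latency };

    /* In conservative mode (InterruptThrottleRate=3) the "Lowest
     * latency" class is demoted to "Low latency", so the target rate
     * never exceeds 20000 ints/s; mode 1 allows the full 70000. */
    static uint32_t class_to_rate(enum latency_range range, int itr_setting)
    {
            if (range == lowest_latency && itr_setting == 3)
                    range = low_latency;

            switch (range) {
            case lowest_latency:
                    return 70000;   /* mode 1 only */
            case low_latency:
                    return 20000;
            default:
                    return 4000;    /* bulk traffic */
            }
    }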
For situations where low latency is vital, such as cluster or
grid computing, the algorithm can reduce latency even further when
InterruptThrottleRate is set to mode 1. In this mode, which operates
the same as mode 3, the InterruptThrottleRate will be increased
stepwise to 70000 for traffic in the "Lowest latency" class.
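The "stepwise" increase corresponds to the biasing logic at the end of
e1000_set_itr() in the diff below: when the target rate is higher than
the current one, a quarter of the target is added per adjustment, capped
at the target, so the interrupt rate climbs gradually towards the
low-latency settings but drops to the bulk setting immediately. A sketch
(step_itr() is an illustrative name):

    #include <stdint.h>

    static uint32_t step_itr(uint32_t cur, uint32_t target)
    {
            if (target > cur) {
                    uint32_t next = cur + (target >> 2);    /* raise gradually */
                    return next < target ? next : target;
            }
            return target;                                  /* drop immediately */
    }

    /* Example: stepping from 4000 towards 20000 goes
     * 4000 -> 9000 -> 14000 -> 19000 -> 20000 over successive
     * adjustments. */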
Setting InterruptThrottleRate to 0 turns off any interrupt moderation
and may improve small packet latency, but is generally not suitable
for bulk throughput traffic.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Cc: Rick Jones <rick.jones2@hp.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--   drivers/net/e1000/e1000_main.c | 226
1 file changed, 197 insertions(+), 29 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 35e4e32c7702..56be5c89363c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1897,7 +1897,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1897 | 1897 | ||
1898 | if (hw->mac_type >= e1000_82540) { | 1898 | if (hw->mac_type >= e1000_82540) { |
1899 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); | 1899 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); |
1900 | if (adapter->itr > 1) | 1900 | if (adapter->itr_setting != 0) |
1901 | E1000_WRITE_REG(hw, ITR, | 1901 | E1000_WRITE_REG(hw, ITR, |
1902 | 1000000000 / (adapter->itr * 256)); | 1902 | 1000000000 / (adapter->itr * 256)); |
1903 | } | 1903 | } |
@@ -1907,11 +1907,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1907 | /* Reset delay timers after every interrupt */ | 1907 | /* Reset delay timers after every interrupt */ |
1908 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; | 1908 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; |
1909 | #ifdef CONFIG_E1000_NAPI | 1909 | #ifdef CONFIG_E1000_NAPI |
1910 | /* Auto-Mask interrupts upon ICR read. */ | 1910 | /* Auto-Mask interrupts upon ICR access */ |
1911 | ctrl_ext |= E1000_CTRL_EXT_IAME; | 1911 | ctrl_ext |= E1000_CTRL_EXT_IAME; |
1912 | E1000_WRITE_REG(hw, IAM, 0xffffffff); | ||
1912 | #endif | 1913 | #endif |
1913 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 1914 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); |
1914 | E1000_WRITE_REG(hw, IAM, ~0); | ||
1915 | E1000_WRITE_FLUSH(hw); | 1915 | E1000_WRITE_FLUSH(hw); |
1916 | } | 1916 | } |
1917 | 1917 | ||
@@ -2576,19 +2576,6 @@ e1000_watchdog(unsigned long data)
2576 | } | 2576 | } |
2577 | } | 2577 | } |
2578 | 2578 | ||
2579 | /* Dynamic mode for Interrupt Throttle Rate (ITR) */ | ||
2580 | if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { | ||
2581 | /* Symmetric Tx/Rx gets a reduced ITR=2000; Total | ||
2582 | * asymmetrical Tx or Rx gets ITR=8000; everyone | ||
2583 | * else is between 2000-8000. */ | ||
2584 | uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; | ||
2585 | uint32_t dif = (adapter->gotcl > adapter->gorcl ? | ||
2586 | adapter->gotcl - adapter->gorcl : | ||
2587 | adapter->gorcl - adapter->gotcl) / 10000; | ||
2588 | uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; | ||
2589 | E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256)); | ||
2590 | } | ||
2591 | |||
2592 | /* Cause software interrupt to ensure rx ring is cleaned */ | 2579 | /* Cause software interrupt to ensure rx ring is cleaned */ |
2593 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); | 2580 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); |
2594 | 2581 | ||
@@ -2604,6 +2591,135 @@ e1000_watchdog(unsigned long data)
2604 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | 2591 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); |
2605 | } | 2592 | } |
2606 | 2593 | ||
2594 | enum latency_range { | ||
2595 | lowest_latency = 0, | ||
2596 | low_latency = 1, | ||
2597 | bulk_latency = 2, | ||
2598 | latency_invalid = 255 | ||
2599 | }; | ||
2600 | |||
2601 | /** | ||
2602 | * e1000_update_itr - update the dynamic ITR value based on statistics | ||
2603 | * Stores a new ITR value based on packets and byte | ||
2604 | * counts during the last interrupt. The advantage of per interrupt | ||
2605 | * computation is faster updates and more accurate ITR for the current | ||
2606 | * traffic pattern. Constants in this function were computed | ||
2607 | * based on theoretical maximum wire speed and thresholds were set based | ||
2608 | * on testing data as well as attempting to minimize response time | ||
2609 | * while increasing bulk throughput. | ||
2610 | * this functionality is controlled by the InterruptThrottleRate module | ||
2611 | * parameter (see e1000_param.c) | ||
2612 | * @adapter: pointer to adapter | ||
2613 | * @itr_setting: current adapter->itr | ||
2614 | * @packets: the number of packets during this measurement interval | ||
2615 | * @bytes: the number of bytes during this measurement interval | ||
2616 | **/ | ||
2617 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | ||
2618 | uint16_t itr_setting, | ||
2619 | int packets, | ||
2620 | int bytes) | ||
2621 | { | ||
2622 | unsigned int retval = itr_setting; | ||
2623 | struct e1000_hw *hw = &adapter->hw; | ||
2624 | |||
2625 | if (unlikely(hw->mac_type < e1000_82540)) | ||
2626 | goto update_itr_done; | ||
2627 | |||
2628 | if (packets == 0) | ||
2629 | goto update_itr_done; | ||
2630 | |||
2631 | |||
2632 | switch (itr_setting) { | ||
2633 | case lowest_latency: | ||
2634 | if ((packets < 5) && (bytes > 512)) | ||
2635 | retval = low_latency; | ||
2636 | break; | ||
2637 | case low_latency: /* 50 usec aka 20000 ints/s */ | ||
2638 | if (bytes > 10000) { | ||
2639 | if ((packets < 10) || | ||
2640 | ((bytes/packets) > 1200)) | ||
2641 | retval = bulk_latency; | ||
2642 | else if ((packets > 35)) | ||
2643 | retval = lowest_latency; | ||
2644 | } else if (packets <= 2 && bytes < 512) | ||
2645 | retval = lowest_latency; | ||
2646 | break; | ||
2647 | case bulk_latency: /* 250 usec aka 4000 ints/s */ | ||
2648 | if (bytes > 25000) { | ||
2649 | if (packets > 35) | ||
2650 | retval = low_latency; | ||
2651 | } else { | ||
2652 | if (bytes < 6000) | ||
2653 | retval = low_latency; | ||
2654 | } | ||
2655 | break; | ||
2656 | } | ||
2657 | |||
2658 | update_itr_done: | ||
2659 | return retval; | ||
2660 | } | ||
2661 | |||
2662 | static void e1000_set_itr(struct e1000_adapter *adapter) | ||
2663 | { | ||
2664 | struct e1000_hw *hw = &adapter->hw; | ||
2665 | uint16_t current_itr; | ||
2666 | uint32_t new_itr = adapter->itr; | ||
2667 | |||
2668 | if (unlikely(hw->mac_type < e1000_82540)) | ||
2669 | return; | ||
2670 | |||
2671 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ | ||
2672 | if (unlikely(adapter->link_speed != SPEED_1000)) { | ||
2673 | current_itr = 0; | ||
2674 | new_itr = 4000; | ||
2675 | goto set_itr_now; | ||
2676 | } | ||
2677 | |||
2678 | adapter->tx_itr = e1000_update_itr(adapter, | ||
2679 | adapter->tx_itr, | ||
2680 | adapter->total_tx_packets, | ||
2681 | adapter->total_tx_bytes); | ||
2682 | adapter->rx_itr = e1000_update_itr(adapter, | ||
2683 | adapter->rx_itr, | ||
2684 | adapter->total_rx_packets, | ||
2685 | adapter->total_rx_bytes); | ||
2686 | |||
2687 | current_itr = max(adapter->rx_itr, adapter->tx_itr); | ||
2688 | |||
2689 | /* conservative mode eliminates the lowest_latency setting */ | ||
2690 | if (current_itr == lowest_latency && (adapter->itr_setting == 3)) | ||
2691 | current_itr = low_latency; | ||
2692 | |||
2693 | switch (current_itr) { | ||
2694 | /* counts and packets in update_itr are dependent on these numbers */ | ||
2695 | case lowest_latency: | ||
2696 | new_itr = 70000; | ||
2697 | break; | ||
2698 | case low_latency: | ||
2699 | new_itr = 20000; /* aka hwitr = ~200 */ | ||
2700 | break; | ||
2701 | case bulk_latency: | ||
2702 | new_itr = 4000; | ||
2703 | break; | ||
2704 | default: | ||
2705 | break; | ||
2706 | } | ||
2707 | |||
2708 | set_itr_now: | ||
2709 | if (new_itr != adapter->itr) { | ||
2710 | /* this attempts to bias the interrupt rate towards Bulk | ||
2711 | * by adding intermediate steps when interrupt rate is | ||
2712 | * increasing */ | ||
2713 | new_itr = new_itr > adapter->itr ? | ||
2714 | min(adapter->itr + (new_itr >> 2), new_itr) : | ||
2715 | new_itr; | ||
2716 | adapter->itr = new_itr; | ||
2717 | E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256)); | ||
2718 | } | ||
2719 | |||
2720 | return; | ||
2721 | } | ||
2722 | |||
2607 | #define E1000_TX_FLAGS_CSUM 0x00000001 | 2723 | #define E1000_TX_FLAGS_CSUM 0x00000001 |
2608 | #define E1000_TX_FLAGS_VLAN 0x00000002 | 2724 | #define E1000_TX_FLAGS_VLAN 0x00000002 |
2609 | #define E1000_TX_FLAGS_TSO 0x00000004 | 2725 | #define E1000_TX_FLAGS_TSO 0x00000004 |
@@ -3538,15 +3654,27 @@ irqreturn_t e1000_intr_msi(int irq, void *data)
3538 | } | 3654 | } |
3539 | 3655 | ||
3540 | #ifdef CONFIG_E1000_NAPI | 3656 | #ifdef CONFIG_E1000_NAPI |
3541 | if (likely(netif_rx_schedule_prep(netdev))) | 3657 | if (likely(netif_rx_schedule_prep(netdev))) { |
3658 | adapter->total_tx_bytes = 0; | ||
3659 | adapter->total_tx_packets = 0; | ||
3660 | adapter->total_rx_bytes = 0; | ||
3661 | adapter->total_rx_packets = 0; | ||
3542 | __netif_rx_schedule(netdev); | 3662 | __netif_rx_schedule(netdev); |
3543 | else | 3663 | } else |
3544 | e1000_irq_enable(adapter); | 3664 | e1000_irq_enable(adapter); |
3545 | #else | 3665 | #else |
3666 | adapter->total_tx_bytes = 0; | ||
3667 | adapter->total_rx_bytes = 0; | ||
3668 | adapter->total_tx_packets = 0; | ||
3669 | adapter->total_rx_packets = 0; | ||
3670 | |||
3546 | for (i = 0; i < E1000_MAX_INTR; i++) | 3671 | for (i = 0; i < E1000_MAX_INTR; i++) |
3547 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | 3672 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & |
3548 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) | 3673 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) |
3549 | break; | 3674 | break; |
3675 | |||
3676 | if (likely(adapter->itr_setting & 3)) | ||
3677 | e1000_set_itr(adapter); | ||
3550 | #endif | 3678 | #endif |
3551 | 3679 | ||
3552 | return IRQ_HANDLED; | 3680 | return IRQ_HANDLED; |
@@ -3568,7 +3696,17 @@ e1000_intr(int irq, void *data)
3568 | uint32_t rctl, icr = E1000_READ_REG(hw, ICR); | 3696 | uint32_t rctl, icr = E1000_READ_REG(hw, ICR); |
3569 | #ifndef CONFIG_E1000_NAPI | 3697 | #ifndef CONFIG_E1000_NAPI |
3570 | int i; | 3698 | int i; |
3571 | #else | 3699 | #endif |
3700 | if (unlikely(!icr)) | ||
3701 | return IRQ_NONE; /* Not our interrupt */ | ||
3702 | |||
3703 | #ifdef CONFIG_E1000_NAPI | ||
3704 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | ||
3705 | * not set, then the adapter didn't send an interrupt */ | ||
3706 | if (unlikely(hw->mac_type >= e1000_82571 && | ||
3707 | !(icr & E1000_ICR_INT_ASSERTED))) | ||
3708 | return IRQ_NONE; | ||
3709 | |||
3572 | /* Interrupt Auto-Mask...upon reading ICR, | 3710 | /* Interrupt Auto-Mask...upon reading ICR, |
3573 | * interrupts are masked. No need for the | 3711 | * interrupts are masked. No need for the |
3574 | * IMC write, but it does mean we should | 3712 | * IMC write, but it does mean we should |
@@ -3577,14 +3715,6 @@ e1000_intr(int irq, void *data)
3577 | atomic_inc(&adapter->irq_sem); | 3715 | atomic_inc(&adapter->irq_sem); |
3578 | #endif | 3716 | #endif |
3579 | 3717 | ||
3580 | if (unlikely(!icr)) { | ||
3581 | #ifdef CONFIG_E1000_NAPI | ||
3582 | if (hw->mac_type >= e1000_82571) | ||
3583 | e1000_irq_enable(adapter); | ||
3584 | #endif | ||
3585 | return IRQ_NONE; /* Not our interrupt */ | ||
3586 | } | ||
3587 | |||
3588 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | 3718 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
3589 | hw->get_link_status = 1; | 3719 | hw->get_link_status = 1; |
3590 | /* 80003ES2LAN workaround-- | 3720 | /* 80003ES2LAN workaround-- |
@@ -3605,13 +3735,18 @@ e1000_intr(int irq, void *data)
3605 | 3735 | ||
3606 | #ifdef CONFIG_E1000_NAPI | 3736 | #ifdef CONFIG_E1000_NAPI |
3607 | if (unlikely(hw->mac_type < e1000_82571)) { | 3737 | if (unlikely(hw->mac_type < e1000_82571)) { |
3738 | /* disable interrupts, without the synchronize_irq bit */ | ||
3608 | atomic_inc(&adapter->irq_sem); | 3739 | atomic_inc(&adapter->irq_sem); |
3609 | E1000_WRITE_REG(hw, IMC, ~0); | 3740 | E1000_WRITE_REG(hw, IMC, ~0); |
3610 | E1000_WRITE_FLUSH(hw); | 3741 | E1000_WRITE_FLUSH(hw); |
3611 | } | 3742 | } |
3612 | if (likely(netif_rx_schedule_prep(netdev))) | 3743 | if (likely(netif_rx_schedule_prep(netdev))) { |
3744 | adapter->total_tx_bytes = 0; | ||
3745 | adapter->total_tx_packets = 0; | ||
3746 | adapter->total_rx_bytes = 0; | ||
3747 | adapter->total_rx_packets = 0; | ||
3613 | __netif_rx_schedule(netdev); | 3748 | __netif_rx_schedule(netdev); |
3614 | else | 3749 | } else |
3615 | /* this really should not happen! if it does it is basically a | 3750 | /* this really should not happen! if it does it is basically a |
3616 | * bug, but not a hard error, so enable ints and continue */ | 3751 | * bug, but not a hard error, so enable ints and continue */ |
3617 | e1000_irq_enable(adapter); | 3752 | e1000_irq_enable(adapter); |
@@ -3631,11 +3766,19 @@ e1000_intr(int irq, void *data)
3631 | E1000_WRITE_REG(hw, IMC, ~0); | 3766 | E1000_WRITE_REG(hw, IMC, ~0); |
3632 | } | 3767 | } |
3633 | 3768 | ||
3769 | adapter->total_tx_bytes = 0; | ||
3770 | adapter->total_rx_bytes = 0; | ||
3771 | adapter->total_tx_packets = 0; | ||
3772 | adapter->total_rx_packets = 0; | ||
3773 | |||
3634 | for (i = 0; i < E1000_MAX_INTR; i++) | 3774 | for (i = 0; i < E1000_MAX_INTR; i++) |
3635 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | 3775 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & |
3636 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) | 3776 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) |
3637 | break; | 3777 | break; |
3638 | 3778 | ||
3779 | if (likely(adapter->itr_setting & 3)) | ||
3780 | e1000_set_itr(adapter); | ||
3781 | |||
3639 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) | 3782 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) |
3640 | e1000_irq_enable(adapter); | 3783 | e1000_irq_enable(adapter); |
3641 | 3784 | ||
@@ -3683,6 +3826,8 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3683 | if ((!tx_cleaned && (work_done == 0)) || | 3826 | if ((!tx_cleaned && (work_done == 0)) || |
3684 | !netif_running(poll_dev)) { | 3827 | !netif_running(poll_dev)) { |
3685 | quit_polling: | 3828 | quit_polling: |
3829 | if (likely(adapter->itr_setting & 3)) | ||
3830 | e1000_set_itr(adapter); | ||
3686 | netif_rx_complete(poll_dev); | 3831 | netif_rx_complete(poll_dev); |
3687 | e1000_irq_enable(adapter); | 3832 | e1000_irq_enable(adapter); |
3688 | return 0; | 3833 | return 0; |
@@ -3709,6 +3854,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3709 | unsigned int count = 0; | 3854 | unsigned int count = 0; |
3710 | #endif | 3855 | #endif |
3711 | boolean_t cleaned = FALSE; | 3856 | boolean_t cleaned = FALSE; |
3857 | unsigned int total_tx_bytes=0, total_tx_packets=0; | ||
3712 | 3858 | ||
3713 | i = tx_ring->next_to_clean; | 3859 | i = tx_ring->next_to_clean; |
3714 | eop = tx_ring->buffer_info[i].next_to_watch; | 3860 | eop = tx_ring->buffer_info[i].next_to_watch; |
@@ -3720,6 +3866,13 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3720 | buffer_info = &tx_ring->buffer_info[i]; | 3866 | buffer_info = &tx_ring->buffer_info[i]; |
3721 | cleaned = (i == eop); | 3867 | cleaned = (i == eop); |
3722 | 3868 | ||
3869 | if (cleaned) { | ||
3870 | /* this packet count is wrong for TSO but has a | ||
3871 | * tendency to make dynamic ITR change more | ||
3872 | * towards bulk */ | ||
3873 | total_tx_packets++; | ||
3874 | total_tx_bytes += buffer_info->skb->len; | ||
3875 | } | ||
3723 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 3876 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
3724 | tx_desc->upper.data = 0; | 3877 | tx_desc->upper.data = 0; |
3725 | 3878 | ||
@@ -3785,6 +3938,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
3785 | netif_stop_queue(netdev); | 3938 | netif_stop_queue(netdev); |
3786 | } | 3939 | } |
3787 | } | 3940 | } |
3941 | adapter->total_tx_bytes += total_tx_bytes; | ||
3942 | adapter->total_tx_packets += total_tx_packets; | ||
3788 | return cleaned; | 3943 | return cleaned; |
3789 | } | 3944 | } |
3790 | 3945 | ||
@@ -3864,6 +4019,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3864 | unsigned int i; | 4019 | unsigned int i; |
3865 | int cleaned_count = 0; | 4020 | int cleaned_count = 0; |
3866 | boolean_t cleaned = FALSE; | 4021 | boolean_t cleaned = FALSE; |
4022 | unsigned int total_rx_bytes=0, total_rx_packets=0; | ||
3867 | 4023 | ||
3868 | i = rx_ring->next_to_clean; | 4024 | i = rx_ring->next_to_clean; |
3869 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 4025 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
@@ -3930,6 +4086,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3930 | * done after the TBI_ACCEPT workaround above */ | 4086 | * done after the TBI_ACCEPT workaround above */ |
3931 | length -= 4; | 4087 | length -= 4; |
3932 | 4088 | ||
4089 | /* probably a little skewed due to removing CRC */ | ||
4090 | total_rx_bytes += length; | ||
4091 | total_rx_packets++; | ||
4092 | |||
3933 | /* code added for copybreak, this should improve | 4093 | /* code added for copybreak, this should improve |
3934 | * performance for small packets with large amounts | 4094 | * performance for small packets with large amounts |
3935 | * of reassembly being done in the stack */ | 4095 | * of reassembly being done in the stack */ |
@@ -3998,6 +4158,8 @@ next_desc:
3998 | if (cleaned_count) | 4158 | if (cleaned_count) |
3999 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | 4159 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); |
4000 | 4160 | ||
4161 | adapter->total_rx_packets += total_rx_packets; | ||
4162 | adapter->total_rx_bytes += total_rx_bytes; | ||
4001 | return cleaned; | 4163 | return cleaned; |
4002 | } | 4164 | } |
4003 | 4165 | ||
@@ -4027,6 +4189,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4027 | uint32_t length, staterr; | 4189 | uint32_t length, staterr; |
4028 | int cleaned_count = 0; | 4190 | int cleaned_count = 0; |
4029 | boolean_t cleaned = FALSE; | 4191 | boolean_t cleaned = FALSE; |
4192 | unsigned int total_rx_bytes=0, total_rx_packets=0; | ||
4030 | 4193 | ||
4031 | i = rx_ring->next_to_clean; | 4194 | i = rx_ring->next_to_clean; |
4032 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 4195 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
@@ -4131,6 +4294,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4131 | pskb_trim(skb, skb->len - 4); | 4294 | pskb_trim(skb, skb->len - 4); |
4132 | 4295 | ||
4133 | copydone: | 4296 | copydone: |
4297 | total_rx_bytes += skb->len; | ||
4298 | total_rx_packets++; | ||
4299 | |||
4134 | e1000_rx_checksum(adapter, staterr, | 4300 | e1000_rx_checksum(adapter, staterr, |
4135 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); | 4301 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); |
4136 | skb->protocol = eth_type_trans(skb, netdev); | 4302 | skb->protocol = eth_type_trans(skb, netdev); |
@@ -4179,6 +4345,8 @@ next_desc:
4179 | if (cleaned_count) | 4345 | if (cleaned_count) |
4180 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | 4346 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); |
4181 | 4347 | ||
4348 | adapter->total_rx_packets += total_rx_packets; | ||
4349 | adapter->total_rx_bytes += total_rx_bytes; | ||
4182 | return cleaned; | 4350 | return cleaned; |
4183 | } | 4351 | } |
4184 | 4352 | ||