diff options
author | Jesse Brandeburg <jesse.brandeburg@intel.com> | 2006-12-15 04:30:44 -0500 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2006-12-26 15:51:28 -0500 |
commit | 2b65326e67f89899e8bcaed1989d8cfb0ed01f55 (patch) | |
tree | 56e78baa2b711638eba3bc7d8d9540076dfaf508 /drivers/net/e1000/e1000_main.c | |
parent | 7d16e65ba57f181732ec52626736b27904198edf (diff) |
[PATCH] e1000: dynamic itr: take TSO and jumbo into account
The dynamic interrupt rate control patches omitted proper counting
for jumbos and TSO, resulting in suboptimal interrupt mitigation strategies.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r-- | drivers/net/e1000/e1000_main.c | 40 |
1 files changed, 24 insertions, 16 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 73f3a85fd238..62ef267b3d64 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -2628,29 +2628,34 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | |||
2628 | if (packets == 0) | 2628 | if (packets == 0) |
2629 | goto update_itr_done; | 2629 | goto update_itr_done; |
2630 | 2630 | ||
2631 | |||
2632 | switch (itr_setting) { | 2631 | switch (itr_setting) { |
2633 | case lowest_latency: | 2632 | case lowest_latency: |
2634 | if ((packets < 5) && (bytes > 512)) | 2633 | /* jumbo frames get bulk treatment*/ |
2634 | if (bytes/packets > 8000) | ||
2635 | retval = bulk_latency; | ||
2636 | else if ((packets < 5) && (bytes > 512)) | ||
2635 | retval = low_latency; | 2637 | retval = low_latency; |
2636 | break; | 2638 | break; |
2637 | case low_latency: /* 50 usec aka 20000 ints/s */ | 2639 | case low_latency: /* 50 usec aka 20000 ints/s */ |
2638 | if (bytes > 10000) { | 2640 | if (bytes > 10000) { |
2639 | if ((packets < 10) || | 2641 | /* jumbo frames need bulk latency setting */ |
2640 | ((bytes/packets) > 1200)) | 2642 | if (bytes/packets > 8000) |
2643 | retval = bulk_latency; | ||
2644 | else if ((packets < 10) || ((bytes/packets) > 1200)) | ||
2641 | retval = bulk_latency; | 2645 | retval = bulk_latency; |
2642 | else if ((packets > 35)) | 2646 | else if ((packets > 35)) |
2643 | retval = lowest_latency; | 2647 | retval = lowest_latency; |
2644 | } else if (packets <= 2 && bytes < 512) | 2648 | } else if (bytes/packets > 2000) |
2649 | retval = bulk_latency; | ||
2650 | else if (packets <= 2 && bytes < 512) | ||
2645 | retval = lowest_latency; | 2651 | retval = lowest_latency; |
2646 | break; | 2652 | break; |
2647 | case bulk_latency: /* 250 usec aka 4000 ints/s */ | 2653 | case bulk_latency: /* 250 usec aka 4000 ints/s */ |
2648 | if (bytes > 25000) { | 2654 | if (bytes > 25000) { |
2649 | if (packets > 35) | 2655 | if (packets > 35) |
2650 | retval = low_latency; | 2656 | retval = low_latency; |
2651 | } else { | 2657 | } else if (bytes < 6000) { |
2652 | if (bytes < 6000) | 2658 | retval = low_latency; |
2653 | retval = low_latency; | ||
2654 | } | 2659 | } |
2655 | break; | 2660 | break; |
2656 | } | 2661 | } |
@@ -2679,17 +2684,20 @@ static void e1000_set_itr(struct e1000_adapter *adapter) | |||
2679 | adapter->tx_itr, | 2684 | adapter->tx_itr, |
2680 | adapter->total_tx_packets, | 2685 | adapter->total_tx_packets, |
2681 | adapter->total_tx_bytes); | 2686 | adapter->total_tx_bytes); |
2687 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | ||
2688 | if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) | ||
2689 | adapter->tx_itr = low_latency; | ||
2690 | |||
2682 | adapter->rx_itr = e1000_update_itr(adapter, | 2691 | adapter->rx_itr = e1000_update_itr(adapter, |
2683 | adapter->rx_itr, | 2692 | adapter->rx_itr, |
2684 | adapter->total_rx_packets, | 2693 | adapter->total_rx_packets, |
2685 | adapter->total_rx_bytes); | 2694 | adapter->total_rx_bytes); |
2695 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | ||
2696 | if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) | ||
2697 | adapter->rx_itr = low_latency; | ||
2686 | 2698 | ||
2687 | current_itr = max(adapter->rx_itr, adapter->tx_itr); | 2699 | current_itr = max(adapter->rx_itr, adapter->tx_itr); |
2688 | 2700 | ||
2689 | /* conservative mode eliminates the lowest_latency setting */ | ||
2690 | if (current_itr == lowest_latency && (adapter->itr_setting == 3)) | ||
2691 | current_itr = low_latency; | ||
2692 | |||
2693 | switch (current_itr) { | 2701 | switch (current_itr) { |
2694 | /* counts and packets in update_itr are dependent on these numbers */ | 2702 | /* counts and packets in update_itr are dependent on these numbers */ |
2695 | case lowest_latency: | 2703 | case lowest_latency: |
@@ -3868,11 +3876,11 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3868 | cleaned = (i == eop); | 3876 | cleaned = (i == eop); |
3869 | 3877 | ||
3870 | if (cleaned) { | 3878 | if (cleaned) { |
3871 | /* this packet count is wrong for TSO but has a | 3879 | struct sk_buff *skb = buffer_info->skb; |
3872 | * tendency to make dynamic ITR change more | 3880 | unsigned int segs = skb_shinfo(skb)->gso_segs; |
3873 | * towards bulk */ | 3881 | total_tx_packets += segs; |
3874 | total_tx_packets++; | 3882 | total_tx_packets++; |
3875 | total_tx_bytes += buffer_info->skb->len; | 3883 | total_tx_bytes += skb->len; |
3876 | } | 3884 | } |
3877 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 3885 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
3878 | tx_desc->upper.data = 0; | 3886 | tx_desc->upper.data = 0; |