author		Alexander Duyck <alexander.h.duyck@intel.com>	2011-06-10 21:45:08 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-07-22 01:56:17 -0400
commit		bd19805803a954415ec36a559fd3b8a0a3647d7c
tree		f0c61bccd3bd2e019c5bec692171532a4566a326
parent		08c8833b29cfa4343ff132eebc5648b234eb3f85
ixgbe: Move interrupt related values out of ring and into q_vector
This change moves work_limit, total_packets, and total_bytes out of the ring
and into the ring container struct of the q_vector. The advantage of this is
that it should reduce the size of memory used in the event of multiple rings
being assigned to a single q_vector. In addition it should help to reduce the
total workload for calculating ITR, since total_packets and total_bytes are
now the total work done for the interrupt instead of for a single ring.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
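For orientation, a simplified before/after sketch of the field move described
above; unrelated fields are omitted, the typedefs stand in for the kernel
types, and the _before/_after names are illustrative rather than part of the
driver:

#include <stdint.h>

typedef uint8_t  u8;
typedef uint16_t u16;

/* before: every ring carried its own interrupt bookkeeping */
struct ixgbe_ring_before {
	u16 work_limit;              /* max work per interrupt */
	unsigned int total_bytes;
	unsigned int total_packets;
	/* ... descriptor ring state ... */
};

/* after: the bookkeeping lives once per direction in the q_vector's
 * ring container, shared by every ring the vector services */
struct ixgbe_ring_container_after {
	unsigned int total_bytes;    /* total bytes processed this int */
	unsigned int total_packets;  /* total packets processed this int */
	u16 work_limit;              /* total work allowed per interrupt */
	u8 count;                    /* total number of rings in vector */
	u8 itr;                      /* current ITR setting for ring */
};

With several rings assigned to one vector, the counters and the limit exist
once per direction instead of once per ring.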
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--	drivers/net/ixgbe/ixgbe.h		 24
-rw-r--r--	drivers/net/ixgbe/ixgbe_ethtool.c	  6
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c		170
3 files changed, 62 insertions(+), 138 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index a5ed9fd69fdd..fbae7352b036 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -214,12 +214,10 @@ struct ixgbe_ring {
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
 	unsigned long state;
-	u8 atr_sample_rate;
-	u8 atr_count;
+	u8 __iomem *tail;
+
 	u16 count;			/* amount of descriptors */
 	u16 rx_buf_len;
-	u16 next_to_use;
-	u16 next_to_clean;
 
 	u8 queue_index; /* needed for multiqueue queue management */
 	u8 reg_idx;			/* holds the special value that gets
@@ -227,15 +225,13 @@ struct ixgbe_ring {
 					 * associated with this ring, which is
 					 * different for DCB and RSS modes
 					 */
-	u8 dcb_tc;
-
-	u16 work_limit;			/* max work per interrupt */
-
-	u8 __iomem *tail;
+	u8 atr_sample_rate;
+	u8 atr_count;
 
-	unsigned int total_bytes;
-	unsigned int total_packets;
+	u16 next_to_use;
+	u16 next_to_clean;
 
+	u8 dcb_tc;
 	struct ixgbe_queue_stats stats;
 	struct u64_stats_sync syncp;
 	union {
@@ -283,6 +279,9 @@ struct ixgbe_ring_container {
 #else
 	DECLARE_BITMAP(idx, MAX_TX_QUEUES);
 #endif
+	unsigned int total_bytes;	/* total bytes processed this int */
+	unsigned int total_packets;	/* total packets processed this int */
+	u16 work_limit;			/* total work allowed per interrupt */
 	u8 count;			/* total number of rings in vector */
 	u8 itr;				/* current ITR setting for ring */
 };
@@ -417,6 +416,9 @@ struct ixgbe_adapter {
 	u16 eitr_low;
 	u16 eitr_high;
 
+	/* Work limits */
+	u16 tx_work_limit;
+
 	/* TX */
 	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_tx_queues;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index cd842f9dcae5..6035cfa1b4c7 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -2103,7 +2103,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
+	ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
 
 	/* only valid if in constant ITR mode */
 	switch (adapter->rx_itr_setting) {
@@ -2192,7 +2192,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		return -EINVAL;
 
 	if (ec->tx_max_coalesced_frames_irq)
-		adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
+		adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
 
 	if (ec->rx_coalesce_usecs > 1) {
 		/* check the limits */
@@ -2267,12 +2267,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 			else
 				/* rx only or mixed */
 				q_vector->eitr = adapter->rx_eitr_param;
+			q_vector->tx.work_limit = adapter->tx_work_limit;
 			ixgbe_write_eitr(q_vector);
 		}
 	/* Legacy Interrupt Mode */
 	} else {
 		q_vector = adapter->q_vector[0];
 		q_vector->eitr = adapter->rx_eitr_param;
+		q_vector->tx.work_limit = adapter->tx_work_limit;
 		ixgbe_write_eitr(q_vector);
 	}
 
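The ethtool hunks above are the only user-visible entry point for the new
limit: the frame count arriving as tx_max_coalesced_frames_irq (set from
userspace with something like "ethtool -C ethX tx-frames-irq 64") is stored
once in adapter->tx_work_limit and then copied into each vector's tx
container. A minimal user-space model of that propagation, with hypothetical
stand-in types (the four-vector count is arbitrary):

#include <stdio.h>

/* stand-ins for the driver structures touched by the hunks above */
struct ring_container { unsigned short work_limit; };
struct q_vector { struct ring_container tx; };
struct adapter { unsigned short tx_work_limit; struct q_vector v[4]; };

int main(void)
{
	struct adapter a = { 0 };
	unsigned int requested = 64;	/* tx_max_coalesced_frames_irq */

	a.tx_work_limit = requested;	/* set_coalesce stores it once */
	for (int i = 0; i < 4; i++)	/* then pushes it per vector */
		a.v[i].tx.work_limit = a.tx_work_limit;

	printf("vector 0 tx work_limit = %u\n", a.v[0].tx.work_limit);
	return 0;
}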
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index bd1fd8f422dc..298c95b1480f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -805,7 +805,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 
 	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-	       (count < tx_ring->work_limit)) {
+	       (count < q_vector->tx.work_limit)) {
 		bool cleaned = false;
 		rmb(); /* read buffer_info after eop_desc */
 		for ( ; !cleaned; count++) {
@@ -834,11 +834,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	}
 
 	tx_ring->next_to_clean = i;
-	tx_ring->total_bytes += total_bytes;
-	tx_ring->total_packets += total_packets;
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->stats.packets += total_packets;
 	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	u64_stats_update_begin(&tx_ring->syncp);
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
 	u64_stats_update_end(&tx_ring->syncp);
 
 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -886,7 +886,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		}
 	}
 
-	return count < tx_ring->work_limit;
+	return count < q_vector->tx.work_limit;
 }
 
 #ifdef CONFIG_IXGBE_DCA
@@ -1486,12 +1486,12 @@ next_desc:
 	}
 #endif /* IXGBE_FCOE */
 
-	rx_ring->total_packets += total_rx_packets;
-	rx_ring->total_bytes += total_rx_bytes;
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
 	rx_ring->stats.bytes += total_rx_bytes;
 	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
 }
 
 static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1597,11 +1597,8 @@ enum latency_range {
 
 /**
  * ixgbe_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @eitr: eitr setting (ints per sec) to give last timeslice
- * @itr_setting: current throttle rate in ints/second
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
  *
  * Stores a new ITR value based on packets and byte
  * counts during the last interrupt.  The advantage of per interrupt
@@ -1613,17 +1610,18 @@ enum latency_range {
  * this functionality is controlled by the InterruptThrottleRate module
  * parameter (see ixgbe_param.c)
 **/
-static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
-			   u32 eitr, u8 itr_setting,
-			   int packets, int bytes)
+static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
+			     struct ixgbe_ring_container *ring_container)
 {
-	unsigned int retval = itr_setting;
-	u32 timepassed_us;
 	u64 bytes_perint;
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	int bytes = ring_container->total_bytes;
+	int packets = ring_container->total_packets;
+	u32 timepassed_us;
+	u8 itr_setting = ring_container->itr;
 
 	if (packets == 0)
-		goto update_itr_done;
-
+		return;
 
 	/* simple throttlerate management
 	 *    0-20MB/s lowest (100000 ints/s)
@@ -1631,28 +1629,32 @@ static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
 	 *  100-1249MB/s bulk (8000 ints/s)
 	 */
 	/* what was last interrupt timeslice? */
-	timepassed_us = 1000000/eitr;
+	timepassed_us = 1000000/q_vector->eitr;
 	bytes_perint = bytes / timepassed_us; /* bytes/usec */
 
 	switch (itr_setting) {
 	case lowest_latency:
 		if (bytes_perint > adapter->eitr_low)
-			retval = low_latency;
+			itr_setting = low_latency;
 		break;
 	case low_latency:
 		if (bytes_perint > adapter->eitr_high)
-			retval = bulk_latency;
+			itr_setting = bulk_latency;
 		else if (bytes_perint <= adapter->eitr_low)
-			retval = lowest_latency;
+			itr_setting = lowest_latency;
 		break;
 	case bulk_latency:
 		if (bytes_perint <= adapter->eitr_high)
-			retval = low_latency;
+			itr_setting = low_latency;
 		break;
 	}
 
-update_itr_done:
-	return retval;
+	/* clear work counters since we have the values we need */
+	ring_container->total_bytes = 0;
+	ring_container->total_packets = 0;
+
+	/* write updated itr to ring container */
+	ring_container->itr = itr_setting;
 }
 
 /**
@@ -1698,42 +1700,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
 }
 
-static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
+static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
 {
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-	int i, r_idx;
-	u32 new_itr;
-	u8 current_itr, ret_itr;
-
-	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->tx.count; i++) {
-		struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
-		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-					   q_vector->tx.itr,
-					   tx_ring->total_packets,
-					   tx_ring->total_bytes);
-		/* if the result for this queue would decrease interrupt
-		 * rate for this vector then use that result */
-		q_vector->tx.itr = ((q_vector->tx.itr > ret_itr) ?
-				    q_vector->tx.itr - 1 : ret_itr);
-		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
-				      r_idx + 1);
-	}
+	u32 new_itr = q_vector->eitr;
+	u8 current_itr;
 
-	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rx.count; i++) {
-		struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
-		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-					   q_vector->rx.itr,
-					   rx_ring->total_packets,
-					   rx_ring->total_bytes);
-		/* if the result for this queue would decrease interrupt
-		 * rate for this vector then use that result */
-		q_vector->rx.itr = ((q_vector->rx.itr > ret_itr) ?
-				    q_vector->rx.itr - 1 : ret_itr);
-		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
-				      r_idx + 1);
-	}
+	ixgbe_update_itr(q_vector, &q_vector->tx);
+	ixgbe_update_itr(q_vector, &q_vector->rx);
 
 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
 
@@ -1746,16 +1719,17 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 		new_itr = 20000; /* aka hwitr = ~200 */
 		break;
 	case bulk_latency:
-	default:
 		new_itr = 8000;
 		break;
+	default:
+		break;
 	}
 
 	if (new_itr != q_vector->eitr) {
 		/* do an exponential smoothing */
 		new_itr = ((q_vector->eitr * 9) + new_itr)/10;
 
-		/* save the algorithm value here, not the smoothed one */
+		/* save the algorithm value here */
 		q_vector->eitr = new_itr;
 
 		ixgbe_write_eitr(q_vector);
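The exponential smoothing at the end of ixgbe_set_itr keeps the hardware rate
from jumping in a single step: each pass moves eitr one tenth of the way
toward the rate picked by the switch statement. A small stand-alone
illustration of the same formula:

#include <stdio.h>

int main(void)
{
	unsigned int eitr = 20000;	/* current rate (ints/s) */
	unsigned int target = 8000;	/* rate picked for bulk_latency */

	/* same smoothing step as above, applied over several interrupts:
	 * 18800, 17720, 16748, 15873, 15085, ... converging toward 8000 */
	for (int i = 0; i < 5; i++) {
		eitr = ((eitr * 9) + target) / 10;
		printf("after interrupt %d: eitr = %u\n", i + 1, eitr);
	}
	return 0;
}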
@@ -2001,8 +1975,6 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->tx.count; i++) {
 		tx_ring = adapter->tx_ring[r_idx];
-		tx_ring->total_bytes = 0;
-		tx_ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
 				      r_idx + 1);
 	}
@@ -2034,8 +2006,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rx.count; i++) {
 		rx_ring = adapter->rx_ring[r_idx];
-		rx_ring->total_bytes = 0;
-		rx_ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
 				      r_idx + 1);
 	}
@@ -2063,8 +2033,6 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->tx.count; i++) {
 		ring = adapter->tx_ring[r_idx];
-		ring->total_bytes = 0;
-		ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
 				      r_idx + 1);
 	}
@@ -2072,8 +2040,6 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rx.count; i++) {
 		ring = adapter->rx_ring[r_idx];
-		ring->total_bytes = 0;
-		ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
 				      r_idx + 1);
 	}
@@ -2115,7 +2081,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr_msix(q_vector);
+			ixgbe_set_itr(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
 						((u64)1 << q_vector->v_idx));
@@ -2173,7 +2139,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr_msix(q_vector);
+			ixgbe_set_itr(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
 						((u64)1 << q_vector->v_idx));
@@ -2215,7 +2181,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->tx_itr_setting & 1)
-			ixgbe_set_itr_msix(q_vector);
+			ixgbe_set_itr(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
 						((u64)1 << q_vector->v_idx));
@@ -2244,6 +2210,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
 	set_bit(t_idx, q_vector->tx.idx);
 	q_vector->tx.count++;
 	tx_ring->q_vector = q_vector;
+	q_vector->tx.work_limit = a->tx_work_limit;
 }
 
 /**
@@ -2386,51 +2353,6 @@ free_queue_irqs:
 	return err;
 }
 
-static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
-	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
-	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
-	u32 new_itr = q_vector->eitr;
-	u8 current_itr;
-
-	q_vector->tx.itr = ixgbe_update_itr(adapter, new_itr,
-					    q_vector->tx.itr,
-					    tx_ring->total_packets,
-					    tx_ring->total_bytes);
-	q_vector->rx.itr = ixgbe_update_itr(adapter, new_itr,
-					    q_vector->rx.itr,
-					    rx_ring->total_packets,
-					    rx_ring->total_bytes);
-
-	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
-
-	switch (current_itr) {
-	/* counts and packets in update_itr are dependent on these numbers */
-	case lowest_latency:
-		new_itr = 100000;
-		break;
-	case low_latency:
-		new_itr = 20000; /* aka hwitr = ~200 */
-		break;
-	case bulk_latency:
-		new_itr = 8000;
-		break;
-	default:
-		break;
-	}
-
-	if (new_itr != q_vector->eitr) {
-		/* do an exponential smoothing */
-		new_itr = ((q_vector->eitr * 9) + new_itr)/10;
-
-		/* save the algorithm value here */
-		q_vector->eitr = new_itr;
-
-		ixgbe_write_eitr(q_vector);
-	}
-}
-
 /**
  * ixgbe_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
@@ -2528,10 +2450,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 	ixgbe_check_fan_failure(adapter, eicr);
 
 	if (napi_schedule_prep(&(q_vector->napi))) {
-		adapter->tx_ring[0]->total_packets = 0;
-		adapter->tx_ring[0]->total_bytes = 0;
-		adapter->rx_ring[0]->total_packets = 0;
-		adapter->rx_ring[0]->total_bytes = 0;
 		/* would disable interrupts here but EIAM disabled it */
 		__napi_schedule(&(q_vector->napi));
 	}
@@ -4299,7 +4217,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr(adapter);
+			ixgbe_set_itr(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
 	}
@@ -5224,6 +5142,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
 	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
+	/* set default work limits */
+	adapter->tx_work_limit = adapter->tx_ring_count;
+
 	/* initialize eeprom parameters */
 	if (ixgbe_init_eeprom_params_generic(hw)) {
 		e_dev_err("EEPROM initialization failed\n");
@@ -5270,7 +5191,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-	tx_ring->work_limit = tx_ring->count;
 	return 0;
 
 err: