author	Alexander Duyck <alexander.h.duyck@intel.com>	2011-08-30 20:01:11 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-09-16 00:09:44 -0400
commit	4ff7fb12cf92fd15e0fbae0b36cca0599f8a7d1b
tree	bca4c1422c6882818394debdd686020e39287125 /drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
parent	592245559e9007845ef6603cc930c784031eb076
v2 ixgbe: consolidate all MSI-X ring interrupts and poll routines into one
This change consolidates all of the MSI-X interrupt and polling routines into two single functions: one for the interrupt handler and one for the polling routine. The main advantage of doing this is that the compiler can optimize the routines into single monolithic functions, which should allow each of them to occupy a single contiguous block of memory and thus avoid jumping around.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
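For orientation before the hunks: the consolidated shape introduced by this patch looks roughly as follows. This is a condensed paraphrase of the two new functions in the diff below, with the DCA hook, ITR update, and interrupt re-enable paths elided; see the full hunks for the real code.

static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;

	/* EIAM already masked this vector; just hand off to NAPI */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	/* clean every Tx ring hung off this vector */
	for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);

	/* split the budget fairly across Rx rings, never below 1 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget / q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
		clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
						     per_ring_budget);

	if (!clean_complete)
		return budget;		/* keep polling */

	napi_complete(napi);
	/* ITR update and per-vector interrupt re-enable elided */
	return 0;
}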
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	279

1 file changed, 63 insertions(+), 216 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0f633ad9e8cd..3ce0277cdbf3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1297,9 +1297,9 @@ static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
 		IXGBE_RXDADV_RSCCNT_MASK);
 }
 
-static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *rx_ring,
-			       int *work_done, int work_to_do)
+			       int budget)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
@@ -1479,11 +1479,11 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
 		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 
+		budget--;
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
 
-		(*work_done)++;
-		if (*work_done >= work_to_do)
+		if (!budget)
 			break;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -1524,9 +1524,10 @@ next_desc:
 	u64_stats_update_end(&rx_ring->syncp);
 	q_vector->rx.total_packets += total_rx_packets;
 	q_vector->rx.total_bytes += total_rx_bytes;
+
+	return !!budget;
 }
 
-static int ixgbe_clean_rxonly(struct napi_struct *, int);
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -1980,167 +1981,18 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
 	/* skip the flush */
 }
 
-static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
-{
-	struct ixgbe_q_vector *q_vector = data;
-
-	if (!q_vector->tx.count)
-		return IRQ_HANDLED;
-
-	/* EIAM disabled interrupts (on this vector) for us */
-	napi_schedule(&q_vector->napi);
-
-	return IRQ_HANDLED;
-}
-
-/**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
- * @irq: unused
- * @data: pointer to our q_vector struct for this interrupt vector
- **/
-static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
+static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
 {
 	struct ixgbe_q_vector *q_vector = data;
 
-	if (!q_vector->rx.count)
-		return IRQ_HANDLED;
-
 	/* EIAM disabled interrupts (on this vector) for us */
-	napi_schedule(&q_vector->napi);
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
-{
-	struct ixgbe_q_vector *q_vector = data;
 
-	if (!q_vector->tx.count && !q_vector->rx.count)
-		return IRQ_HANDLED;
-
-	/* EIAM disabled interrupts (on this vector) for us */
-	napi_schedule(&q_vector->napi);
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
 }
 
-/**
- * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
-{
-	struct ixgbe_q_vector *q_vector =
-			       container_of(napi, struct ixgbe_q_vector, napi);
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-	int work_done = 0;
-
-#ifdef CONFIG_IXGBE_DCA
-	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-		ixgbe_update_dca(q_vector);
-#endif
-
-	ixgbe_clean_rx_irq(q_vector, q_vector->rx.ring, &work_done, budget);
-
-	/* If all Rx work done, exit the polling mode */
-	if (work_done < budget) {
-		napi_complete(napi);
-		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr(q_vector);
-		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			ixgbe_irq_enable_queues(adapter,
-						((u64)1 << q_vector->v_idx));
-	}
-
-	return work_done;
-}
-
-/**
- * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean more than one rx queue associated with a
- * q_vector.
- **/
-static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
-{
-	struct ixgbe_q_vector *q_vector =
-			       container_of(napi, struct ixgbe_q_vector, napi);
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-	struct ixgbe_ring *ring;
-	int work_done = 0;
-	bool clean_complete = true;
-
-#ifdef CONFIG_IXGBE_DCA
-	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-		ixgbe_update_dca(q_vector);
-#endif
-
-	for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
-		clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
-
-	/* attempt to distribute budget to each queue fairly, but don't allow
-	 * the budget to go below 1 because we'll exit polling */
-	budget /= (q_vector->rx.count ?: 1);
-	budget = max(budget, 1);
-
-	for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
-		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
-
-	if (!clean_complete)
-		work_done = budget;
-
-	/* If all Rx work done, exit the polling mode */
-	if (work_done < budget) {
-		napi_complete(napi);
-		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr(q_vector);
-		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			ixgbe_irq_enable_queues(adapter,
-						((u64)1 << q_vector->v_idx));
-		return 0;
-	}
-
-	return work_done;
-}
-
-/**
- * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
-{
-	struct ixgbe_q_vector *q_vector =
-			       container_of(napi, struct ixgbe_q_vector, napi);
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-
-#ifdef CONFIG_IXGBE_DCA
-	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-		ixgbe_update_dca(q_vector);
-#endif
-
-	if (!ixgbe_clean_tx_irq(q_vector, q_vector->tx.ring))
-		return budget;
-
-	/* If all Tx work done, exit the polling mode */
-	napi_complete(napi);
-	if (adapter->tx_itr_setting & 1)
-		ixgbe_set_itr(q_vector);
-	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
-
-	return 0;
-}
-
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 				     int r_idx)
 {
@@ -2241,7 +2093,6 @@ out:
 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	irqreturn_t (*handler)(int, void *);
 	int i, vector, q_vectors, err;
 	int ri = 0, ti = 0;
 
@@ -2252,31 +2103,25 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	if (err)
 		return err;
 
-#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count)        \
-					  ? &ixgbe_msix_clean_many : \
-			  (_v)->rx.count ? &ixgbe_msix_clean_rx   : \
-			  (_v)->tx.count ? &ixgbe_msix_clean_tx   : \
-			  NULL)
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
-		handler = SET_HANDLER(q_vector);
 
-		if (handler == &ixgbe_msix_clean_rx) {
+		if (q_vector->tx.ring && q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-				 "%s-%s-%d", netdev->name, "rx", ri++);
-		} else if (handler == &ixgbe_msix_clean_tx) {
+				 "%s-%s-%d", netdev->name, "TxRx", ri++);
+			ti++;
+		} else if (q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-				 "%s-%s-%d", netdev->name, "tx", ti++);
-		} else if (handler == &ixgbe_msix_clean_many) {
+				 "%s-%s-%d", netdev->name, "rx", ri++);
+		} else if (q_vector->tx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-				 "%s-%s-%d", netdev->name, "TxRx", ri++);
-			ti++;
+				 "%s-%s-%d", netdev->name, "tx", ti++);
 		} else {
 			/* skip this unused q_vector */
 			continue;
 		}
 		err = request_irq(adapter->msix_entries[vector].vector,
-				  handler, 0, q_vector->name,
+				  &ixgbe_msix_clean_rings, 0, q_vector->name,
 				  q_vector);
 		if (err) {
 			e_err(probe, "request_irq failed for MSIX interrupt "
@@ -2484,8 +2329,8 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 		i--;
 		for (; i >= 0; i--) {
 			/* free only the irqs that were actually requested */
-			if (!adapter->q_vector[i]->rx.count &&
-			    !adapter->q_vector[i]->tx.count)
+			if (!adapter->q_vector[i]->rx.ring &&
+			    !adapter->q_vector[i]->tx.ring)
 				continue;
 
 			free_irq(adapter->msix_entries[i].vector,
@@ -3478,19 +3323,8 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 		q_vectors = 1;
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		struct napi_struct *napi;
 		q_vector = adapter->q_vector[q_idx];
-		napi = &q_vector->napi;
-		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-			if (!q_vector->rx.count || !q_vector->tx.count) {
-				if (q_vector->tx.count == 1)
-					napi->poll = &ixgbe_clean_txonly;
-				else if (q_vector->rx.count == 1)
-					napi->poll = &ixgbe_clean_rxonly;
-			}
-		}
-
-		napi_enable(napi);
+		napi_enable(&q_vector->napi);
 	}
 }
 
@@ -4148,28 +3982,41 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 	struct ixgbe_q_vector *q_vector =
 				container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	int tx_clean_complete, work_done = 0;
+	struct ixgbe_ring *ring;
+	int per_ring_budget;
+	bool clean_complete = true;
 
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 		ixgbe_update_dca(q_vector);
 #endif
 
-	tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
-	ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
+	for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
 
-	if (!tx_clean_complete)
-		work_done = budget;
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling */
+	if (q_vector->rx.count > 1)
+		per_ring_budget = max(budget/q_vector->rx.count, 1);
+	else
+		per_ring_budget = budget;
 
-	/* If budget not fully consumed, exit the polling mode */
-	if (work_done < budget) {
-		napi_complete(napi);
-		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr(q_vector);
-		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
-	}
-	return work_done;
+	for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+		clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
+						     per_ring_budget);
+
+	/* If all work not completed, return budget and keep polling */
+	if (!clean_complete)
+		return budget;
+
+	/* all work done, exit the polling mode */
+	napi_complete(napi);
+	if (adapter->rx_itr_setting & 1)
+		ixgbe_set_itr(q_vector);
+	if (!test_bit(__IXGBE_DOWN, &adapter->state))
+		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+
+	return 0;
 }
 
 /**
@@ -4810,19 +4657,15 @@ out:
  **/
 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 {
-	int q_idx, num_q_vectors;
+	int v_idx, num_q_vectors;
 	struct ixgbe_q_vector *q_vector;
-	int (*poll)(struct napi_struct *, int);
 
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-		poll = &ixgbe_clean_rxtx_many;
-	} else {
+	else
 		num_q_vectors = 1;
-		poll = &ixgbe_poll;
-	}
 
-	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
 		q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
 					GFP_KERNEL, adapter->node);
 		if (!q_vector)
@@ -4830,25 +4673,29 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 					GFP_KERNEL);
 		if (!q_vector)
 			goto err_out;
+
 		q_vector->adapter = adapter;
+		q_vector->v_idx = v_idx;
+
 		if (q_vector->tx.count && !q_vector->rx.count)
 			q_vector->eitr = adapter->tx_eitr_param;
 		else
 			q_vector->eitr = adapter->rx_eitr_param;
-		q_vector->v_idx = q_idx;
-		netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
-		adapter->q_vector[q_idx] = q_vector;
+
+		netif_napi_add(adapter->netdev, &q_vector->napi,
+			       ixgbe_poll, 64);
+		adapter->q_vector[v_idx] = q_vector;
 	}
 
 	return 0;
 
 err_out:
-	while (q_idx) {
-		q_idx--;
-		q_vector = adapter->q_vector[q_idx];
+	while (v_idx) {
+		v_idx--;
+		q_vector = adapter->q_vector[v_idx];
 		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
-		adapter->q_vector[q_idx] = NULL;
+		adapter->q_vector[v_idx] = NULL;
 	}
 	return -ENOMEM;
 }
@@ -6960,7 +6807,7 @@ static void ixgbe_netpoll(struct net_device *netdev)
 		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 		for (i = 0; i < num_q_vectors; i++) {
 			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-			ixgbe_msix_clean_many(0, q_vector);
+			ixgbe_msix_clean_rings(0, q_vector);
 		}
 	} else {
 		ixgbe_intr(adapter->pdev->irq, netdev);
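A closing note on the budget arithmetic in the new ixgbe_poll(): with the 64-packet NAPI weight this patch passes to netif_napi_add() and four Rx rings on one vector, each ring gets max(64/4, 1) = 16 descriptors per poll; with more rings than budget the share floors at 1 so each ring still makes progress and polling can exit. A standalone userspace illustration of just that arithmetic (not driver code):

#include <stdio.h>

/* mirrors the per_ring_budget computation in ixgbe_poll() */
static int per_ring_budget(int budget, int rx_count)
{
	if (rx_count > 1) {
		int share = budget / rx_count;
		return share > 0 ? share : 1;	/* never below 1 */
	}
	return budget;
}

int main(void)
{
	printf("%d\n", per_ring_budget(64, 1));		/* 64: single ring keeps it all */
	printf("%d\n", per_ring_budget(64, 4));		/* 16: fair split */
	printf("%d\n", per_ring_budget(64, 100));	/* 1: floored */
	return 0;
}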