author		Alexander Duyck <alexander.h.duyck@intel.com>	2011-07-14 23:05:51 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-09-16 00:16:43 -0400
commit		4cc6df29d9f4cf90dad8167cbbf5c21810ae56cf (patch)
tree		f7ed23af5645ec715141d129e6059dc4eb7b3ed9 /drivers
parent		263a84e785deb3613bbdd01a071b0bde429c3804 (diff)
ixgbe: commonize ixgbe_map_rings_to_vectors to work for all interrupt types
This patch makes it so that the map_rings_to_vectors call will work with
all interrupt types. The advantage to this is that there will now be a
predictable mapping for all given interrupt types.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	88
1 files changed, 34 insertions, 54 deletions
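
For context, the sketch below mirrors the distribution math that the reworked
ixgbe_map_rings_to_vectors() uses in the first hunk of the diff: at each step,
DIV_ROUND_UP() gives the current q_vector a near-even share of the rings that
are still unmapped. This is a standalone userspace illustration under assumed
values, not driver code; map_rings(), per_vector and the 10-ring/4-vector
counts are made up for the example.

#include <stdio.h>

/* userspace copy of the kernel's DIV_ROUND_UP() helper */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Toy stand-in for the driver's mapping loops: prints which vector each
 * ring would land on when num_rings rings are spread over q_vectors.
 */
static void map_rings(int num_rings, int q_vectors, const char *name)
{
	int remaining = num_rings;
	int ring_idx = 0;
	int v_start = 0;

	for (; v_start < q_vectors && remaining; v_start++) {
		/* rings this vector takes: ceil(remaining / vectors left) */
		int per_vector = DIV_ROUND_UP(remaining, q_vectors - v_start);

		for (; per_vector; per_vector--, ring_idx++, remaining--)
			printf("%s ring %d -> vector %d\n", name, ring_idx, v_start);
	}
}

int main(void)
{
	/* e.g. 10 Rx rings and 10 Tx rings shared by 4 q_vectors */
	map_rings(10, 4, "rx");
	map_rings(10, 4, "tx");
	return 0;
}

With 10 rings over 4 vectors this assigns 3, 3, 2 and 2 rings per vector;
recomputing DIV_ROUND_UP() on every iteration spreads the leftover rings
across the earliest vectors, which matches the commit's goal of a predictable
mapping for any number of q_vectors.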
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0564c659fb94..dd1b57ba190a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2014,59 +2014,41 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
  * group the rings as "efficiently" as possible. You would add new
  * mapping configurations in here.
  **/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
+static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
 {
-	int q_vectors;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
+	int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
 	int v_start = 0;
-	int rxr_idx = 0, txr_idx = 0;
-	int rxr_remaining = adapter->num_rx_queues;
-	int txr_remaining = adapter->num_tx_queues;
-	int i, j;
-	int rqpv, tqpv;
-	int err = 0;
 
-	/* No mapping required if MSI-X is disabled. */
+	/* only one q_vector if MSI-X is disabled. */
 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-		goto out;
-
-	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+		q_vectors = 1;
 
 	/*
-	 * The ideal configuration...
-	 * We have enough vectors to map one per queue.
+	 * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+	 * group them so there are multiple queues per vector.
+	 *
+	 * Re-adjusting *qpv takes care of the remainder.
 	 */
-	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
-		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+	for (; v_start < q_vectors && rxr_remaining; v_start++) {
+		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
+		for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
 			map_vector_to_rxq(adapter, v_start, rxr_idx);
-
-		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
-			map_vector_to_txq(adapter, v_start, txr_idx);
-
-		goto out;
 	}
 
 	/*
-	 * If we don't have enough vectors for a 1-to-1
-	 * mapping, we'll have to group them so there are
-	 * multiple queues per vector.
+	 * If there are not enough q_vectors for each ring to have it's own
+	 * vector then we must pair up Rx/Tx on a each vector
 	 */
-	/* Re-adjusting *qpv takes care of the remainder. */
-	for (i = v_start; i < q_vectors; i++) {
-		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
-		for (j = 0; j < rqpv; j++) {
-			map_vector_to_rxq(adapter, i, rxr_idx);
-			rxr_idx++;
-			rxr_remaining--;
-		}
-		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
-		for (j = 0; j < tqpv; j++) {
-			map_vector_to_txq(adapter, i, txr_idx);
-			txr_idx++;
-			txr_remaining--;
-		}
+	if ((v_start + txr_remaining) > q_vectors)
+		v_start = 0;
+
+	for (; v_start < q_vectors && txr_remaining; v_start++) {
+		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
+		for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
+			map_vector_to_txq(adapter, v_start, txr_idx);
 	}
-out:
-	return err;
 }
 
 /**
@@ -2083,10 +2065,6 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	int vector, err;
 	int ri = 0, ti = 0;
 
-	err = ixgbe_map_rings_to_vectors(adapter);
-	if (err)
-		return err;
-
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
 		struct msix_entry *entry = &adapter->msix_entries[vector];
@@ -2294,19 +2272,25 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	int err;
 
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+	/* map all of the rings to the q_vectors */
+	ixgbe_map_rings_to_vectors(adapter);
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 		err = ixgbe_request_msix_irqs(adapter);
-	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+	else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
 				  netdev->name, adapter);
-	} else {
+	else
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
 				  netdev->name, adapter);
-	}
 
-	if (err)
+	if (err) {
 		e_err(probe, "request_irq failed, Error %d\n", err);
 
+		/* place q_vectors and rings back into a known good state */
+		ixgbe_reset_q_vectors(adapter);
+	}
+
 	return err;
 }
 
@@ -2316,11 +2300,10 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 	int i, q_vectors;
 
 	q_vectors = adapter->num_msix_vectors;
-
 	i = q_vectors - 1;
 	free_irq(adapter->msix_entries[i].vector, adapter);
-
 	i--;
+
 	for (; i >= 0; i--) {
 		/* free only the irqs that were actually requested */
 		if (!adapter->q_vector[i]->rx.ring &&
@@ -2387,9 +2370,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 	ixgbe_set_ivar(adapter, 0, 0, 0);
 	ixgbe_set_ivar(adapter, 1, 0, 0);
 
-	map_vector_to_rxq(adapter, 0, 0);
-	map_vector_to_txq(adapter, 0, 0);
-
 	e_info(hw, "Legacy interrupt IVAR setup done\n");
 }
 