aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2010-11-16 22:27:09 -0500
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2010-11-16 22:27:09 -0500
commitd0759ebb051972f8557a19aa13cf02fc314856e9 (patch)
tree75b7d8bf65e32d7bd734063f1a7d4f5b224676bd
parent125601bf03a13e24d3785ccbc3a25ad401c92772 (diff)
ixgbe: cleanup ixgbe_map_rings_to_vectors
This change cleans up some of the items in ixgbe_map_rings_to_vectors. Specifically it merges the two for loops and drops the unnecessary vectors parameter. It also moves the vector names into the q_vectors themselves.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r--drivers/net/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c55
2 files changed, 30 insertions, 28 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 6d9fcb4e0854..ce43c9352681 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -282,6 +282,7 @@ struct ixgbe_q_vector {
282 u8 rx_itr; 282 u8 rx_itr;
283 u32 eitr; 283 u32 eitr;
284 cpumask_var_t affinity_mask; 284 cpumask_var_t affinity_mask;
285 char name[IFNAMSIZ + 9];
285}; 286};
286 287
287/* Helper macros to switch between ints/sec and what the register uses. 288/* Helper macros to switch between ints/sec and what the register uses.
@@ -330,7 +331,6 @@ struct ixgbe_adapter {
330 u16 bd_number; 331 u16 bd_number;
331 struct work_struct reset_task; 332 struct work_struct reset_task;
332 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 333 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
333 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
334 struct ixgbe_dcb_config dcb_cfg; 334 struct ixgbe_dcb_config dcb_cfg;
335 struct ixgbe_dcb_config temp_dcb_cfg; 335 struct ixgbe_dcb_config temp_dcb_cfg;
336 u8 dcb_set_bitmap; 336 u8 dcb_set_bitmap;
@@ -453,6 +453,7 @@ struct ixgbe_adapter {
453 int node; 453 int node;
454 struct work_struct check_overtemp_task; 454 struct work_struct check_overtemp_task;
455 u32 interrupt_event; 455 u32 interrupt_event;
456 char lsc_int_name[IFNAMSIZ + 9];
456 457
457 /* SR-IOV */ 458 /* SR-IOV */
458 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 459 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 3ad754824ffa..0d6422c59723 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2130,7 +2130,6 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2130/** 2130/**
2131 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors 2131 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2132 * @adapter: board private structure to initialize 2132 * @adapter: board private structure to initialize
2133 * @vectors: allotted vector count for descriptor rings
2134 * 2133 *
2135 * This function maps descriptor rings to the queue-specific vectors 2134 * This function maps descriptor rings to the queue-specific vectors
2136 * we were allotted through the MSI-X enabling code. Ideally, we'd have 2135 * we were allotted through the MSI-X enabling code. Ideally, we'd have
@@ -2138,9 +2137,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2138 * group the rings as "efficiently" as possible. You would add new 2137 * group the rings as "efficiently" as possible. You would add new
2139 * mapping configurations in here. 2138 * mapping configurations in here.
2140 **/ 2139 **/
2141static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 2140static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
2142 int vectors)
2143{ 2141{
2142 int q_vectors;
2144 int v_start = 0; 2143 int v_start = 0;
2145 int rxr_idx = 0, txr_idx = 0; 2144 int rxr_idx = 0, txr_idx = 0;
2146 int rxr_remaining = adapter->num_rx_queues; 2145 int rxr_remaining = adapter->num_rx_queues;
@@ -2153,11 +2152,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2153 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 2152 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2154 goto out; 2153 goto out;
2155 2154
2155 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2156
2156 /* 2157 /*
2157 * The ideal configuration... 2158 * The ideal configuration...
2158 * We have enough vectors to map one per queue. 2159 * We have enough vectors to map one per queue.
2159 */ 2160 */
2160 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 2161 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
2161 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 2162 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2162 map_vector_to_rxq(adapter, v_start, rxr_idx); 2163 map_vector_to_rxq(adapter, v_start, rxr_idx);
2163 2164
@@ -2173,23 +2174,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2173 * multiple queues per vector. 2174 * multiple queues per vector.
2174 */ 2175 */
2175 /* Re-adjusting *qpv takes care of the remainder. */ 2176 /* Re-adjusting *qpv takes care of the remainder. */
2176 for (i = v_start; i < vectors; i++) { 2177 for (i = v_start; i < q_vectors; i++) {
2177 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); 2178 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
2178 for (j = 0; j < rqpv; j++) { 2179 for (j = 0; j < rqpv; j++) {
2179 map_vector_to_rxq(adapter, i, rxr_idx); 2180 map_vector_to_rxq(adapter, i, rxr_idx);
2180 rxr_idx++; 2181 rxr_idx++;
2181 rxr_remaining--; 2182 rxr_remaining--;
2182 } 2183 }
2183 } 2184 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
2184 for (i = v_start; i < vectors; i++) {
2185 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
2186 for (j = 0; j < tqpv; j++) { 2185 for (j = 0; j < tqpv; j++) {
2187 map_vector_to_txq(adapter, i, txr_idx); 2186 map_vector_to_txq(adapter, i, txr_idx);
2188 txr_idx++; 2187 txr_idx++;
2189 txr_remaining--; 2188 txr_remaining--;
2190 } 2189 }
2191 } 2190 }
2192
2193out: 2191out:
2194 return err; 2192 return err;
2195} 2193}
@@ -2211,32 +2209,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2211 /* Decrement for Other and TCP Timer vectors */ 2209 /* Decrement for Other and TCP Timer vectors */
2212 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2210 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2213 2211
2214 /* Map the Tx/Rx rings to the vectors we were allotted. */ 2212 err = ixgbe_map_rings_to_vectors(adapter);
2215 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
2216 if (err) 2213 if (err)
2217 goto out; 2214 return err;
2218 2215
2219#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 2216#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
2220 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 2217 ? &ixgbe_msix_clean_many : \
2221 &ixgbe_msix_clean_many) 2218 (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
2219 (_v)->txr_count ? &ixgbe_msix_clean_tx : \
2220 NULL)
2222 for (vector = 0; vector < q_vectors; vector++) { 2221 for (vector = 0; vector < q_vectors; vector++) {
2223 handler = SET_HANDLER(adapter->q_vector[vector]); 2222 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2223 handler = SET_HANDLER(q_vector);
2224 2224
2225 if (handler == &ixgbe_msix_clean_rx) { 2225 if (handler == &ixgbe_msix_clean_rx) {
2226 sprintf(adapter->name[vector], "%s-%s-%d", 2226 sprintf(q_vector->name, "%s-%s-%d",
2227 netdev->name, "rx", ri++); 2227 netdev->name, "rx", ri++);
2228 } else if (handler == &ixgbe_msix_clean_tx) { 2228 } else if (handler == &ixgbe_msix_clean_tx) {
2229 sprintf(adapter->name[vector], "%s-%s-%d", 2229 sprintf(q_vector->name, "%s-%s-%d",
2230 netdev->name, "tx", ti++); 2230 netdev->name, "tx", ti++);
2231 } else { 2231 } else if (handler == &ixgbe_msix_clean_many) {
2232 sprintf(adapter->name[vector], "%s-%s-%d", 2232 sprintf(q_vector->name, "%s-%s-%d",
2233 netdev->name, "TxRx", ri++); 2233 netdev->name, "TxRx", ri++);
2234 ti++; 2234 ti++;
2235 } else {
2236 /* skip this unused q_vector */
2237 continue;
2235 } 2238 }
2236
2237 err = request_irq(adapter->msix_entries[vector].vector, 2239 err = request_irq(adapter->msix_entries[vector].vector,
2238 handler, 0, adapter->name[vector], 2240 handler, 0, q_vector->name,
2239 adapter->q_vector[vector]); 2241 q_vector);
2240 if (err) { 2242 if (err) {
2241 e_err(probe, "request_irq failed for MSIX interrupt " 2243 e_err(probe, "request_irq failed for MSIX interrupt "
2242 "Error: %d\n", err); 2244 "Error: %d\n", err);
@@ -2244,9 +2246,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2244 } 2246 }
2245 } 2247 }
2246 2248
2247 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 2249 sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
2248 err = request_irq(adapter->msix_entries[vector].vector, 2250 err = request_irq(adapter->msix_entries[vector].vector,
2249 ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 2251 ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
2250 if (err) { 2252 if (err) {
2251 e_err(probe, "request_irq for msix_lsc failed: %d\n", err); 2253 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2252 goto free_queue_irqs; 2254 goto free_queue_irqs;
@@ -2262,7 +2264,6 @@ free_queue_irqs:
2262 pci_disable_msix(adapter->pdev); 2264 pci_disable_msix(adapter->pdev);
2263 kfree(adapter->msix_entries); 2265 kfree(adapter->msix_entries);
2264 adapter->msix_entries = NULL; 2266 adapter->msix_entries = NULL;
2265out:
2266 return err; 2267 return err;
2267} 2268}
2268 2269