author		Emil Tantilov <emil.s.tantilov@intel.com>	2018-01-30 19:51:43 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2018-02-26 12:32:46 -0500
commit		21c046e448616529a181a35445d9f6d60352e01f (patch)
tree		4b1db94d18efa2f10865c2e447c6c0e99dc02c58
parent		5cc0f1c0dc56404a46e8bccd6c96e63cde257268 (diff)
ixgbevf: allocate the rings as part of q_vector
Make it so that all ring allocations are made as part of q_vector.
The advantage to this is that we can keep all of the memory related
to a single interrupt in one page.

The goal is to bring the logic of handling rings closer to ixgbe.

Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h      |   7 +-
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 392 ++++++++++-----------
 2 files changed, 182 insertions(+), 217 deletions(-)
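The heart of the change is a single-allocation idiom: each q_vector is allocated together with its rings in one kzalloc() via a trailing array member, so all of the memory tied to one interrupt is contiguous and can share a page. Below is a minimal userspace sketch of that idiom, with illustrative struct and function names rather than the driver's (the kernel source uses a zero-length ring[0] member where C99 code would use a flexible array):

#include <stdio.h>
#include <stdlib.h>

/* illustrative stand-ins for ixgbevf_q_vector/ixgbevf_ring */
struct ring {
	int queue_index;
};

struct q_vector {
	int v_idx;
	struct ring ring[];	/* rings live in the same allocation */
};

static struct q_vector *alloc_q_vector(int v_idx, int ring_count)
{
	/* one allocation covers the vector and all of its rings */
	size_t size = sizeof(struct q_vector) +
		      sizeof(struct ring) * (size_t)ring_count;
	struct q_vector *qv = calloc(1, size);
	int i;

	if (!qv)
		return NULL;
	qv->v_idx = v_idx;
	for (i = 0; i < ring_count; i++)
		qv->ring[i].queue_index = i;
	return qv;
}

int main(void)
{
	struct q_vector *qv = alloc_q_vector(0, 2);

	if (!qv)
		return 1;
	printf("vector %d, ring[1] queue %d\n", qv->v_idx,
	       qv->ring[1].queue_index);
	free(qv);	/* one free tears everything down */
	return 0;
}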
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index a5e9127a1156..f65ca156af2d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -97,6 +97,7 @@ enum ixgbevf_ring_state_t {
 
 struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
+	struct ixgbevf_q_vector *q_vector;	/* backpointer to q_vector */
 	struct net_device *netdev;
 	struct device *dev;
 	void *desc;			/* descriptor ring memory */
@@ -128,7 +129,7 @@ struct ixgbevf_ring {
 	 */
 	u16 reg_idx;
 	int queue_index; /* needed for multiqueue queue management */
-};
+} ____cacheline_internodealigned_in_smp;
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
@@ -241,7 +242,11 @@ struct ixgbevf_q_vector {
 	u16 itr; /* Interrupt throttle rate written to EITR */
 	struct napi_struct napi;
 	struct ixgbevf_ring_container rx, tx;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int state;
 #define IXGBEVF_QV_STATE_IDLE	0
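Both the ring struct and the embedded ring[0] array are tagged ____cacheline_internodealigned_in_smp (from <linux/cache.h>), so rings carved out of one q_vector allocation each start on their own cache line and hot per-ring counters don't false-share. A rough userspace analogue of what the annotation buys, assuming a 64-byte line where the kernel macro uses the configured internode size:

#include <stdalign.h>
#include <stdio.h>

#define CACHELINE 64	/* assumed line size; the kernel macro uses the real one */

struct ring {
	alignas(CACHELINE) unsigned long packets;	/* hot per-ring counter */
};

int main(void)
{
	struct ring rings[2];

	/* each element is padded to a full line, so rings[0] and rings[1]
	 * never share a cache line even when carved from one allocation
	 */
	printf("sizeof(struct ring) = %zu\n", sizeof(struct ring));
	printf("stride = %zu\n",
	       (size_t)((char *)&rings[1] - (char *)&rings[0]));
	return 0;
}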
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a10b1bdc99e3..6219ab2e3f52 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1270,85 +1270,6 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
-				     int r_idx)
-{
-	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
-
-	a->rx_ring[r_idx]->next = q_vector->rx.ring;
-	q_vector->rx.ring = a->rx_ring[r_idx];
-	q_vector->rx.count++;
-}
-
-static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
-				     int t_idx)
-{
-	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
-
-	a->tx_ring[t_idx]->next = q_vector->tx.ring;
-	q_vector->tx.ring = a->tx_ring[t_idx];
-	q_vector->tx.count++;
-}
-
-/**
- * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
- * @adapter: board private structure to initialize
- *
- * This function maps descriptor rings to the queue-specific vectors
- * we were allotted through the MSI-X enabling code. Ideally, we'd have
- * one vector per ring/queue, but on a constrained vector budget, we
- * group the rings as "efficiently" as possible. You would add new
- * mapping configurations in here.
- **/
-static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
-{
-	int q_vectors;
-	int v_start = 0;
-	int rxr_idx = 0, txr_idx = 0;
-	int rxr_remaining = adapter->num_rx_queues;
-	int txr_remaining = adapter->num_tx_queues;
-	int i, j;
-	int rqpv, tqpv;
-
-	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-	/* The ideal configuration...
-	 * We have enough vectors to map one per queue.
-	 */
-	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
-		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
-			map_vector_to_rxq(adapter, v_start, rxr_idx);
-
-		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
-			map_vector_to_txq(adapter, v_start, txr_idx);
-		return 0;
-	}
-
-	/* If we don't have enough vectors for a 1-to-1
-	 * mapping, we'll have to group them so there are
-	 * multiple queues per vector.
-	 */
-	/* Re-adjusting *qpv takes care of the remainder. */
-	for (i = v_start; i < q_vectors; i++) {
-		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
-		for (j = 0; j < rqpv; j++) {
-			map_vector_to_rxq(adapter, i, rxr_idx);
-			rxr_idx++;
-			rxr_remaining--;
-		}
-	}
-	for (i = v_start; i < q_vectors; i++) {
-		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
-		for (j = 0; j < tqpv; j++) {
-			map_vector_to_txq(adapter, i, txr_idx);
-			txr_idx++;
-			txr_remaining--;
-		}
-	}
-
-	return 0;
-}
-
 /**
  * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
  * @adapter: board private structure
@@ -1421,20 +1342,6 @@ free_queue_irqs:
 	return err;
 }
 
-static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
-{
-	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-	for (i = 0; i < q_vectors; i++) {
-		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
-
-		q_vector->rx.ring = NULL;
-		q_vector->tx.ring = NULL;
-		q_vector->rx.count = 0;
-		q_vector->tx.count = 0;
-	}
-}
-
 /**
  * ixgbevf_request_irq - initialize interrupts
  * @adapter: board private structure
@@ -1474,8 +1381,6 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
 		free_irq(adapter->msix_entries[i].vector,
 			 adapter->q_vector[i]);
 	}
-
-	ixgbevf_reset_q_vectors(adapter);
 }
 
 /**
@@ -2457,105 +2362,171 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 }
 
 /**
- * ixgbevf_alloc_queues - Allocate memory for all rings
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+	int vector, v_budget;
+
+	/* It's easy to be greedy for MSI-X vectors, but it really
+	 * doesn't do us much good if we have a lot more vectors
+	 * than CPU's. So let's be conservative and only ask for
+	 * (roughly) the same number of vectors as there are CPU's.
+	 * The default is to use pairs of vectors.
+	 */
+	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+	v_budget = min_t(int, v_budget, num_online_cpus());
+	v_budget += NON_Q_VECTORS;
+
+	adapter->msix_entries = kcalloc(v_budget,
+					sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adapter->msix_entries)
+		return -ENOMEM;
+
+	for (vector = 0; vector < v_budget; vector++)
+		adapter->msix_entries[vector].entry = vector;
+
+	/* A failure in MSI-X entry allocation isn't fatal, but the VF driver
+	 * does not support any other modes, so we will simply fail here. Note
+	 * that we clean up the msix_entries pointer else-where.
+	 */
+	return ixgbevf_acquire_msix_vectors(adapter, v_budget);
+}
+
+static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
+			     struct ixgbevf_ring_container *head)
+{
+	ring->next = head->ring;
+	head->ring = ring;
+	head->count++;
+}
+
+/**
+ * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
  * @adapter: board private structure to initialize
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: number of Tx rings for q vector
+ * @txr_idx: index of first Tx ring to assign
+ * @rxr_count: number of Rx rings for q vector
+ * @rxr_idx: index of first Rx ring to assign
  *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time. The polling_netdev array is
- * intended for Multiqueue, but should work fine with a single queue.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
  **/
-static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
+				  int txr_count, int txr_idx,
+				  int rxr_count, int rxr_idx)
 {
+	struct ixgbevf_q_vector *q_vector;
 	struct ixgbevf_ring *ring;
-	int rx = 0, tx = 0;
+	int ring_count, size;
+
+	ring_count = txr_count + rxr_count;
+	size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
+
+	/* allocate q_vector and rings */
+	q_vector = kzalloc(size, GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	/* initialize NAPI */
+	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
+
+	/* tie q_vector and adapter together */
+	adapter->q_vector[v_idx] = q_vector;
+	q_vector->adapter = adapter;
+	q_vector->v_idx = v_idx;
 
-	for (; tx < adapter->num_tx_queues; tx++) {
-		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-		if (!ring)
-			goto err_allocation;
+	/* initialize pointer to rings */
+	ring = q_vector->ring;
 
+	while (txr_count) {
+		/* assign generic ring traits */
 		ring->dev = &adapter->pdev->dev;
 		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		ixgbevf_add_ring(ring, &q_vector->tx);
+
+		/* apply Tx specific ring traits */
 		ring->count = adapter->tx_ring_count;
-		ring->queue_index = tx;
-		ring->reg_idx = tx;
+		ring->queue_index = txr_idx;
+		ring->reg_idx = txr_idx;
 
-		adapter->tx_ring[tx] = ring;
-	}
+		/* assign ring to adapter */
+		adapter->tx_ring[txr_idx] = ring;
+
+		/* update count and index */
+		txr_count--;
+		txr_idx++;
 
-	for (; rx < adapter->num_rx_queues; rx++) {
-		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-		if (!ring)
-			goto err_allocation;
+		/* push pointer to next ring */
+		ring++;
+	}
 
+	while (rxr_count) {
+		/* assign generic ring traits */
 		ring->dev = &adapter->pdev->dev;
 		ring->netdev = adapter->netdev;
 
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Rx values */
+		ixgbevf_add_ring(ring, &q_vector->rx);
+
+		/* apply Rx specific ring traits */
 		ring->count = adapter->rx_ring_count;
-		ring->queue_index = rx;
-		ring->reg_idx = rx;
+		ring->queue_index = rxr_idx;
+		ring->reg_idx = rxr_idx;
 
-		adapter->rx_ring[rx] = ring;
-	}
+		/* assign ring to adapter */
+		adapter->rx_ring[rxr_idx] = ring;
 
-	return 0;
+		/* update count and index */
+		rxr_count--;
+		rxr_idx++;
 
-err_allocation:
-	while (tx) {
-		kfree(adapter->tx_ring[--tx]);
-		adapter->tx_ring[tx] = NULL;
+		/* push pointer to next ring */
+		ring++;
 	}
 
-	while (rx) {
-		kfree(adapter->rx_ring[--rx]);
-		adapter->rx_ring[rx] = NULL;
-	}
-	return -ENOMEM;
+	return 0;
 }
 
 /**
- * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
  * @adapter: board private structure to initialize
+ * @v_idx: index of vector in adapter struct
  *
- * Attempt to configure the interrupts using the best available
- * capabilities of the hardware and the kernel.
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
  **/
-static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
 {
-	struct net_device *netdev = adapter->netdev;
-	int err;
-	int vector, v_budget;
+	struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
+	struct ixgbevf_ring *ring;
 
-	/* It's easy to be greedy for MSI-X vectors, but it really
-	 * doesn't do us much good if we have a lot more vectors
-	 * than CPU's. So let's be conservative and only ask for
-	 * (roughly) the same number of vectors as there are CPU's.
-	 * The default is to use pairs of vectors.
-	 */
-	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
-	v_budget = min_t(int, v_budget, num_online_cpus());
-	v_budget += NON_Q_VECTORS;
+	ixgbevf_for_each_ring(ring, q_vector->tx)
+		adapter->tx_ring[ring->queue_index] = NULL;
 
-	/* A failure in MSI-X entry allocation isn't fatal, but it does
-	 * mean we disable MSI-X capabilities of the adapter.
-	 */
-	adapter->msix_entries = kcalloc(v_budget,
-					sizeof(struct msix_entry), GFP_KERNEL);
-	if (!adapter->msix_entries)
-		return -ENOMEM;
+	ixgbevf_for_each_ring(ring, q_vector->rx)
+		adapter->rx_ring[ring->queue_index] = NULL;
 
-	for (vector = 0; vector < v_budget; vector++)
-		adapter->msix_entries[vector].entry = vector;
+	adapter->q_vector[v_idx] = NULL;
+	netif_napi_del(&q_vector->napi);
 
-	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
-	if (err)
-		return err;
-
-	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
-	if (err)
-		return err;
-
-	return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+	/* ixgbevf_get_stats() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
	 */
+	kfree_rcu(q_vector, rcu);
 }
 
 /**
@@ -2567,35 +2538,53 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
  **/
 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 {
-	int q_idx, num_q_vectors;
-	struct ixgbevf_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int rxr_remaining = adapter->num_rx_queues;
+	int txr_remaining = adapter->num_tx_queues;
+	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+	int err;
+
+	if (q_vectors >= (rxr_remaining + txr_remaining)) {
+		for (; rxr_remaining; v_idx++, q_vectors--) {
+			int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
+
+			err = ixgbevf_alloc_q_vector(adapter, v_idx,
+						     0, 0, rqpv, rxr_idx);
+			if (err)
+				goto err_out;
+
+			/* update counts and index */
+			rxr_remaining -= rqpv;
+			rxr_idx += rqpv;
+		}
+	}
+
+	for (; q_vectors; v_idx++, q_vectors--) {
+		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
+		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
 
-	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+		err = ixgbevf_alloc_q_vector(adapter, v_idx,
+					     tqpv, txr_idx,
+					     rqpv, rxr_idx);
 
-	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
-		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
-		if (!q_vector)
+		if (err)
 			goto err_out;
-		q_vector->adapter = adapter;
-		q_vector->v_idx = q_idx;
-		netif_napi_add(adapter->netdev, &q_vector->napi,
-			       ixgbevf_poll, 64);
-		adapter->q_vector[q_idx] = q_vector;
+
+		/* update counts and index */
+		rxr_remaining -= rqpv;
+		rxr_idx += rqpv;
+		txr_remaining -= tqpv;
+		txr_idx += tqpv;
 	}
 
 	return 0;
 
 err_out:
-	while (q_idx) {
-		q_idx--;
-		q_vector = adapter->q_vector[q_idx];
-#ifdef CONFIG_NET_RX_BUSY_POLL
-		napi_hash_del(&q_vector->napi);
-#endif
-		netif_napi_del(&q_vector->napi);
-		kfree(q_vector);
-		adapter->q_vector[q_idx] = NULL;
+	while (v_idx) {
+		v_idx--;
+		ixgbevf_free_q_vector(adapter, v_idx);
 	}
+
 	return -ENOMEM;
 }
 
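The alloc_q_vectors loop above leans on DIV_ROUND_UP(remaining, q_vectors) to spread rings as evenly as possible when there are fewer vectors than queues: recomputing the ceiling on each iteration hands the earlier vectors the remainder. A standalone sketch of the arithmetic, with hypothetical counts rather than real adapter state:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int remaining = 5;	/* e.g. 5 Rx rings to spread over 2 vectors */
	int q_vectors = 2;
	int v_idx;

	for (v_idx = 0; q_vectors; v_idx++, q_vectors--) {
		int rqpv = DIV_ROUND_UP(remaining, q_vectors);

		printf("vector %d gets %d rings\n", v_idx, rqpv);
		remaining -= rqpv;
	}
	/* prints: vector 0 gets 3 rings, vector 1 gets 2 rings */
	return 0;
}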
@@ -2609,17 +2598,11 @@ err_out:
  **/
 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
 {
-	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
-		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-		adapter->q_vector[q_idx] = NULL;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-		napi_hash_del(&q_vector->napi);
-#endif
-		netif_napi_del(&q_vector->napi);
-		kfree(q_vector);
+	while (q_vectors) {
+		q_vectors--;
+		ixgbevf_free_q_vector(adapter, q_vectors);
 	}
 }
 
@@ -2663,12 +2646,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
 		goto err_alloc_q_vectors;
 	}
 
-	err = ixgbevf_alloc_queues(adapter);
-	if (err) {
-		pr_err("Unable to allocate memory for queues\n");
-		goto err_alloc_queues;
-	}
-
 	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
 	       (adapter->num_rx_queues > 1) ? "Enabled" :
 	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
@@ -2676,8 +2653,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
 	set_bit(__IXGBEVF_DOWN, &adapter->state);
 
 	return 0;
-err_alloc_queues:
-	ixgbevf_free_q_vectors(adapter);
 err_alloc_q_vectors:
 	ixgbevf_reset_interrupt_capability(adapter);
 err_set_interrupt:
@@ -2693,17 +2668,6 @@ err_set_interrupt:
  **/
 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
 {
-	int i;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		kfree(adapter->tx_ring[i]);
-		adapter->tx_ring[i] = NULL;
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		kfree(adapter->rx_ring[i]);
-		adapter->rx_ring[i] = NULL;
-	}
-
 	adapter->num_tx_queues = 0;
 	adapter->num_rx_queues = 0;
 
@@ -3307,12 +3271,6 @@ int ixgbevf_open(struct net_device *netdev)
 
 	ixgbevf_configure(adapter);
 
-	/* Map the Tx/Rx rings to the vectors we were allotted.
-	 * if request_irq will be called in this function map_rings
-	 * must be called *before* up_complete
-	 */
-	ixgbevf_map_rings_to_vectors(adapter);
-
 	err = ixgbevf_request_irq(adapter);
 	if (err)
 		goto err_req_irq;
@@ -4042,6 +4000,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,
 
 	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
 
+	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		ring = adapter->rx_ring[i];
 		do {
@@ -4063,6 +4022,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,
 		stats->tx_bytes += bytes;
 		stats->tx_packets += packets;
 	}
+	rcu_read_unlock();
 }
 
 #define IXGBEVF_MAX_MAC_HDR_LEN	127
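The RCU additions (the rcu_head in the q_vector, kfree_rcu() on teardown, rcu_read_lock() around the stats walk) close the race called out in ixgbevf_free_q_vector(): ixgbevf_get_stats() may still be walking a ring while the vector that owns it is being freed. A condensed, kernel-style sketch of the pattern follows; it is a fragment rather than a buildable module, with abridged field names and the u64_stats retry loops of the real function omitted:

/* writer side (teardown): unpublish the pointers first, then defer
 * the actual free until an RCU grace period has elapsed.
 */
adapter->rx_ring[ring->queue_index] = NULL;
adapter->q_vector[v_idx] = NULL;
kfree_rcu(q_vector, rcu);	/* 'rcu' names the struct's rcu_head member */

/* reader side (ixgbevf_get_stats): any ring visible inside the read
 * section stays allocated until rcu_read_unlock().
 */
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
	struct ixgbevf_ring *ring = adapter->rx_ring[i];

	if (ring)	/* illustrative: pointer may already be unpublished */
		stats->rx_packets += ring->stats.packets;
}
rcu_read_unlock();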