aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGreg Rose <gregory.v.rose@intel.com>2010-09-23 08:46:25 -0400
committerDavid S. Miller <davem@davemloft.net>2010-09-25 01:43:31 -0400
commitbba50b99b2410e863b38afdcd0280eb37f8a8bcc (patch)
tree82189c764ac0dacb10d6085b31100c0ae544881b
parent543876c92837a8b208b5c99ec225c1f5a581900e (diff)
ixgbevf: Refactor ring parameter re-size
The function to resize the Tx/Rx rings had the potential to dereference a NULL pointer and the code would attempt to resize the Tx ring even if the Rx ring allocation had failed. This would cause some confusion in the return code semantics. Fixed up to just unwind the allocations if any of them fail and return an error. Signed-off-by: Greg Rose <gregory.v.rose@intel.com> Tested-by: Emil Tantilov <emil.s.tantilov@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ixgbevf/ethtool.c153
1 file changed, 79 insertions, 74 deletions
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 4680b069b84f..4cc817acfb62 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -330,10 +330,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
330{ 330{
331 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 331 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
332 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; 332 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
333 int i, err; 333 int i, err = 0;
334 u32 new_rx_count, new_tx_count; 334 u32 new_rx_count, new_tx_count;
335 bool need_tx_update = false;
336 bool need_rx_update = false;
337 335
338 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 336 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
339 return -EINVAL; 337 return -EINVAL;
@@ -355,89 +353,96 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
355 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 353 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
356 msleep(1); 354 msleep(1);
357 355
358 if (new_tx_count != adapter->tx_ring_count) { 356 /*
359 tx_ring = kcalloc(adapter->num_tx_queues, 357 * If the adapter isn't up and running then just set the
360 sizeof(struct ixgbevf_ring), GFP_KERNEL); 358 * new parameters and scurry for the exits.
361 if (!tx_ring) { 359 */
362 err = -ENOMEM; 360 if (!netif_running(adapter->netdev)) {
363 goto err_setup; 361 for (i = 0; i < adapter->num_tx_queues; i++)
364 } 362 adapter->tx_ring[i].count = new_tx_count;
365 memcpy(tx_ring, adapter->tx_ring, 363 for (i = 0; i < adapter->num_rx_queues; i++)
366 adapter->num_tx_queues * sizeof(struct ixgbevf_ring)); 364 adapter->rx_ring[i].count = new_rx_count;
367 for (i = 0; i < adapter->num_tx_queues; i++) { 365 adapter->tx_ring_count = new_tx_count;
368 tx_ring[i].count = new_tx_count; 366 adapter->rx_ring_count = new_rx_count;
369 err = ixgbevf_setup_tx_resources(adapter, 367 goto clear_reset;
370 &tx_ring[i]);
371 if (err) {
372 while (i) {
373 i--;
374 ixgbevf_free_tx_resources(adapter,
375 &tx_ring[i]);
376 }
377 kfree(tx_ring);
378 goto err_setup;
379 }
380 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
381 }
382 need_tx_update = true;
383 } 368 }
384 369
385 if (new_rx_count != adapter->rx_ring_count) { 370 tx_ring = kcalloc(adapter->num_tx_queues,
386 rx_ring = kcalloc(adapter->num_rx_queues, 371 sizeof(struct ixgbevf_ring), GFP_KERNEL);
387 sizeof(struct ixgbevf_ring), GFP_KERNEL); 372 if (!tx_ring) {
388 if ((!rx_ring) && (need_tx_update)) { 373 err = -ENOMEM;
389 err = -ENOMEM; 374 goto clear_reset;
390 goto err_rx_setup;
391 }
392 memcpy(rx_ring, adapter->rx_ring,
393 adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
394 for (i = 0; i < adapter->num_rx_queues; i++) {
395 rx_ring[i].count = new_rx_count;
396 err = ixgbevf_setup_rx_resources(adapter,
397 &rx_ring[i]);
398 if (err) {
399 while (i) {
400 i--;
401 ixgbevf_free_rx_resources(adapter,
402 &rx_ring[i]);
403 }
404 kfree(rx_ring);
405 goto err_rx_setup;
406 }
407 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
408 }
409 need_rx_update = true;
410 } 375 }
411 376
412err_rx_setup: 377 rx_ring = kcalloc(adapter->num_rx_queues,
413 /* if rings need to be updated, here's the place to do it in one shot */ 378 sizeof(struct ixgbevf_ring), GFP_KERNEL);
414 if (need_tx_update || need_rx_update) { 379 if (!rx_ring) {
415 if (netif_running(netdev)) 380 err = -ENOMEM;
416 ixgbevf_down(adapter); 381 goto err_rx_setup;
417 } 382 }
418 383
419 /* tx */ 384 ixgbevf_down(adapter);
420 if (need_tx_update) { 385
421 kfree(adapter->tx_ring); 386 memcpy(tx_ring, adapter->tx_ring,
422 adapter->tx_ring = tx_ring; 387 adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
423 tx_ring = NULL; 388 for (i = 0; i < adapter->num_tx_queues; i++) {
424 adapter->tx_ring_count = new_tx_count; 389 tx_ring[i].count = new_tx_count;
390 err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
391 if (err) {
392 while (i) {
393 i--;
394 ixgbevf_free_tx_resources(adapter,
395 &tx_ring[i]);
396 }
397 goto err_tx_ring_setup;
398 }
399 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
425 } 400 }
426 401
427 /* rx */ 402 memcpy(rx_ring, adapter->rx_ring,
428 if (need_rx_update) { 403 adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
429 kfree(adapter->rx_ring); 404 for (i = 0; i < adapter->num_rx_queues; i++) {
430 adapter->rx_ring = rx_ring; 405 rx_ring[i].count = new_rx_count;
431 rx_ring = NULL; 406 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
432 adapter->rx_ring_count = new_rx_count; 407 if (err) {
408 while (i) {
409 i--;
410 ixgbevf_free_rx_resources(adapter,
411 &rx_ring[i]);
412 }
413 goto err_rx_ring_setup;
414 }
415 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
433 } 416 }
434 417
418 /*
419 * Only switch to new rings if all the prior allocations
420 * and ring setups have succeeded.
421 */
422 kfree(adapter->tx_ring);
423 adapter->tx_ring = tx_ring;
424 adapter->tx_ring_count = new_tx_count;
425
426 kfree(adapter->rx_ring);
427 adapter->rx_ring = rx_ring;
428 adapter->rx_ring_count = new_rx_count;
429
435 /* success! */ 430 /* success! */
436 err = 0; 431 ixgbevf_up(adapter);
437 if (netif_running(netdev)) 432
438 ixgbevf_up(adapter); 433 goto clear_reset;
434
435err_rx_ring_setup:
436 for(i = 0; i < adapter->num_tx_queues; i++)
437 ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
438
439err_tx_ring_setup:
440 kfree(rx_ring);
441
442err_rx_setup:
443 kfree(tx_ring);
439 444
440err_setup: 445clear_reset:
441 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 446 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
442 return err; 447 return err;
443} 448}