author     Dan Carpenter <dan.carpenter@oracle.com>	2015-02-05 03:00:42 -0500
committer  David S. Miller <davem@davemloft.net>	2015-02-06 15:50:12 -0500
commit     37c85c3498c5538db050ff287e346127dbc16f7c (patch)
tree       8c1bbc4e4009c703b1cf7a1a8d6fa0cec59df007
parent     e8a308affcd79d95dad111f7872e43e9f73abb3b (diff)
net: sxgbe: fix error handling in init_rx_ring()
There are a couple of bugs in the error handling in this function:

1) If we can't allocate "rx_ring->rx_skbuff" then we should call
   dma_free_coherent(), but we don't.

2) free_rx_ring() frees "rx_ring->rx_skbuff_dma" and "rx_ring->rx_skbuff",
   so calling it in a loop causes a double free.

Also it was a bit confusing how we sometimes freed things before doing the
goto.  I've cleaned it up so it does error handling in normal kernel style.

Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c	| 57
1 file changed, 43 insertions(+), 14 deletions(-)
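The cleanup below follows the usual kernel pattern of one unwind label per allocation, plus a `while (--desc_index >= 0)` loop that releases only the per-descriptor buffers that were actually set up. As a rough illustration of that "normal kernel style", here is a minimal userspace sketch (plain C; names such as ring_init are hypothetical and this is not the driver's code) of the same label-per-allocation idea:

/*
 * Minimal sketch of label-per-allocation unwinding: a failure at step N
 * jumps to a label that frees only what steps 1..N-1 set up, so nothing
 * is leaked and nothing is freed twice.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring {
	void *desc;	/* stands in for the dma_alloc_coherent() block */
	long *buf_dma;	/* stands in for rx_ring->rx_skbuff_dma */
	void **buf;	/* stands in for rx_ring->rx_skbuff */
};

static int ring_init(struct ring *r, int n)
{
	int i, ret;

	r->desc = calloc(n, 64);
	if (!r->desc)
		return -1;		/* nothing allocated yet */

	r->buf_dma = calloc(n, sizeof(*r->buf_dma));
	if (!r->buf_dma) {
		ret = -1;
		goto err_free_desc;
	}

	r->buf = calloc(n, sizeof(*r->buf));
	if (!r->buf) {
		ret = -1;
		goto err_free_buf_dma;
	}

	for (i = 0; i < n; i++) {
		r->buf[i] = malloc(128);	/* per-entry buffer, like one skb */
		if (!r->buf[i]) {
			ret = -1;
			goto err_free_buffers;
		}
	}

	return 0;

err_free_buffers:
	while (--i >= 0)	/* free only the entries that were set up */
		free(r->buf[i]);
	free(r->buf);
err_free_buf_dma:
	free(r->buf_dma);
err_free_desc:
	free(r->desc);
	return ret;
}

int main(void)
{
	struct ring r;

	if (ring_init(&r, 8))
		return 1;
	printf("ring initialised\n");
	for (int i = 0; i < 8; i++)	/* normal teardown mirrors the error path */
		free(r.buf[i]);
	free(r.buf);
	free(r.buf_dma);
	free(r.desc);
	return 0;
}

The labels are ordered as the reverse of the allocation order, which is what lets a jump from any failure point free exactly the earlier steps.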
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index b1a271853d85..d860dca01475 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -365,6 +365,26 @@ static int sxgbe_init_rx_buffers(struct net_device *dev,
 
 	return 0;
 }
+
+/**
+ * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
+ * @dev: net device structure
+ * @rx_ring: ring to be freed
+ * @rx_rsize: ring size
+ * Description: this function initializes the DMA RX descriptor
+ */
+static void sxgbe_free_rx_buffers(struct net_device *dev,
+				  struct sxgbe_rx_norm_desc *p, int i,
+				  unsigned int dma_buf_sz,
+				  struct sxgbe_rx_queue *rx_ring)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+	kfree_skb(rx_ring->rx_skbuff[i]);
+	dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
+			 dma_buf_sz, DMA_FROM_DEVICE);
+}
+
 /**
  * init_tx_ring - init the TX descriptor ring
  * @dev: net device structure
@@ -457,7 +477,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 	/* RX ring is not allcoated */
 	if (rx_ring == NULL) {
 		netdev_err(dev, "No memory for RX queue\n");
-		goto error;
+		return -ENOMEM;
 	}
 
 	/* assign queue number */
@@ -469,23 +489,21 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 					      &rx_ring->dma_rx_phy, GFP_KERNEL);
 
 	if (rx_ring->dma_rx == NULL)
-		goto error;
+		return -ENOMEM;
 
 	/* allocate memory for RX skbuff array */
 	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
 					       sizeof(dma_addr_t), GFP_KERNEL);
 	if (!rx_ring->rx_skbuff_dma) {
-		dma_free_coherent(priv->device,
-				  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
-				  rx_ring->dma_rx, rx_ring->dma_rx_phy);
-		goto error;
+		ret = -ENOMEM;
+		goto err_free_dma_rx;
 	}
 
 	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
 					   sizeof(struct sk_buff *), GFP_KERNEL);
 	if (!rx_ring->rx_skbuff) {
-		kfree(rx_ring->rx_skbuff_dma);
-		goto error;
+		ret = -ENOMEM;
+		goto err_free_skbuff_dma;
 	}
 
 	/* initialise the buffers */
@@ -495,7 +513,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
 					    bfsize, rx_ring);
 		if (ret)
-			goto err_init_rx_buffers;
+			goto err_free_rx_buffers;
 	}
 
 	/* initalise counters */
@@ -505,11 +523,22 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 
 	return 0;
 
-err_init_rx_buffers:
-	while (--desc_index >= 0)
-		free_rx_ring(priv->device, rx_ring, desc_index);
-error:
-	return -ENOMEM;
+err_free_rx_buffers:
+	while (--desc_index >= 0) {
+		struct sxgbe_rx_norm_desc *p;
+
+		p = rx_ring->dma_rx + desc_index;
+		sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
+	}
+	kfree(rx_ring->rx_skbuff);
+err_free_skbuff_dma:
+	kfree(rx_ring->rx_skbuff_dma);
+err_free_dma_rx:
+	dma_free_coherent(priv->device,
+			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
+
+	return ret;
 }
 /**
  * free_tx_ring - free the TX descriptor ring