author		Roel Kluin <roel.kluin@gmail.com>	2009-08-18 23:21:40 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-18 23:21:40 -0400
commit		e7a5965a81a29a13cd4994fa23a6a7a1488bcdb6 (patch)
tree		57cbcab518552e8325f368c8e5255123fd1c97f4
parent		08fdef99342955a62884fb5c49ab43431a1cafbf (diff)
yellowfin: Fix buffer underrun after dev_alloc_skb() failure
yellowfin_init_ring() needs to clean up the skbs it has already allocated
if dev_alloc_skb() fails, and should pass an error status up to the
caller. Bailing out early also prevents a buffer underrun when the
failure occurs on the first iteration: the post-loop write to
yp->rx_ring[i-1] would otherwise hit index -1.
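The fix follows the usual all-or-nothing allocation pattern. A minimal
userspace sketch of the same unwind logic, with plain malloc()/free()
standing in for dev_alloc_skb()/dev_kfree_skb() and illustrative names
throughout:

	#include <errno.h>
	#include <stdlib.h>

	#define RING_SIZE 64

	/* Allocate every ring buffer or none: on a partial failure, free
	 * the 0..i-1 entries that did succeed and report -ENOMEM, so the
	 * caller never sees a half-initialized ring and the [i-1] access
	 * after the loop can no longer underrun the array. */
	static int init_ring(void *bufs[RING_SIZE], size_t buf_sz)
	{
		int i, j;

		for (i = 0; i < RING_SIZE; i++) {
			bufs[i] = malloc(buf_sz);
			if (bufs[i] == NULL)
				break;
		}
		if (i != RING_SIZE) {
			for (j = 0; j < i; j++)
				free(bufs[j]);
			return -ENOMEM;
		}
		return 0;
	}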
yellowfin_open(), which calls yellowfin_init_ring(), should free its
requested IRQ when that call fails.
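The caller-side half of the fix is the same release-in-reverse-order
rule. A self-contained sketch with stub functions (hypothetical stand-ins;
the real request_irq()/free_irq() take more arguments, and the init stub
is forced to fail here to exercise the unwind path):

	#include <errno.h>
	#include <stdio.h>

	static int request_irq_stub(void) { return 0; }
	static void free_irq_stub(void) { puts("irq released"); }
	static int init_ring_stub(void) { return -ENOMEM; }

	/* When a later setup step fails, undo the earlier steps in
	 * reverse order before propagating the error. */
	static int open_dev(void)
	{
		int ret;

		ret = request_irq_stub();
		if (ret)
			return ret;

		ret = init_ring_stub();
		if (ret) {
			free_irq_stub();	/* undo the earlier request */
			return ret;
		}
		return 0;
	}

	int main(void)
	{
		return open_dev() ? 1 : 0;
	}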
Signed-off-by: Roel Kluin <roel.kluin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/yellowfin.c	28
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index a07580138e81..c2fd6187773f 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -346,7 +346,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int yellowfin_open(struct net_device *dev);
 static void yellowfin_timer(unsigned long data);
 static void yellowfin_tx_timeout(struct net_device *dev);
-static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_init_ring(struct net_device *dev);
 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 static int yellowfin_rx(struct net_device *dev);
@@ -573,19 +573,24 @@ static int yellowfin_open(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
 	void __iomem *ioaddr = yp->base;
-	int i;
+	int i, ret;
 
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
-	if (i) return i;
+	ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+	if (ret)
+		return ret;
 
 	if (yellowfin_debug > 1)
 		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
 			dev->name, dev->irq);
 
-	yellowfin_init_ring(dev);
+	ret = yellowfin_init_ring(dev);
+	if (ret) {
+		free_irq(dev->irq, dev);
+		return ret;
+	}
 
 	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -725,10 +730,10 @@ static void yellowfin_tx_timeout(struct net_device *dev)
 }
 
 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void yellowfin_init_ring(struct net_device *dev)
+static int yellowfin_init_ring(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
-	int i;
+	int i, j;
 
 	yp->tx_full = 0;
 	yp->cur_rx = yp->cur_tx = 0;
@@ -753,6 +758,11 @@ static void yellowfin_init_ring(struct net_device *dev)
 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 	}
+	if (i != RX_RING_SIZE) {
+		for (j = 0; j < i; j++)
+			dev_kfree_skb(yp->rx_skbuff[j]);
+		return -ENOMEM;
+	}
 	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
@@ -769,8 +779,6 @@ static void yellowfin_init_ring(struct net_device *dev)
 	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 #else
 {
-	int j;
-
 	/* Tx ring needs a pair of descriptors, the second for the status. */
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		j = 2*i;
@@ -805,7 +813,7 @@ static void yellowfin_init_ring(struct net_device *dev)
 	}
 #endif
 	yp->tx_tail_desc = &yp->tx_status[0];
-	return;
+	return 0;
 }
 
 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)