-rw-r--r--   drivers/net/mv643xx_eth.c   39
1 file changed, 10 insertions, 29 deletions
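In short, the driver's per-descriptor array of skb pointers (txq->tx_skb) becomes a struct sk_buff_head FIFO: txq_submit_skb() appends each outgoing skb with __skb_queue_tail(), and because only a packet's final descriptor carries TX_LAST_DESC, txq_reclaim() can pop completed skbs in order with __skb_dequeue(). This also drops the separate tx_skb allocation and the out/out_free error unwinding in txq_init(). A standalone sketch of the same FIFO-reclaim pattern follows the diff.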
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 7410eca87823..099e0be0d8c3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -336,7 +336,8 @@ struct tx_queue {
 	struct tx_desc *tx_desc_area;
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
-	struct sk_buff **tx_skb;
+
+	struct sk_buff_head tx_skb;
 
 	unsigned long tx_packets;
 	unsigned long tx_bytes;
@@ -676,10 +677,8 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
 					ZERO_PADDING | TX_LAST_DESC |
 					TX_ENABLE_INTERRUPT;
-			txq->tx_skb[tx_index] = skb;
 		} else {
 			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
-			txq->tx_skb[tx_index] = NULL;
 		}
 
 		desc->l4i_chk = 0;
@@ -712,13 +711,10 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 
 	if (nr_frags) {
 		txq_submit_frag_skb(txq, skb);
-
 		length = skb_headlen(skb);
-		txq->tx_skb[tx_index] = NULL;
 	} else {
 		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
 		length = skb->len;
-		txq->tx_skb[tx_index] = skb;
 	}
 
 	desc->byte_cnt = length;
@@ -772,6 +768,8 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 		desc->l4i_chk = 0;
 	}
 
+	__skb_queue_tail(&txq->tx_skb, skb);
+
 	/* ensure all other descriptors are written before first cmd_sts */
 	wmb();
 	desc->cmd_sts = cmd_sts;
@@ -884,8 +882,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		reclaimed++;
 		txq->tx_desc_count--;
 
-		skb = txq->tx_skb[tx_index];
-		txq->tx_skb[tx_index] = NULL;
+		skb = NULL;
+		if (cmd_sts & TX_LAST_DESC)
+			skb = __skb_dequeue(&txq->tx_skb);
 
 		if (cmd_sts & ERROR_SUMMARY) {
 			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
@@ -1692,18 +1691,11 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 	if (txq->tx_desc_area == NULL) {
 		dev_printk(KERN_ERR, &mp->dev->dev,
 			   "can't allocate tx ring (%d bytes)\n", size);
-		goto out;
+		return -ENOMEM;
 	}
 	memset(txq->tx_desc_area, 0, size);
 
 	txq->tx_desc_area_size = size;
-	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
-								GFP_KERNEL);
-	if (txq->tx_skb == NULL) {
-		dev_printk(KERN_ERR, &mp->dev->dev,
-			   "can't allocate tx skb ring\n");
-		goto out_free;
-	}
 
 	tx_desc = (struct tx_desc *)txq->tx_desc_area;
 	for (i = 0; i < txq->tx_ring_size; i++) {
@@ -1719,18 +1711,9 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 					nexti * sizeof(struct tx_desc);
 	}
 
-	return 0;
+	skb_queue_head_init(&txq->tx_skb);
 
-out_free:
-	if (index == 0 && size <= mp->tx_desc_sram_size)
-		iounmap(txq->tx_desc_area);
-	else
-		dma_free_coherent(NULL, size,
-				  txq->tx_desc_area,
-				  txq->tx_desc_dma);
-
-out:
-	return -ENOMEM;
+	return 0;
 }
 
 static void txq_deinit(struct tx_queue *txq)
@@ -1748,8 +1731,6 @@ static void txq_deinit(struct tx_queue *txq)
 	else
 		dma_free_coherent(NULL, txq->tx_desc_area_size,
 				  txq->tx_desc_area, txq->tx_desc_dma);
-
-	kfree(txq->tx_skb);
 }
 
 
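The FIFO-reclaim idea can be shown outside the kernel. Below is a minimal userspace C sketch (an illustration, not driver or kernel API code; names such as fake_desc, pkt_fifo and LAST_DESC are invented for the example, and the trivial linked list merely stands in for sk_buff_head): descriptors complete in ring order, only the descriptor flagged as a packet's last one maps to a whole packet, so matching completions to packets needs nothing more than dequeuing from a FIFO.

#include <stdio.h>

#define LAST_DESC 0x1

struct fake_desc {
	unsigned int cmd_sts;		/* stands in for the hardware cmd_sts word */
};

struct fake_pkt {
	int id;
	struct fake_pkt *next;
};

/* trivial singly linked FIFO -- the userspace stand-in for sk_buff_head */
struct pkt_fifo {
	struct fake_pkt *head, *tail;
};

static void fifo_init(struct pkt_fifo *q)
{
	q->head = q->tail = NULL;
}

static void fifo_enqueue(struct pkt_fifo *q, struct fake_pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

static struct fake_pkt *fifo_dequeue(struct pkt_fifo *q)
{
	struct fake_pkt *p = q->head;

	if (p) {
		q->head = p->next;
		if (q->head == NULL)
			q->tail = NULL;
	}
	return p;
}

int main(void)
{
	/* six-slot "ring": packet 0 spans descriptors 0-1, packet 1 uses
	 * descriptor 2, packet 2 spans descriptors 3-5; only the last
	 * descriptor of each packet carries the LAST_DESC flag */
	struct fake_desc ring[6] = {
		{ 0 }, { LAST_DESC }, { LAST_DESC }, { 0 }, { 0 }, { LAST_DESC },
	};
	struct fake_pkt pkts[3] = { { 0, NULL }, { 1, NULL }, { 2, NULL } };
	struct pkt_fifo pending;
	int i;

	fifo_init(&pending);

	/* submit side: one enqueue per packet, however many fragments it has */
	for (i = 0; i < 3; i++)
		fifo_enqueue(&pending, &pkts[i]);

	/* reclaim side: walk descriptors in completion order and hand back a
	 * packet only when its last descriptor is done, mirroring the pattern
	 * the patch introduces in txq_reclaim() */
	for (i = 0; i < 6; i++) {
		struct fake_pkt *done = NULL;

		if (ring[i].cmd_sts & LAST_DESC)
			done = fifo_dequeue(&pending);

		if (done)
			printf("desc %d done -> packet %d reclaimed\n", i, done->id);
		else
			printf("desc %d done -> packet still in flight\n", i);
	}

	return 0;
}

This is why the patch can delete the per-slot tx_skb[] bookkeeping: the submit path does one enqueue per packet and the reclaim path does one dequeue per TX_LAST_DESC completion, so the queue never needs random access by descriptor index.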