diff options
author | Matt Carlson <mcarlson@broadcom.com> | 2011-05-19 08:12:45 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-05-19 17:59:59 -0400 |
commit | 432aa7ed75b3adaef6040d2cbe745fdd1c899415 (patch) | |
tree | e2f9d271bdbfbd3b4b9e35cb00e8a72cd366dc38 /drivers/net/tg3.c | |
parent | 2ffcc981d823a0518c627ca22d51ef72d0b7ca9a (diff) |
tg3: Cleanup transmit error path
This patch consolidates the skb cleanup code into a function named
tg3_skb_error_unmap(). The modification addresses a long-standing bug
where pci_unmap_single() was incorrectly being called instead of
pci_unmap_page() in tigon3_dma_hwbug_workaround().
Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Reviewed-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r-- | drivers/net/tg3.c | 98 |
1 file changed, 38 insertions(+), 60 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 4c441682a291..b2b1ba168c88 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -5758,16 +5758,39 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry, | |||
5758 | txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; | 5758 | txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; |
5759 | } | 5759 | } |
5760 | 5760 | ||
5761 | static void tg3_skb_error_unmap(struct tg3_napi *tnapi, | ||
5762 | struct sk_buff *skb, int last) | ||
5763 | { | ||
5764 | int i; | ||
5765 | u32 entry = tnapi->tx_prod; | ||
5766 | struct ring_info *txb = &tnapi->tx_buffers[entry]; | ||
5767 | |||
5768 | pci_unmap_single(tnapi->tp->pdev, | ||
5769 | dma_unmap_addr(txb, mapping), | ||
5770 | skb_headlen(skb), | ||
5771 | PCI_DMA_TODEVICE); | ||
5772 | for (i = 0; i <= last; i++) { | ||
5773 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
5774 | |||
5775 | entry = NEXT_TX(entry); | ||
5776 | txb = &tnapi->tx_buffers[entry]; | ||
5777 | |||
5778 | pci_unmap_page(tnapi->tp->pdev, | ||
5779 | dma_unmap_addr(txb, mapping), | ||
5780 | frag->size, PCI_DMA_TODEVICE); | ||
5781 | } | ||
5782 | } | ||
5783 | |||
5761 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ | 5784 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ |
5762 | static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | 5785 | static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, |
5763 | struct sk_buff *skb, u32 last_plus_one, | 5786 | struct sk_buff *skb, |
5764 | u32 *start, u32 base_flags, u32 mss) | 5787 | u32 base_flags, u32 mss) |
5765 | { | 5788 | { |
5766 | struct tg3 *tp = tnapi->tp; | 5789 | struct tg3 *tp = tnapi->tp; |
5767 | struct sk_buff *new_skb; | 5790 | struct sk_buff *new_skb; |
5768 | dma_addr_t new_addr = 0; | 5791 | dma_addr_t new_addr = 0; |
5769 | u32 entry = *start; | 5792 | u32 entry = tnapi->tx_prod; |
5770 | int i, ret = 0; | 5793 | int ret = 0; |
5771 | 5794 | ||
5772 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) | 5795 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) |
5773 | new_skb = skb_copy(skb, GFP_ATOMIC); | 5796 | new_skb = skb_copy(skb, GFP_ATOMIC); |
@@ -5783,14 +5806,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | |||
5783 | ret = -1; | 5806 | ret = -1; |
5784 | } else { | 5807 | } else { |
5785 | /* New SKB is guaranteed to be linear. */ | 5808 | /* New SKB is guaranteed to be linear. */ |
5786 | entry = *start; | ||
5787 | new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, | 5809 | new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, |
5788 | PCI_DMA_TODEVICE); | 5810 | PCI_DMA_TODEVICE); |
5789 | /* Make sure the mapping succeeded */ | 5811 | /* Make sure the mapping succeeded */ |
5790 | if (pci_dma_mapping_error(tp->pdev, new_addr)) { | 5812 | if (pci_dma_mapping_error(tp->pdev, new_addr)) { |
5791 | ret = -1; | 5813 | ret = -1; |
5792 | dev_kfree_skb(new_skb); | 5814 | dev_kfree_skb(new_skb); |
5793 | new_skb = NULL; | ||
5794 | 5815 | ||
5795 | /* Make sure new skb does not cross any 4G boundaries. | 5816 | /* Make sure new skb does not cross any 4G boundaries. |
5796 | * Drop the packet if it does. | 5817 | * Drop the packet if it does. |
@@ -5801,39 +5822,16 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | |||
5801 | PCI_DMA_TODEVICE); | 5822 | PCI_DMA_TODEVICE); |
5802 | ret = -1; | 5823 | ret = -1; |
5803 | dev_kfree_skb(new_skb); | 5824 | dev_kfree_skb(new_skb); |
5804 | new_skb = NULL; | ||
5805 | } else { | 5825 | } else { |
5826 | tnapi->tx_buffers[entry].skb = new_skb; | ||
5827 | dma_unmap_addr_set(&tnapi->tx_buffers[entry], | ||
5828 | mapping, new_addr); | ||
5829 | |||
5806 | tg3_set_txd(tnapi, entry, new_addr, new_skb->len, | 5830 | tg3_set_txd(tnapi, entry, new_addr, new_skb->len, |
5807 | base_flags, 1 | (mss << 1)); | 5831 | base_flags, 1 | (mss << 1)); |
5808 | *start = NEXT_TX(entry); | ||
5809 | } | 5832 | } |
5810 | } | 5833 | } |
5811 | 5834 | ||
5812 | /* Now clean up the sw ring entries. */ | ||
5813 | i = 0; | ||
5814 | while (entry != last_plus_one) { | ||
5815 | int len; | ||
5816 | |||
5817 | if (i == 0) | ||
5818 | len = skb_headlen(skb); | ||
5819 | else | ||
5820 | len = skb_shinfo(skb)->frags[i-1].size; | ||
5821 | |||
5822 | pci_unmap_single(tp->pdev, | ||
5823 | dma_unmap_addr(&tnapi->tx_buffers[entry], | ||
5824 | mapping), | ||
5825 | len, PCI_DMA_TODEVICE); | ||
5826 | if (i == 0) { | ||
5827 | tnapi->tx_buffers[entry].skb = new_skb; | ||
5828 | dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | ||
5829 | new_addr); | ||
5830 | } else { | ||
5831 | tnapi->tx_buffers[entry].skb = NULL; | ||
5832 | } | ||
5833 | entry = NEXT_TX(entry); | ||
5834 | i++; | ||
5835 | } | ||
5836 | |||
5837 | dev_kfree_skb(skb); | 5835 | dev_kfree_skb(skb); |
5838 | 5836 | ||
5839 | return ret; | 5837 | return ret; |
@@ -5889,11 +5887,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
5889 | { | 5887 | { |
5890 | struct tg3 *tp = netdev_priv(dev); | 5888 | struct tg3 *tp = netdev_priv(dev); |
5891 | u32 len, entry, base_flags, mss; | 5889 | u32 len, entry, base_flags, mss; |
5892 | int would_hit_hwbug; | 5890 | int i = -1, would_hit_hwbug; |
5893 | dma_addr_t mapping; | 5891 | dma_addr_t mapping; |
5894 | struct tg3_napi *tnapi; | 5892 | struct tg3_napi *tnapi; |
5895 | struct netdev_queue *txq; | 5893 | struct netdev_queue *txq; |
5896 | unsigned int i, last; | 5894 | unsigned int last; |
5897 | 5895 | ||
5898 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 5896 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
5899 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; | 5897 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; |
@@ -6074,20 +6072,15 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
6074 | } | 6072 | } |
6075 | 6073 | ||
6076 | if (would_hit_hwbug) { | 6074 | if (would_hit_hwbug) { |
6077 | u32 last_plus_one = entry; | 6075 | tg3_skb_error_unmap(tnapi, skb, i); |
6078 | u32 start; | ||
6079 | |||
6080 | start = entry - 1 - skb_shinfo(skb)->nr_frags; | ||
6081 | start &= (TG3_TX_RING_SIZE - 1); | ||
6082 | 6076 | ||
6083 | /* If the workaround fails due to memory/mapping | 6077 | /* If the workaround fails due to memory/mapping |
6084 | * failure, silently drop this packet. | 6078 | * failure, silently drop this packet. |
6085 | */ | 6079 | */ |
6086 | if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one, | 6080 | if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss)) |
6087 | &start, base_flags, mss)) | ||
6088 | goto out_unlock; | 6081 | goto out_unlock; |
6089 | 6082 | ||
6090 | entry = start; | 6083 | entry = NEXT_TX(tnapi->tx_prod); |
6091 | } | 6084 | } |
6092 | 6085 | ||
6093 | /* Packets are ready, update Tx producer idx local and on card. */ | 6086 | /* Packets are ready, update Tx producer idx local and on card. */ |
@@ -6113,24 +6106,9 @@ out_unlock: | |||
6113 | return NETDEV_TX_OK; | 6106 | return NETDEV_TX_OK; |
6114 | 6107 | ||
6115 | dma_error: | 6108 | dma_error: |
6116 | last = i; | 6109 | tg3_skb_error_unmap(tnapi, skb, i); |
6117 | entry = tnapi->tx_prod; | ||
6118 | tnapi->tx_buffers[entry].skb = NULL; | ||
6119 | pci_unmap_single(tp->pdev, | ||
6120 | dma_unmap_addr(&tnapi->tx_buffers[entry], mapping), | ||
6121 | skb_headlen(skb), | ||
6122 | PCI_DMA_TODEVICE); | ||
6123 | for (i = 0; i <= last; i++) { | ||
6124 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
6125 | entry = NEXT_TX(entry); | ||
6126 | |||
6127 | pci_unmap_page(tp->pdev, | ||
6128 | dma_unmap_addr(&tnapi->tx_buffers[entry], | ||
6129 | mapping), | ||
6130 | frag->size, PCI_DMA_TODEVICE); | ||
6131 | } | ||
6132 | |||
6133 | dev_kfree_skb(skb); | 6110 | dev_kfree_skb(skb); |
6111 | tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; | ||
6134 | return NETDEV_TX_OK; | 6112 | return NETDEV_TX_OK; |
6135 | } | 6113 | } |
6136 | 6114 | ||