diff options
author | Neil Horman <nhorman@tuxdriver.com> | 2014-09-17 09:04:44 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-09-19 16:28:57 -0400 |
commit | 6f2b6a3005b2c34c39f207a87667564f64f2f91a (patch) | |
tree | e3c52b316f3920596359191cd51a114f8960563e | |
parent | f6f2332dce0efeea8c5653b6e9d1e8c379ace65c (diff) |
3c59x: Add dma error checking and recovery
It was noted that 3c59x has no checks on transmit for failed DMA mappings, and no
ability to unmap fragments when a single map fails in the middle of a transmit.
This patch provides error checking to ensure that dma mappings work properly,
and unrolls an skb mapping if a fragmented skb transmission has a mapping
failure to prevent leaks.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Linux Kernel list <linux-kernel@vger.kernel.org>
CC: "David S. Miller" <davem@davemloft.net>
CC: Meelis Roos <mroos@linux.ee>
Tested-by: Meelis Roos <mroos@linux.ee>
-rw-r--r-- | drivers/net/ethernet/3com/3c59x.c | 50 |
1 file changed, 41 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 3fe45c705933..8ab87ff6a9e6 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c | |||
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2129 | int entry = vp->cur_tx % TX_RING_SIZE; | 2129 | int entry = vp->cur_tx % TX_RING_SIZE; |
2130 | struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; | 2130 | struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; |
2131 | unsigned long flags; | 2131 | unsigned long flags; |
2132 | dma_addr_t dma_addr; | ||
2132 | 2133 | ||
2133 | if (vortex_debug > 6) { | 2134 | if (vortex_debug > 6) { |
2134 | pr_debug("boomerang_start_xmit()\n"); | 2135 | pr_debug("boomerang_start_xmit()\n"); |
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2163 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); | 2164 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); |
2164 | 2165 | ||
2165 | if (!skb_shinfo(skb)->nr_frags) { | 2166 | if (!skb_shinfo(skb)->nr_frags) { |
2166 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | 2167 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, |
2167 | skb->len, PCI_DMA_TODEVICE)); | 2168 | PCI_DMA_TODEVICE); |
2169 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
2170 | goto out_dma_err; | ||
2171 | |||
2172 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | ||
2168 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); | 2173 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); |
2169 | } else { | 2174 | } else { |
2170 | int i; | 2175 | int i; |
2171 | 2176 | ||
2172 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | 2177 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, |
2173 | skb_headlen(skb), PCI_DMA_TODEVICE)); | 2178 | skb_headlen(skb), PCI_DMA_TODEVICE); |
2179 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
2180 | goto out_dma_err; | ||
2181 | |||
2182 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | ||
2174 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); | 2183 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); |
2175 | 2184 | ||
2176 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 2185 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
2177 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2186 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2178 | 2187 | ||
2188 | dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, | ||
2189 | frag->page_offset, | ||
2190 | frag->size, | ||
2191 | DMA_TO_DEVICE); | ||
2192 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { | ||
2193 | for(i = i-1; i >= 0; i--) | ||
2194 | dma_unmap_page(&VORTEX_PCI(vp)->dev, | ||
2195 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), | ||
2196 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), | ||
2197 | DMA_TO_DEVICE); | ||
2198 | |||
2199 | pci_unmap_single(VORTEX_PCI(vp), | ||
2200 | le32_to_cpu(vp->tx_ring[entry].frag[0].addr), | ||
2201 | le32_to_cpu(vp->tx_ring[entry].frag[0].length), | ||
2202 | PCI_DMA_TODEVICE); | ||
2203 | |||
2204 | goto out_dma_err; | ||
2205 | } | ||
2206 | |||
2179 | vp->tx_ring[entry].frag[i+1].addr = | 2207 | vp->tx_ring[entry].frag[i+1].addr = |
2180 | cpu_to_le32(skb_frag_dma_map( | 2208 | cpu_to_le32(dma_addr); |
2181 | &VORTEX_PCI(vp)->dev, | ||
2182 | frag, | ||
2183 | frag->page_offset, frag->size, DMA_TO_DEVICE)); | ||
2184 | 2209 | ||
2185 | if (i == skb_shinfo(skb)->nr_frags-1) | 2210 | if (i == skb_shinfo(skb)->nr_frags-1) |
2186 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); | 2211 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); |
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2189 | } | 2214 | } |
2190 | } | 2215 | } |
2191 | #else | 2216 | #else |
2192 | vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); | 2217 | dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); |
2218 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | ||
2219 | goto out_dma_err; | ||
2220 | vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); | ||
2193 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); | 2221 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); |
2194 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); | 2222 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); |
2195 | #endif | 2223 | #endif |
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2217 | skb_tx_timestamp(skb); | 2245 | skb_tx_timestamp(skb); |
2218 | iowrite16(DownUnstall, ioaddr + EL3_CMD); | 2246 | iowrite16(DownUnstall, ioaddr + EL3_CMD); |
2219 | spin_unlock_irqrestore(&vp->lock, flags); | 2247 | spin_unlock_irqrestore(&vp->lock, flags); |
2248 | out: | ||
2220 | return NETDEV_TX_OK; | 2249 | return NETDEV_TX_OK; |
2250 | out_dma_err: | ||
2251 | dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); | ||
2252 | goto out; | ||
2221 | } | 2253 | } |
2222 | 2254 | ||
2223 | /* The interrupt handler does all of the Rx thread work and cleans up | 2255 | /* The interrupt handler does all of the Rx thread work and cleans up |