author	Stanislaw Gruszka <sgruszka@redhat.com>	2010-10-20 18:25:36 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-21 04:32:44 -0400
commit	3eafe50708deca10d155ccff597a91dcecc2d869 (patch)
tree	afb0d939214fa1553dbe94ebb88dac22ad0271f7 /drivers/net/r8169.c
parent	9bcc08939223c5a2bad42748ee53ab69f5338a32 (diff)
r8169: check dma mapping failures
Check for possible DMA mapping errors and clean up if a mapping fails. Also fix an index wrap-around bug in rtl8169_tx_clear along the way.

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
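The core pattern this patch introduces, shown as a minimal sketch using the standard kernel DMA API (the helper name example_map_tx_buf and its error-handling policy are illustrative, not taken from r8169.c): every address returned by dma_map_single() is checked with dma_mapping_error() before it is written into a descriptor, and the caller unwinds or drops the packet on failure.

#include <linux/dma-mapping.h>

/* Illustrative helper, not part of r8169.c: map one TX buffer and
 * report failure instead of handing a bogus bus address to the NIC. */
static int example_map_tx_buf(struct device *dev, void *buf, size_t len,
                              dma_addr_t *out)
{
        dma_addr_t mapping;

        mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, mapping)))
                return -ENOMEM;         /* caller frees buf / drops the skb */

        *out = mapping;
        return 0;
}

In the patch below this check is applied at each mapping site (RX buffer allocation, TX fragments, and the TX head), with matching unwind paths (kfree, rtl8169_tx_clear_range, rtl8169_unmap_tx_skb).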
Diffstat (limited to 'drivers/net/r8169.c')
-rw-r--r--  drivers/net/r8169.c | 66
1 file changed, 48 insertions(+), 18 deletions(-)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 242748b2c357..88d5826ce93d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3992,11 +3992,18 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct pci_dev *pdev,
                 if (!data)
                         return NULL;
         }
+
         mapping = dma_map_single(&pdev->dev, rtl8169_align(data), rx_buf_sz,
                                  PCI_DMA_FROMDEVICE);
+        if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+                goto err_out;
 
         rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
         return data;
+
+err_out:
+        kfree(data);
+        return NULL;
 }
 
 static void rtl8169_rx_clear(struct rtl8169_private *tp)
@@ -4080,12 +4087,13 @@ static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
         tx_skb->len = 0;
 }
 
-static void rtl8169_tx_clear(struct rtl8169_private *tp)
+static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
+                                   unsigned int n)
 {
         unsigned int i;
 
-        for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
-                unsigned int entry = i % NUM_TX_DESC;
+        for (i = 0; i < n; i++) {
+                unsigned int entry = (start + i) % NUM_TX_DESC;
                 struct ring_info *tx_skb = tp->tx_skb + entry;
                 unsigned int len = tx_skb->len;
 
@@ -4101,6 +4109,11 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
                         tp->dev->stats.tx_dropped++;
                 }
         }
+}
+
+static void rtl8169_tx_clear(struct rtl8169_private *tp)
+{
+        rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
         tp->cur_tx = tp->dirty_tx = 0;
 }
 
@@ -4219,6 +4232,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
                 addr = ((void *) page_address(frag->page)) + frag->page_offset;
                 mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
                                          PCI_DMA_TODEVICE);
+                if (unlikely(dma_mapping_error(&tp->pci_dev->dev, mapping)))
+                        goto err_out;
 
                 /* anti gcc 2.95.3 bugware (sic) */
                 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@ -4235,6 +4250,10 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
         }
 
         return cur_frag;
+
+err_out:
+        rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
+        return -EIO;
 }
 
 static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
@@ -4261,40 +4280,44 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
 {
         struct rtl8169_private *tp = netdev_priv(dev);
-        unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
+        unsigned int entry = tp->cur_tx % NUM_TX_DESC;
         struct TxDesc *txd = tp->TxDescArray + entry;
         void __iomem *ioaddr = tp->mmio_addr;
         dma_addr_t mapping;
         u32 status, len;
         u32 opts1;
+        int frags;
 
         if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
                 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
-                goto err_stop;
+                goto err_stop_0;
         }
 
         if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
-                goto err_stop;
+                goto err_stop_0;
+
+        len = skb_headlen(skb);
+        mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
+                                 PCI_DMA_TODEVICE);
+        if (unlikely(dma_mapping_error(&tp->pci_dev->dev, mapping)))
+                goto err_dma_0;
+
+        tp->tx_skb[entry].len = len;
+        txd->addr = cpu_to_le64(mapping);
+        txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
 
         opts1 = DescOwn | rtl8169_tso_csum(skb, dev);
 
         frags = rtl8169_xmit_frags(tp, skb, opts1);
-        if (frags) {
-                len = skb_headlen(skb);
+        if (frags < 0)
+                goto err_dma_1;
+        else if (frags)
                 opts1 |= FirstFrag;
-        } else {
-                len = skb->len;
+        else {
                 opts1 |= FirstFrag | LastFrag;
                 tp->tx_skb[entry].skb = skb;
         }
 
-        mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
-                                 PCI_DMA_TODEVICE);
-
-        tp->tx_skb[entry].len = len;
-        txd->addr = cpu_to_le64(mapping);
-        txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
-
         wmb();
 
         /* anti gcc 2.95.3 bugware (sic) */
@@ -4316,7 +4339,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
         return NETDEV_TX_OK;
 
-err_stop:
+err_dma_1:
+        rtl8169_unmap_tx_skb(tp->pci_dev, tp->tx_skb + entry, txd);
+err_dma_0:
+        dev_kfree_skb(skb);
+        dev->stats.tx_dropped++;
+        return NETDEV_TX_OK;
+
+err_stop_0:
         netif_stop_queue(dev);
         dev->stats.tx_dropped++;
         return NETDEV_TX_BUSY;