diff options
author | Stephen Hemminger <shemminger@vyatta.com> | 2009-02-03 06:27:28 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-02-03 18:08:35 -0500 |
commit | 454e6cb6868dd5c88d8bcdab407caa3738d30c2b (patch) | |
tree | 182b21717db92f80f4af73c7cae4884a2e22d443 /drivers/net/sky2.c | |
parent | 1bd68c04850b9e73f1c7022b9a8c38cd14ceb37d (diff) |
sky2: handle dma mapping errors
On non-x86 platforms it is possible to run out of DMA mapping resources.
The driver was ignoring this and could cause corruption.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sky2.c')
-rw-r--r-- | drivers/net/sky2.c | 64 |
1 file changed, 56 insertions, 8 deletions
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index db925085c185..8b9b88457267 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -1068,13 +1068,16 @@ static void sky2_rx_submit(struct sky2_port *sky2, | |||
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | 1070 | ||
1071 | static void sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, | 1071 | static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, |
1072 | unsigned size) | 1072 | unsigned size) |
1073 | { | 1073 | { |
1074 | struct sk_buff *skb = re->skb; | 1074 | struct sk_buff *skb = re->skb; |
1075 | int i; | 1075 | int i; |
1076 | 1076 | ||
1077 | re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE); | 1077 | re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE); |
1078 | if (unlikely(pci_dma_mapping_error(pdev, re->data_addr))) | ||
1079 | return -EIO; | ||
1080 | |||
1078 | pci_unmap_len_set(re, data_size, size); | 1081 | pci_unmap_len_set(re, data_size, size); |
1079 | 1082 | ||
1080 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | 1083 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
@@ -1083,6 +1086,7 @@ static void sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, | |||
1083 | skb_shinfo(skb)->frags[i].page_offset, | 1086 | skb_shinfo(skb)->frags[i].page_offset, |
1084 | skb_shinfo(skb)->frags[i].size, | 1087 | skb_shinfo(skb)->frags[i].size, |
1085 | PCI_DMA_FROMDEVICE); | 1088 | PCI_DMA_FROMDEVICE); |
1089 | return 0; | ||
1086 | } | 1090 | } |
1087 | 1091 | ||
1088 | static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) | 1092 | static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) |
@@ -1354,7 +1358,12 @@ static int sky2_rx_start(struct sky2_port *sky2) | |||
1354 | if (!re->skb) | 1358 | if (!re->skb) |
1355 | goto nomem; | 1359 | goto nomem; |
1356 | 1360 | ||
1357 | sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size); | 1361 | if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) { |
1362 | dev_kfree_skb(re->skb); | ||
1363 | re->skb = NULL; | ||
1364 | goto nomem; | ||
1365 | } | ||
1366 | |||
1358 | sky2_rx_submit(sky2, re); | 1367 | sky2_rx_submit(sky2, re); |
1359 | } | 1368 | } |
1360 | 1369 | ||
@@ -1547,7 +1556,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1547 | struct sky2_hw *hw = sky2->hw; | 1556 | struct sky2_hw *hw = sky2->hw; |
1548 | struct sky2_tx_le *le = NULL; | 1557 | struct sky2_tx_le *le = NULL; |
1549 | struct tx_ring_info *re; | 1558 | struct tx_ring_info *re; |
1550 | unsigned i, len; | 1559 | unsigned i, len, first_slot; |
1551 | dma_addr_t mapping; | 1560 | dma_addr_t mapping; |
1552 | u16 mss; | 1561 | u16 mss; |
1553 | u8 ctrl; | 1562 | u8 ctrl; |
@@ -1555,13 +1564,17 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1555 | if (unlikely(tx_avail(sky2) < tx_le_req(skb))) | 1564 | if (unlikely(tx_avail(sky2) < tx_le_req(skb))) |
1556 | return NETDEV_TX_BUSY; | 1565 | return NETDEV_TX_BUSY; |
1557 | 1566 | ||
1558 | if (unlikely(netif_msg_tx_queued(sky2))) | ||
1559 | printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n", | ||
1560 | dev->name, sky2->tx_prod, skb->len); | ||
1561 | |||
1562 | len = skb_headlen(skb); | 1567 | len = skb_headlen(skb); |
1563 | mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); | 1568 | mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); |
1564 | 1569 | ||
1570 | if (pci_dma_mapping_error(hw->pdev, mapping)) | ||
1571 | goto mapping_error; | ||
1572 | |||
1573 | first_slot = sky2->tx_prod; | ||
1574 | if (unlikely(netif_msg_tx_queued(sky2))) | ||
1575 | printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n", | ||
1576 | dev->name, first_slot, skb->len); | ||
1577 | |||
1565 | /* Send high bits if needed */ | 1578 | /* Send high bits if needed */ |
1566 | if (sizeof(dma_addr_t) > sizeof(u32)) { | 1579 | if (sizeof(dma_addr_t) > sizeof(u32)) { |
1567 | le = get_tx_le(sky2); | 1580 | le = get_tx_le(sky2); |
@@ -1648,6 +1661,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1648 | mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, | 1661 | mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, |
1649 | frag->size, PCI_DMA_TODEVICE); | 1662 | frag->size, PCI_DMA_TODEVICE); |
1650 | 1663 | ||
1664 | if (pci_dma_mapping_error(hw->pdev, mapping)) | ||
1665 | goto mapping_unwind; | ||
1666 | |||
1651 | if (sizeof(dma_addr_t) > sizeof(u32)) { | 1667 | if (sizeof(dma_addr_t) > sizeof(u32)) { |
1652 | le = get_tx_le(sky2); | 1668 | le = get_tx_le(sky2); |
1653 | le->addr = cpu_to_le32(upper_32_bits(mapping)); | 1669 | le->addr = cpu_to_le32(upper_32_bits(mapping)); |
@@ -1676,6 +1692,34 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1676 | 1692 | ||
1677 | dev->trans_start = jiffies; | 1693 | dev->trans_start = jiffies; |
1678 | return NETDEV_TX_OK; | 1694 | return NETDEV_TX_OK; |
1695 | |||
1696 | mapping_unwind: | ||
1697 | for (i = first_slot; i != sky2->tx_prod; i = RING_NEXT(i, TX_RING_SIZE)) { | ||
1698 | le = sky2->tx_le + i; | ||
1699 | re = sky2->tx_ring + i; | ||
1700 | |||
1701 | switch(le->opcode & ~HW_OWNER) { | ||
1702 | case OP_LARGESEND: | ||
1703 | case OP_PACKET: | ||
1704 | pci_unmap_single(hw->pdev, | ||
1705 | pci_unmap_addr(re, mapaddr), | ||
1706 | pci_unmap_len(re, maplen), | ||
1707 | PCI_DMA_TODEVICE); | ||
1708 | break; | ||
1709 | case OP_BUFFER: | ||
1710 | pci_unmap_page(hw->pdev, pci_unmap_addr(re, mapaddr), | ||
1711 | pci_unmap_len(re, maplen), | ||
1712 | PCI_DMA_TODEVICE); | ||
1713 | break; | ||
1714 | } | ||
1715 | } | ||
1716 | |||
1717 | sky2->tx_prod = first_slot; | ||
1718 | mapping_error: | ||
1719 | if (net_ratelimit()) | ||
1720 | dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); | ||
1721 | dev_kfree_skb(skb); | ||
1722 | return NETDEV_TX_OK; | ||
1679 | } | 1723 | } |
1680 | 1724 | ||
1681 | /* | 1725 | /* |
@@ -2191,7 +2235,11 @@ static struct sk_buff *receive_new(struct sky2_port *sky2, | |||
2191 | 2235 | ||
2192 | prefetch(skb->data); | 2236 | prefetch(skb->data); |
2193 | re->skb = nskb; | 2237 | re->skb = nskb; |
2194 | sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space); | 2238 | if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) { |
2239 | dev_kfree_skb(nskb); | ||
2240 | re->skb = skb; | ||
2241 | return NULL; | ||
2242 | } | ||
2195 | 2243 | ||
2196 | if (skb_shinfo(skb)->nr_frags) | 2244 | if (skb_shinfo(skb)->nr_frags) |
2197 | skb_put_frags(skb, hdr_space, length); | 2245 | skb_put_frags(skb, hdr_space, length); |