author:    stephen hemminger <stephen@networkplumber.org>  2013-08-04 20:22:34 -0400
committer: David S. Miller <davem@davemloft.net>  2013-08-04 21:35:01 -0400
commit:    136d8f377e1575463b47840bc5f1b22d94bf8f63
tree:      a398c9c221af33a0c7085a630a134224609f8a50 /drivers/net
parent:    72a67a94bcba71a5fddd6b3596a20604d2b5dcd6
skge: add dma_mapping check
This old driver never checked for DMA mapping errors, causing splats
with the new DMA mapping checks:

    WARNING: at lib/dma-debug.c:937 check_unmap+0x47b/0x930()
    skge 0000:01:09.0: DMA-API: device driver failed to check map

Add checks and unwind code.

Reported-by: poma <pomidorabelisima@gmail.com>
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
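For reference, the pattern the patch applies on the transmit path looks like the sketch below: test every mapping with the matching *_mapping_error() helper before handing the address to hardware, and on failure unwind whatever was already mapped before dropping the packet. This is a minimal illustration only, not code from skge.c; struct my_priv, my_xmit(), head_map, and frag_map are hypothetical names, and the sketch assumes the same pci_* DMA wrappers the patch itself uses.

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical per-device state; a real driver keeps the mapping info
 * in its descriptor ring, as skge does with dma_unmap_addr_set(). */
struct my_priv {
	struct pci_dev *pdev;
	dma_addr_t head_map;
	dma_addr_t frag_map[MAX_SKB_FRAGS];
};

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;
	dma_addr_t map;
	int i;

	/* Map the linear part of the skb; check before using the address. */
	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto mapping_error;
	priv->head_map = map;

	/* Map each paged fragment; jump to the unwind path on failure. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		map = skb_frag_dma_map(&pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto mapping_unwind;
		priv->frag_map[i] = map;
	}

	/* ... post descriptors to the hardware here ... */
	return NETDEV_TX_OK;

mapping_unwind:
	/* Undo the fragment mappings completed before the failure, */
	while (i-- > 0)
		pci_unmap_page(pdev, priv->frag_map[i],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	/* then the linear mapping that preceded them. */
	pci_unmap_single(pdev, priv->head_map, skb_headlen(skb),
			 PCI_DMA_TODEVICE);

mapping_error:
	/* Drop the packet; returning NETDEV_TX_BUSY would only retry a
	 * mapping that is likely to fail again. */
	if (net_ratelimit())
		dev_warn(&pdev->dev, "%s: tx mapping error\n", dev->name);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

The patch below does the same thing without a side array: it records each mapping in the ring element with dma_unmap_addr_set()/dma_unmap_len_set() and walks the ring backwards in its unwind path.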
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 48
1 file changed, 42 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c896079728e1..008cfa3173e6 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,15 +931,18 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }

 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
-			  struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+			 struct sk_buff *skb, unsigned int bufsize)
 {
 	struct skge_rx_desc *rd = e->desc;
-	u64 map;
+	dma_addr_t map;

 	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
 			     PCI_DMA_FROMDEVICE);

+	if (pci_dma_mapping_error(skge->hw->pdev, map))
+		return -1;
+
 	rd->dma_lo = map;
 	rd->dma_hi = map >> 32;
 	e->skb = skb;
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, bufsize);
+	return 0;
 }

 /* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
 			return -ENOMEM;

 		skb_reserve(skb, NET_IP_ALIGN);
-		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+		if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(skb);
+			return -EIO;
+		}
 	} while ((e = e->next) != ring->start);

 	ring->to_clean = ring->start;
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
-	u64 map;
+	dma_addr_t map;

 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
@@ -2743,6 +2750,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(hw->pdev, map))
+		goto mapping_error;
+
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, len);

@@ -2778,6 +2788,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,

 			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
 					       skb_frag_size(frag), DMA_TO_DEVICE);
+			if (dma_mapping_error(&hw->pdev->dev, map))
+				goto mapping_unwind;

 			e = e->next;
 			e->skb = skb;
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	}

 	return NETDEV_TX_OK;
+
+mapping_unwind:
+	e = skge->tx_ring.to_use;
+	pci_unmap_single(hw->pdev,
+			 dma_unmap_addr(e, mapaddr),
+			 dma_unmap_len(e, maplen),
+			 PCI_DMA_TODEVICE);
+	while (i-- > 0) {
+		e = e->next;
+		pci_unmap_page(hw->pdev,
+			       dma_unmap_addr(e, mapaddr),
+			       dma_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);
+	}
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }


@@ -3058,13 +3090,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		if (!nskb)
 			goto resubmit;

+		if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(nskb);
+			goto resubmit;
+		}
+
 		pci_unmap_single(skge->hw->pdev,
 				 dma_unmap_addr(e, mapaddr),
 				 dma_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
 		prefetch(skb->data);
-		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
 	}

 	skb_put(skb, len);