 drivers/net/ethernet/marvell/skge.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 58 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index f580f0535bb4..299c33bd5345 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
-			  struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct pci_dev *pdev,
+			 struct skge_element *e,
+			 struct sk_buff *skb, unsigned int bufsize)
 {
 	struct skge_rx_desc *rd = e->desc;
-	u64 map;
+	dma_addr_t map;
 
-	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
-			     PCI_DMA_FROMDEVICE);
+	map = pci_map_single(pdev, skb->data, bufsize,
+			     PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(pdev, map))
+		goto mapping_error;
 
-	rd->dma_lo = map;
-	rd->dma_hi = map >> 32;
+	rd->dma_lo = lower_32_bits(map);
+	rd->dma_hi = upper_32_bits(map);
 	e->skb = skb;
 	rd->csum1_start = ETH_HLEN;
 	rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,13 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, bufsize);
+	return 0;
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&pdev->dev, "%s: rx mapping error\n",
+			 skb->dev->name);
+	return -EIO;
 }
 
 /* Resume receiving using existing skb,
@@ -1014,7 +1024,11 @@ static int skge_rx_fill(struct net_device *dev)
 			return -ENOMEM;
 
 		skb_reserve(skb, NET_IP_ALIGN);
-		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+		if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
+			kfree_skb(skb);
+			return -ENOMEM;
+		}
+
 	} while ((e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
@@ -2729,7 +2743,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
-	u64 map;
+	dma_addr_t map;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
@@ -2743,11 +2757,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(hw->pdev, map))
+		goto mapping_error;
+
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, len);
 
-	td->dma_lo = map;
-	td->dma_hi = map >> 32;
+	td->dma_lo = lower_32_bits(map);
+	td->dma_hi = upper_32_bits(map);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2795,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
 					       skb_frag_size(frag), DMA_TO_DEVICE);
+			if (dma_mapping_error(&hw->pdev->dev, map))
+				goto mapping_unwind;
 
 			e = e->next;
 			e->skb = skb;
 			tf = e->desc;
 			BUG_ON(tf->control & BMU_OWN);
 
-			tf->dma_lo = map;
-			tf->dma_hi = (u64) map >> 32;
+			tf->dma_lo = lower_32_bits(map);
+			tf->dma_hi = upper_32_bits(map);
 			dma_unmap_addr_set(e, mapaddr, map);
 			dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2813,6 +2832,28 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	}
 
 	return NETDEV_TX_OK;
+
+mapping_unwind:
+	/* unroll any pages that were already mapped. */
+	if (e != skge->tx_ring.to_use) {
+		struct skge_element *u;
+
+		for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
+			pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
+				       dma_unmap_len(u, maplen),
+				       PCI_DMA_TODEVICE);
+		e = skge->tx_ring.to_use;
+	}
+	/* undo the mapping for the skb header */
+	pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
+			 dma_unmap_len(e, maplen),
+			 PCI_DMA_TODEVICE);
+mapping_error:
+	/* mapping error causes error message and packet to be discarded. */
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 
@@ -3060,13 +3101,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		if (!nskb)
 			goto resubmit;
 
+		if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
+			dev_kfree_skb(nskb);
+			goto resubmit;
+		}
+
 		pci_unmap_single(skge->hw->pdev,
 				 dma_unmap_addr(e, mapaddr),
 				 dma_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
 		prefetch(skb->data);
-		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
 	}
 
 	skb_put(skb, len);
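
The pattern the patch applies throughout is the same: check every streaming DMA mapping with pci_dma_mapping_error() before the bus address is written into a hardware descriptor, and report failure to the caller instead of handing the NIC a bogus address. Below is a minimal sketch of that pattern, assuming the legacy PCI DMA API this driver uses; struct my_rx_desc, buf, and len are hypothetical placeholders, not skge state.

#include <linux/pci.h>
#include <linux/kernel.h>

/* Hypothetical descriptor with split address fields, mirroring
 * rd->dma_lo/dma_hi in skge_rx_setup() above. */
struct my_rx_desc {
	u32 dma_lo;
	u32 dma_hi;
};

static int map_rx_buffer(struct pci_dev *pdev, struct my_rx_desc *desc,
			 void *buf, size_t len)
{
	dma_addr_t map;

	map = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, map))
		return -EIO;	/* never hand an unchecked address to the NIC */

	/* split the (possibly 64-bit) bus address across the descriptor */
	desc->dma_lo = lower_32_bits(map);
	desc->dma_hi = upper_32_bits(map);
	return 0;
}

On the receive side the ordering matters as much as the check: skge_rx_get() now calls skge_rx_setup() on the replacement buffer before the filled buffer is unmapped, so a mapping failure can simply free the replacement and resubmit the existing buffer instead of losing a ring slot.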
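
Transmit needs one extra step: by the time mapping fragment N fails, the header and fragments 0..N-1 are already mapped and must be released before the packet is dropped, which is what the mapping_unwind path above is for. The sketch below restates that walk-back as a standalone helper, reusing the skge element/ring names as an assumption; it illustrates the invariant any unwind must keep (release exactly what was mapped, header via pci_unmap_single(), fragments via pci_unmap_page()) rather than copying the driver code line for line.

/* Sketch: release every mapping made for a partially mapped packet.
 * 'ring->to_use' is the first element of the packet (header mapping);
 * 'last' is the last element that was successfully mapped.
 */
static void unwind_tx_mappings(struct pci_dev *pdev, struct skge_ring *ring,
			       struct skge_element *last)
{
	struct skge_element *u = ring->to_use;

	/* the header was mapped with pci_map_single() */
	pci_unmap_single(pdev, dma_unmap_addr(u, mapaddr),
			 dma_unmap_len(u, maplen), PCI_DMA_TODEVICE);

	/* fragments were mapped page-wise; walk them through 'last',
	 * doing nothing when no fragment was mapped (last == to_use) */
	while (u != last) {
		u = u->next;
		pci_unmap_page(pdev, dma_unmap_addr(u, mapaddr),
			       dma_unmap_len(u, maplen), PCI_DMA_TODEVICE);
	}
}

After the unwind the packet is freed with dev_kfree_skb() and the function still returns NETDEV_TX_OK: a mapping failure discards one packet but must not stall the transmit queue.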