Diffstat (limited to 'drivers/net/ethernet/marvell/skge.c')
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 71
1 file changed, 13 insertions(+), 58 deletions(-)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index edb9bda55d55..33947ac595c0 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,20 +931,17 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static int skge_rx_setup(struct pci_dev *pdev,
-			 struct skge_element *e,
-			 struct sk_buff *skb, unsigned int bufsize)
+static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+			  struct sk_buff *skb, unsigned int bufsize)
 {
 	struct skge_rx_desc *rd = e->desc;
-	dma_addr_t map;
+	u64 map;
 
-	map = pci_map_single(pdev, skb->data, bufsize,
+	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
 			     PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(pdev, map))
-		goto mapping_error;
 
-	rd->dma_lo = lower_32_bits(map);
-	rd->dma_hi = upper_32_bits(map);
+	rd->dma_lo = map;
+	rd->dma_hi = map >> 32;
 	e->skb = skb;
 	rd->csum1_start = ETH_HLEN;
 	rd->csum2_start = ETH_HLEN;
@@ -956,13 +953,6 @@ static int skge_rx_setup(struct pci_dev *pdev,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, bufsize);
-	return 0;
-
-mapping_error:
-	if (net_ratelimit())
-		dev_warn(&pdev->dev, "%s: rx mapping error\n",
-			 skb->dev->name);
-	return -EIO;
 }
 
 /* Resume receiving using existing skb,
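The two hunks above revert skge_rx_setup() to its pre-error-handling form: it takes the skge_port again instead of a bare pci_dev, returns void, and no longer checks whether pci_map_single() failed. For reference, the pattern being removed is the standard legacy-PCI DMA mapping check; a minimal sketch follows (the kernel calls are real, the helper around them is hypothetical):

/* Hypothetical helper illustrating the check this revert drops. */
static int example_rx_map(struct pci_dev *pdev, struct sk_buff *skb,
			  unsigned int bufsize, dma_addr_t *mapp)
{
	dma_addr_t map = pci_map_single(pdev, skb->data, bufsize,
					PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, map))
		return -EIO;	/* caller must free the skb and back out */

	*mapp = map;
	return 0;
}

With the check gone, skge_rx_setup() cannot fail, which is what lets the callers in the hunks below shed their error paths.
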
@@ -1024,11 +1014,7 @@ static int skge_rx_fill(struct net_device *dev)
 			return -ENOMEM;
 
 		skb_reserve(skb, NET_IP_ALIGN);
-		if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
-			kfree_skb(skb);
-			return -ENOMEM;
-		}
-
+		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
 	} while ((e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
@@ -2743,7 +2729,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
-	dma_addr_t map;
+	u64 map;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
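Note the type change back from dma_addr_t to u64. The hardware descriptor stores the address as two 32-bit words, and dma_addr_t is only 32 bits wide on many configurations, so the restored code widens to u64 before shifting. A sketch of the split, assuming the driver's dma_lo/dma_hi descriptor layout (the helper itself is hypothetical):

/* Hypothetical helper; only dma_lo/dma_hi come from the driver. */
static void set_desc_addr(struct skge_tx_desc *td, dma_addr_t addr)
{
	u64 map = addr;		/* dma_addr_t may be only 32 bits */

	td->dma_lo = map;	/* bits 31:0, truncating store */
	td->dma_hi = map >> 32;	/* bits 63:32, zero with 32-bit DMA */
}

The deleted version expressed the same split with the lower_32_bits()/upper_32_bits() helpers, which are safe to apply directly to a dma_addr_t of either width.
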
@@ -2757,14 +2743,11 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(hw->pdev, map))
-		goto mapping_error;
-
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, len);
 
-	td->dma_lo = lower_32_bits(map);
-	td->dma_hi = upper_32_bits(map);
+	td->dma_lo = map;
+	td->dma_hi = map >> 32;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const int offset = skb_checksum_start_offset(skb);
@@ -2795,16 +2778,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
 					       skb_frag_size(frag), DMA_TO_DEVICE);
-			if (dma_mapping_error(&hw->pdev->dev, map))
-				goto mapping_unwind;
 
 			e = e->next;
 			e->skb = skb;
 			tf = e->desc;
 			BUG_ON(tf->control & BMU_OWN);
 
-			tf->dma_lo = lower_32_bits(map);
-			tf->dma_hi = upper_32_bits(map);
+			tf->dma_lo = map;
+			tf->dma_hi = (u64) map >> 32;
 			dma_unmap_addr_set(e, mapaddr, map);
 			dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2834,28 +2815,6 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	}
 
 	return NETDEV_TX_OK;
-
-mapping_unwind:
-	/* unroll any pages that were already mapped. */
-	if (e != skge->tx_ring.to_use) {
-		struct skge_element *u;
-
-		for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
-			pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
-				       dma_unmap_len(u, maplen),
-				       PCI_DMA_TODEVICE);
-		e = skge->tx_ring.to_use;
-	}
-	/* undo the mapping for the skb header */
-	pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
-			 dma_unmap_len(e, maplen),
-			 PCI_DMA_TODEVICE);
-mapping_error:
-	/* mapping error causes error message and packet to be discarded. */
-	if (net_ratelimit())
-		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
-	dev_kfree_skb(skb);
-	return NETDEV_TX_OK;
 }
 
 
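The bulk of the deletions is the transmit error path. The removed mapping_unwind block is a textbook unwind for a multi-fragment transmit: every fragment already mapped is released with pci_unmap_page(), the skb header mapping is released with pci_unmap_single(), and the packet is dropped while still returning NETDEV_TX_OK so the stack does not requeue it. Condensed into a standalone sketch (the element fields mirror the driver; the function itself is hypothetical):

/* Hypothetical condensation of the deleted mapping_unwind code. */
static void unwind_tx_mappings(struct pci_dev *pdev,
			       struct skge_element *first,
			       struct skge_element *failed)
{
	struct skge_element *u;

	/* fragments after the head were mapped as pages */
	for (u = first->next; u != failed; u = u->next)
		pci_unmap_page(pdev, dma_unmap_addr(u, mapaddr),
			       dma_unmap_len(u, maplen),
			       PCI_DMA_TODEVICE);

	/* the linear head was mapped with pci_map_single() */
	pci_unmap_single(pdev, dma_unmap_addr(first, mapaddr),
			 dma_unmap_len(first, maplen),
			 PCI_DMA_TODEVICE);
}

Since skge_xmit_frame() no longer bails out mid-ring after this change, there is nothing left to unwind and the whole block goes.
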
@@ -3099,17 +3058,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		if (!nskb)
 			goto resubmit;
 
-		if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
-			dev_kfree_skb(nskb);
-			goto resubmit;
-		}
-
 		pci_unmap_single(skge->hw->pdev,
 				 dma_unmap_addr(e, mapaddr),
 				 dma_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
 		prefetch(skb->data);
+		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
 	}
 
 	skb_put(skb, len);
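The last hunk also reorders the receive buffer swap: the restored code unmaps the filled buffer and takes e->skb first, and only then re-arms the element with the replacement skb. Because skge_rx_setup() sets BMU_OWN in the descriptor's control word (see the context in the second hunk), this keeps the descriptor out of the hardware's hands until the new mapping is fully written. A condensed view of the new-side flow, with names as in the driver:

/* Condensed from the new-side lines of the hunk above. */
pci_unmap_single(skge->hw->pdev,
		 dma_unmap_addr(e, mapaddr),
		 dma_unmap_len(e, maplen),
		 PCI_DMA_FROMDEVICE);
skb = e->skb;				/* take the buffer the NIC filled */
prefetch(skb->data);
skge_rx_setup(skge, e, nskb, skge->rx_buf_size);	/* re-arm the element */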