| author | David S. Miller <davem@davemloft.net> | 2013-08-16 18:37:26 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2013-08-16 18:37:26 -0400 |
| commit | 2ff1cf12c9fe70e75e600404e6a4274b19d293ed (patch) | |
| tree | beafddac0a8098e3f07d2ec60e44a2a7d006e605 /drivers/net/ethernet/marvell | |
| parent | 16b304f3404f8e0243d5ee2b70b68767b7b59b2b (diff) | |
| parent | 0f7dd1aa8f959216f1faa71513b9d3c1a9065e5a (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers/net/ethernet/marvell')

    -rw-r--r--  drivers/net/ethernet/marvell/skge.c  68

1 file changed, 53 insertions(+), 15 deletions(-)
```diff
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c896079728e1..ef94a591f9e5 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
-                          struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+                         struct sk_buff *skb, unsigned int bufsize)
 {
         struct skge_rx_desc *rd = e->desc;
-        u64 map;
+        dma_addr_t map;
 
         map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
                              PCI_DMA_FROMDEVICE);
 
-        rd->dma_lo = map;
-        rd->dma_hi = map >> 32;
+        if (pci_dma_mapping_error(skge->hw->pdev, map))
+                return -1;
+
+        rd->dma_lo = lower_32_bits(map);
+        rd->dma_hi = upper_32_bits(map);
         e->skb = skb;
         rd->csum1_start = ETH_HLEN;
         rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
         rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
         dma_unmap_addr_set(e, mapaddr, map);
         dma_unmap_len_set(e, maplen, bufsize);
+        return 0;
 }
 
 /* Resume receiving using existing skb,
```
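These hunks (and the transmit-path hunks below) retype `map` from `u64` to `dma_addr_t`, the type the DMA API actually returns, and replace open-coded `map >> 32` shifts with the kernel's `lower_32_bits()`/`upper_32_bits()` helpers. The helpers matter because `dma_addr_t` is only 32 bits wide on many platforms, and shifting a 32-bit value right by 32 is undefined behaviour in C. A minimal userspace model of the two macros (the definitions mirror include/linux/kernel.h):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;    /* e.g. a 32-bit platform without 64-bit DMA */

/* Mirrors the kernel macros: upper_32_bits() shifts twice by 16 so the
 * expression stays well-defined even when n is a 32-bit type, whereas a
 * direct "n >> 32" on a 32-bit value is undefined behaviour. */
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
        dma_addr_t map = 0xdeadbeef;    /* hypothetical mapped address */

        printf("dma_lo=0x%08x dma_hi=0x%08x\n",
               lower_32_bits(map), upper_32_bits(map));
        return 0;
}
```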
```diff
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
                         return -ENOMEM;
 
                 skb_reserve(skb, NET_IP_ALIGN);
-                skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+                if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+                        dev_kfree_skb(skb);
+                        return -EIO;
+                }
         } while ((e = e->next) != ring->start);
 
         ring->to_clean = ring->start;
```
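In `skge_rx_fill()` the setup call is now checked; on failure the caller still owns the freshly allocated skb, so it frees it and propagates `-EIO`. A compilable toy model of that ownership rule (all names are illustrative stand-ins; the stub mapper never actually fails here, but the caller is written as if it may):

```c
#include <stdlib.h>

/* Stand-in for pci_map_single() + pci_dma_mapping_error(). On failure
 * the buffer is NOT consumed; it stays with the caller. */
static int rx_setup(void *buf)
{
        if (buf == NULL)
                return -1;
        /* ... write descriptor, hand the buffer to the hardware ... */
        return 0;
}

static int rx_fill(int nbufs)
{
        for (int i = 0; i < nbufs; i++) {
                void *buf = malloc(2048);

                if (!buf)
                        return -12;             /* cf. -ENOMEM */
                if (rx_setup(buf) < 0) {
                        free(buf);              /* cf. dev_kfree_skb(skb) */
                        return -5;              /* cf. -EIO */
                }
        }
        return 0;       /* buffers handed to the "hardware" are reclaimed at exit */
}

int main(void)
{
        return rx_fill(4) ? 1 : 0;
}
```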
```diff
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev)
 
         BUG_ON(skge->dma & 7);
 
-        if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+        if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
                 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
                 err = -EINVAL;
                 goto free_pci_mem;
```
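The rewritten check in `skge_up()` is behaviourally the same as the old shift expression: the ring memory must not straddle a 4 GiB boundary, because the high 32 bits of the descriptor addresses have to be constant across the region. A userspace model of the predicate:

```c
#include <stdint.h>
#include <stdio.h>

static inline uint32_t upper_32(uint64_t v)     /* cf. upper_32_bits() */
{
        return (uint32_t)(v >> 32);
}

/* A region crosses a 4 GiB boundary iff its first and one-past-last
 * addresses disagree in their high 32 bits: the predicate skge_up()
 * now spells with upper_32_bits() instead of open-coded shifts. */
static int crosses_4g(uint64_t dma, uint64_t size)
{
        return upper_32(dma) != upper_32(dma + size);
}

int main(void)
{
        /* Hypothetical addresses, for illustration only. */
        printf("%d\n", crosses_4g(0xfffff000ull, 0x2000));      /* 1: crosses */
        printf("%d\n", crosses_4g(0x100000000ull, 0x2000));     /* 0: fits    */
        return 0;
}
```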
```diff
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
         struct skge_tx_desc *td;
         int i;
         u32 control, len;
-        u64 map;
+        dma_addr_t map;
 
         if (skb_padto(skb, ETH_ZLEN))
                 return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
         e->skb = skb;
         len = skb_headlen(skb);
         map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+        if (pci_dma_mapping_error(hw->pdev, map))
+                goto mapping_error;
+
         dma_unmap_addr_set(e, mapaddr, map);
         dma_unmap_len_set(e, maplen, len);
 
-        td->dma_lo = map;
-        td->dma_hi = map >> 32;
+        td->dma_lo = lower_32_bits(map);
+        td->dma_hi = upper_32_bits(map);
 
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
                         map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
                                                skb_frag_size(frag), DMA_TO_DEVICE);
+                        if (dma_mapping_error(&hw->pdev->dev, map))
+                                goto mapping_unwind;
 
                         e = e->next;
                         e->skb = skb;
                         tf = e->desc;
                         BUG_ON(tf->control & BMU_OWN);
 
-                        tf->dma_lo = map;
-                        tf->dma_hi = (u64) map >> 32;
+                        tf->dma_lo = lower_32_bits(map);
+                        tf->dma_hi = upper_32_bits(map);
                         dma_unmap_addr_set(e, mapaddr, map);
                         dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
         }
 
         return NETDEV_TX_OK;
+
+mapping_unwind:
+        e = skge->tx_ring.to_use;
+        pci_unmap_single(hw->pdev,
+                         dma_unmap_addr(e, mapaddr),
+                         dma_unmap_len(e, maplen),
+                         PCI_DMA_TODEVICE);
+        while (i-- > 0) {
+                e = e->next;
+                pci_unmap_page(hw->pdev,
+                               dma_unmap_addr(e, mapaddr),
+                               dma_unmap_len(e, maplen),
+                               PCI_DMA_TODEVICE);
+        }
+
+mapping_error:
+        if (net_ratelimit())
+                dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+        dev_kfree_skb(skb);
+        return NETDEV_TX_OK;
 }
 
 
```
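The new `mapping_unwind:`/`mapping_error:` labels implement the standard map-then-unwind pattern: if fragment `i` fails to map, the head mapping plus fragments `0..i-1` must be released before the packet is dropped, otherwise mapping entries leak. A compilable sketch of just that control flow (stand-in names; the prints replace the real unmap calls):

```c
#include <stdio.h>

#define NFRAGS 3

static int map_one(int idx, int fail_at)
{
        if (idx == fail_at)
                return -1;              /* simulated dma_mapping_error() */
        printf("map   %d\n", idx);
        return 0;
}

static void unmap_one(int idx)
{
        printf("unmap %d\n", idx);
}

static int xmit(int fail_at)
{
        int i;

        if (map_one(0, fail_at) < 0)    /* head, cf. pci_map_single() */
                goto mapping_error;

        for (i = 0; i < NFRAGS; i++)    /* frags, cf. skb_frag_dma_map() */
                if (map_one(i + 1, fail_at) < 0)
                        goto mapping_unwind;
        return 0;

mapping_unwind:
        unmap_one(0);                   /* cf. pci_unmap_single() on head */
        while (i-- > 0)
                unmap_one(i + 1);       /* cf. pci_unmap_page() per frag  */
mapping_error:
        printf("drop packet\n");        /* cf. dev_kfree_skb(skb) */
        return -1;
}

int main(void)
{
        return xmit(2) == -1 ? 0 : 1;   /* fail on frag 1, watch the unwind */
}
```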
```diff
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 
                 pci_dma_sync_single_for_cpu(skge->hw->pdev,
                                             dma_unmap_addr(e, mapaddr),
-                                            len, PCI_DMA_FROMDEVICE);
+                                            dma_unmap_len(e, maplen),
+                                            PCI_DMA_FROMDEVICE);
                 skb_copy_from_linear_data(e->skb, skb->data, len);
                 pci_dma_sync_single_for_device(skge->hw->pdev,
                                                dma_unmap_addr(e, mapaddr),
-                                               len, PCI_DMA_FROMDEVICE);
+                                               dma_unmap_len(e, maplen),
+                                               PCI_DMA_FROMDEVICE);
                 skge_rx_reuse(e, skge->rx_buf_size);
         } else {
                 struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                 if (!nskb)
                         goto resubmit;
 
+                if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+                        dev_kfree_skb(nskb);
+                        goto resubmit;
+                }
+
                 pci_unmap_single(skge->hw->pdev,
                                  dma_unmap_addr(e, mapaddr),
                                  dma_unmap_len(e, maplen),
                                  PCI_DMA_FROMDEVICE);
                 skb = e->skb;
                 prefetch(skb->data);
-                skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
 }
 
         skb_put(skb, len);
```
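In `skge_rx_get()` the replacement buffer is now set up before the element is torn down, so a mapping failure can fall back to `goto resubmit` and keep using the old, still-mapped buffer; only the new skb is freed. A toy model of that order of operations, with the old handle saved off before the slot is rewritten (names are stand-ins):

```c
#include <stdlib.h>

/* Stand-ins for the DMA calls; no-ops here. */
static int  setup_mapping(void *buf)    { return buf ? 0 : -1; }
static void teardown_mapping(void *buf) { (void)buf; }

/* Map the replacement FIRST; only when that succeeds is the old buffer
 * unmapped and handed up the stack. On failure the old mapping is left
 * untouched and the element can be resubmitted to the hardware. */
static void *rx_swap(void **slot, void *replacement)
{
        void *old;

        if (setup_mapping(replacement) < 0) {
                free(replacement);      /* cf. dev_kfree_skb(nskb) */
                return NULL;            /* cf. goto resubmit       */
        }
        old = *slot;                    /* cf. skb = e->skb        */
        teardown_mapping(old);          /* cf. pci_unmap_single()  */
        *slot = replacement;
        return old;                     /* delivered to the stack  */
}

int main(void)
{
        void *slot = malloc(2048);
        void *old  = rx_swap(&slot, malloc(2048));

        free(old);
        free(slot);
        return 0;
}
```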