path: root/drivers/net/ethernet/marvell/skge.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-24 18:51:40 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-24 18:51:40 -0500
commit	701b259f446be2f3625fb852bceb93afe76e206d (patch)
tree	93f15bcd00bd59c38b4e59fed9af7ddf6b06c8b3 /drivers/net/ethernet/marvell/skge.c
parent	d2346963bfcbb9a8ee783ca3c3b3bdd7448ec9d5 (diff)
parent	efc3dbc37412c027e363736b4f4c74ee5e8ecffc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Davem says:

1) Fix JIT code generation on x86-64 for divide by zero, from Eric Dumazet.
2) tg3 header length computation correction from Eric Dumazet.
3) More build and reference counting fixes for socket memory cgroup code from Glauber Costa.
4) module.h snuck back into a core header after all the hard work we did to remove that, from Paul Gortmaker and Jesper Dangaard Brouer.
5) Fix PHY naming regression and add some new PCI IDs in stmmac, from Alessandro Rubini.
6) Netlink message generation fix in new team driver, should only advertise the entries that changed during events, from Jiri Pirko.
7) SRIOV VF registration and unregistration fixes, and also add a missing PCI ID, from Roopa Prabhu.
8) Fix infinite loop in tx queue flush code of brcmsmac, from Stanislaw Gruszka.
9) ftgmac100/ftmac100 build fix, missing interrupt.h include.
10) Memory leak fix in net/hyperv do_set_multicast() handling, from Wei Yongjun.
11) Off by one fix in netem packet scheduler, from Vijay Subramanian.
12) TCP loss detection fix from Yuchung Cheng.
13) TCP reset packet MD5 calculation uses wrong address, fix from Shawn Lu.
14) skge carrier assertion and DMA mapping fixes from Stephen Hemminger.
15) Congestion recovery undo performed at the wrong spot in BIC and CUBIC congestion control modules, fix from Neal Cardwell.
16) Ethtool ETHTOOL_GSSET_INFO is unnecessarily restrictive, from Michał Mirosław.
17) Fix triggerable race in ipv6 sysctl handling, from Francesco Ruggeri.
18) Statistics bug fixes in mlx4 from Eugenia Emantayev.
19) rds locking bug fix during info dumps, from yours truly.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (67 commits)
  rds: Make rds_sock_lock BH rather than IRQ safe.
  netprio_cgroup.h: dont include module.h from other includes
  net: flow_dissector.c missing include linux/export.h
  team: send only changed options/ports via netlink
  net/hyperv: fix possible memory leak in do_set_multicast()
  drivers/net: dsa/mv88e6xxx.c files need linux/module.h
  stmmac: added PCI identifiers
  llc: Fix race condition in llc_ui_recvmsg
  stmmac: fix phy naming inconsistency
  dsa: Add reporting of silicon revision for Marvell 88E6123/88E6161/88E6165 switches.
  tg3: fix ipv6 header length computation
  skge: add byte queue limit support
  mv643xx_eth: Add Rx Discard and Rx Overrun statistics
  bnx2x: fix compilation error with SOE in fw_dump
  bnx2x: handle CHIP_REVISION during init_one
  bnx2x: allow user to change ring size in ISCSI SD mode
  bnx2x: fix Big-Endianess in ethtool -t
  bnx2x: fixed ethtool statistics for MF modes
  bnx2x: credit-leakage fixup on vlan_mac_del_all
  macvlan: fix a possible use after free
  ...
Diffstat (limited to 'drivers/net/ethernet/marvell/skge.c')
-rw-r--r--	drivers/net/ethernet/marvell/skge.c	109
1 file changed, 83 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 18a87a57fc0a..edb9bda55d55 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
-			  struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct pci_dev *pdev,
+			 struct skge_element *e,
+			 struct sk_buff *skb, unsigned int bufsize)
 {
 	struct skge_rx_desc *rd = e->desc;
-	u64 map;
+	dma_addr_t map;
 
-	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
-			     PCI_DMA_FROMDEVICE);
+	map = pci_map_single(pdev, skb->data, bufsize,
+			     PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(pdev, map))
+		goto mapping_error;
 
-	rd->dma_lo = map;
-	rd->dma_hi = map >> 32;
+	rd->dma_lo = lower_32_bits(map);
+	rd->dma_hi = upper_32_bits(map);
 	e->skb = skb;
 	rd->csum1_start = ETH_HLEN;
 	rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,13 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, bufsize);
+	return 0;
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&pdev->dev, "%s: rx mapping error\n",
+			 skb->dev->name);
+	return -EIO;
 }
 
 /* Resume receiving using existing skb,
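
The two hunks above convert skge_rx_setup() from void to int so that a failed streaming mapping can be reported instead of handing the NIC a garbage bus address: pci_map_single() can fail under an IOMMU or swiotlb, and its result must be checked with pci_dma_mapping_error() before being written into a descriptor. A minimal sketch of the same pattern, with a hypothetical demo_rx_desc and demo_rx_map standing in for the skge structures:

    #include <linux/pci.h>
    #include <linux/kernel.h>

    /* Hypothetical descriptor mirroring skge_rx_desc's split address fields. */
    struct demo_rx_desc {
            u32 dma_lo;
            u32 dma_hi;
    };

    /* Map a receive buffer and publish it to the descriptor;
     * returns 0 on success, -EIO if the streaming mapping failed. */
    static int demo_rx_map(struct pci_dev *pdev, struct demo_rx_desc *desc,
                           void *buf, size_t len)
    {
            dma_addr_t map = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);

            if (pci_dma_mapping_error(pdev, map))
                    return -EIO;    /* caller frees the skb and resubmits */

            desc->dma_lo = lower_32_bits(map);
            desc->dma_hi = upper_32_bits(map);
            return 0;
    }

Passing the pci_dev directly, rather than the skge_port, also lets the helper be shared by callers that only have the PCI device at hand.
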
@@ -1014,7 +1024,11 @@ static int skge_rx_fill(struct net_device *dev)
 			return -ENOMEM;
 
 		skb_reserve(skb, NET_IP_ALIGN);
-		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+		if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
+			kfree_skb(skb);
+			return -ENOMEM;
+		}
+
 	} while ((e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
@@ -2576,6 +2590,7 @@ static int skge_up(struct net_device *dev)
 	}
 
 	/* Initialize MAC */
+	netif_carrier_off(dev);
 	spin_lock_bh(&hw->phy_lock);
 	if (is_genesis(hw))
 		genesis_mac_init(hw, port);
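
This one-line hunk is the carrier-assertion fix called out in the merge message: previously the device could report link up as soon as it was opened, before the PHY had actually negotiated a link. Taking the carrier down explicitly before MAC initialization leaves it to the link-status handler to raise it once a real link exists.
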
@@ -2728,7 +2743,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
-	u64 map;
+	dma_addr_t map;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
@@ -2742,11 +2757,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(hw->pdev, map))
+		goto mapping_error;
+
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, len);
 
-	td->dma_lo = map;
-	td->dma_hi = map >> 32;
+	td->dma_lo = lower_32_bits(map);
+	td->dma_hi = upper_32_bits(map);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const int offset = skb_checksum_start_offset(skb);
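
Changing map from u64 to dma_addr_t is more than cosmetic. On 32-bit configurations without 64-bit DMA addressing, dma_addr_t is a 32-bit type, and an open-coded map >> 32 would then be a shift by the full width of the type, which is undefined behaviour in C. The helpers avoid this by shifting in two 16-bit steps; their definitions in linux/kernel.h are essentially:

    #define lower_32_bits(n)  ((u32)(n))
    #define upper_32_bits(n)  ((u32)(((n) >> 16) >> 16))
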
@@ -2777,14 +2795,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
 					       skb_frag_size(frag), DMA_TO_DEVICE);
+			if (dma_mapping_error(&hw->pdev->dev, map))
+				goto mapping_unwind;
 
 			e = e->next;
 			e->skb = skb;
 			tf = e->desc;
 			BUG_ON(tf->control & BMU_OWN);
 
-			tf->dma_lo = map;
-			tf->dma_hi = (u64) map >> 32;
+			tf->dma_lo = lower_32_bits(map);
+			tf->dma_hi = upper_32_bits(map);
 			dma_unmap_addr_set(e, mapaddr, map);
 			dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2797,6 +2817,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
 	wmb();
 
+	netdev_sent_queue(dev, skb->len);
+
 	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
 
 	netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
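
netdev_sent_queue() is the producer half of the byte queue limit (BQL) accounting that the "skge: add byte queue limit support" commit in this merge introduces; the matching netdev_completed_queue() and netdev_reset_queue() calls appear in the skge_tx_done() and skge_tx_clean() hunks below, and a condensed sketch of the whole contract follows the final hunk.
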
@@ -2812,15 +2834,35 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	}
 
 	return NETDEV_TX_OK;
+
+mapping_unwind:
+	/* unroll any pages that were already mapped. */
+	if (e != skge->tx_ring.to_use) {
+		struct skge_element *u;
+
+		for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
+			pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
+				       dma_unmap_len(u, maplen),
+				       PCI_DMA_TODEVICE);
+		e = skge->tx_ring.to_use;
+	}
+	/* undo the mapping for the skb header */
+	pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
+			 dma_unmap_len(e, maplen),
+			 PCI_DMA_TODEVICE);
+mapping_error:
+	/* mapping error causes error message and packet to be discarded. */
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 
 /* Free resources associated with this ring element */
-static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
-			 u32 control)
+static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
+				 u32 control)
 {
-	struct pci_dev *pdev = skge->hw->pdev;
-
 	/* skb header vs. fragment */
 	if (control & BMU_STF)
 		pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
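
The mapping_unwind path rolls back every fragment mapped so far before unmapping the header, so a partially built transmit chain never leaks DMA mappings. The same rollback idiom, reduced to a self-contained sketch (demo_map_frags and the addrs array are hypothetical; the skge code walks ring elements instead of an index):

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    /* Map all paged fragments of an skb, undoing earlier mappings on failure. */
    static int demo_map_frags(struct device *dev, struct sk_buff *skb,
                              dma_addr_t *addrs)
    {
            int i;

            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                    addrs[i] = skb_frag_dma_map(dev, frag, 0,
                                                skb_frag_size(frag),
                                                DMA_TO_DEVICE);
                    if (dma_mapping_error(dev, addrs[i]))
                            goto unwind;
            }
            return 0;

    unwind:
            /* roll back everything mapped so far, most recent first */
            while (--i >= 0)
                    dma_unmap_page(dev, addrs[i],
                                   skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                   DMA_TO_DEVICE);
            return -EIO;
    }
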
@@ -2830,13 +2872,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 		pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
 			       dma_unmap_len(e, maplen),
 			       PCI_DMA_TODEVICE);
-
-	if (control & BMU_EOF) {
-		netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
-			     "tx done slot %td\n", e - skge->tx_ring.start);
-
-		dev_kfree_skb(e->skb);
-	}
 }
 
 /* Free all buffers in transmit ring */
@@ -2847,10 +2882,15 @@ static void skge_tx_clean(struct net_device *dev)
 
 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
-		skge_tx_free(skge, e, td->control);
+
+		skge_tx_unmap(skge->hw->pdev, e, td->control);
+
+		if (td->control & BMU_EOF)
+			dev_kfree_skb(e->skb);
 		td->control = 0;
 	}
 
+	netdev_reset_queue(dev);
 	skge->tx_ring.to_clean = e;
 }
 
@@ -3059,13 +3099,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		if (!nskb)
 			goto resubmit;
 
+		if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
+			dev_kfree_skb(nskb);
+			goto resubmit;
+		}
+
 		pci_unmap_single(skge->hw->pdev,
 				 dma_unmap_addr(e, mapaddr),
 				 dma_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
 		prefetch(skb->data);
-		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
 	}
 
 	skb_put(skb, len);
@@ -3111,6 +3155,7 @@ static void skge_tx_done(struct net_device *dev)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->tx_ring;
 	struct skge_element *e;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
@@ -3120,8 +3165,20 @@ static void skge_tx_done(struct net_device *dev)
 		if (control & BMU_OWN)
 			break;
 
-		skge_tx_free(skge, e, control);
+		skge_tx_unmap(skge->hw->pdev, e, control);
+
+		if (control & BMU_EOF) {
+			netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+				     "tx done slot %td\n",
+				     e - skge->tx_ring.start);
+
+			pkts_compl++;
+			bytes_compl += e->skb->len;
+
+			dev_kfree_skb(e->skb);
+		}
 	}
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 	skge->tx_ring.to_clean = e;
 
 	/* Can run lockless until we need to synchronize to restart queue. */
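
Taken together, the netdev_sent_queue(), netdev_completed_queue() and netdev_reset_queue() calls added across these hunks form the complete byte queue limit contract: bytes are accounted when queued to hardware, credited back on completion, and rewound whenever the ring is flushed without completions. A condensed sketch of that contract, with demo_* stand-ins for skge_xmit_frame(), skge_tx_done() and skge_tx_clean():

    #include <linux/netdevice.h>

    /* Producer side: called once per skb after descriptors are handed to HW. */
    static void demo_xmit(struct net_device *dev, struct sk_buff *skb)
    {
            /* ...fill descriptors, set BMU_OWN, kick the queue... */
            netdev_sent_queue(dev, skb->len);
    }

    /* Consumer side: called from the completion interrupt/NAPI handler. */
    static void demo_tx_done(struct net_device *dev,
                             unsigned int pkts_compl, unsigned int bytes_compl)
    {
            netdev_completed_queue(dev, pkts_compl, bytes_compl);
    }

    /* Teardown: the ring is flushed without completions, so the BQL
     * counters must be reset or the queue could stay throttled. */
    static void demo_tx_clean(struct net_device *dev)
    {
            netdev_reset_queue(dev);
    }
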