author     stephen hemminger <shemminger@vyatta.com>    2012-01-22 04:40:40 -0500
committer  David S. Miller <davem@davemloft.net>        2012-01-23 14:25:00 -0500
commit     da057fb7d272c7e7609465a54bcac8ec8072ead5 (patch)
tree       ebc800cce7c665a383bc28c33205b0f7eca1c64d /drivers/net/ethernet
parent     302476c99863fe6d08eed6145e37322892ab7f55 (diff)
skge: add byte queue limit support
This also changes the cleanup logic slightly to aggregate
completed notifications for multiple packets.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
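
The driver-side contract for byte queue limits is three calls from <linux/netdevice.h>: netdev_sent_queue() when bytes are posted to the hardware, netdev_completed_queue() when they are reclaimed, and netdev_reset_queue() when the ring is flushed. Below is a minimal sketch of that pattern as this patch applies it to skge; the foo_* driver, foo_hw_kick(), and foo_next_completed_skb() are hypothetical placeholders, and only the netdev_*_queue() calls (plus dev_kfree_skb()) are the real kernel API:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical hardware helpers, assumed provided elsewhere. */
static void foo_hw_kick(struct net_device *dev);
static struct sk_buff *foo_next_completed_skb(struct net_device *dev);

static netdev_tx_t foo_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb, fill a descriptor, hand it to the hardware ... */

	/* Account the queued bytes before kicking the transmitter. */
	netdev_sent_queue(dev, skb->len);

	foo_hw_kick(dev);	/* hypothetical doorbell write */
	return NETDEV_TX_OK;
}

static void foo_tx_done(struct net_device *dev)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	struct sk_buff *skb;

	/* Walk the completed part of the tx ring, batching the totals. */
	while ((skb = foo_next_completed_skb(dev)) != NULL) {
		pkts_compl++;
		bytes_compl += skb->len;
		dev_kfree_skb(skb);
	}

	/* One BQL update for the whole batch of completions. */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void foo_tx_clean(struct net_device *dev)
{
	/* ... unmap and free everything still on the ring ... */

	/* The ring is empty again; resynchronize BQL's byte counters. */
	netdev_reset_queue(dev);
}

Counting pkts_compl and bytes_compl across the whole completion walk and issuing a single netdev_completed_queue() per interrupt, rather than one update per packet, is the aggregation the commit message describes.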
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/marvell/skge.c  37
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 299c33bd5345..edb9bda55d55 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2817,6 +2817,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
 	wmb();
 
+	netdev_sent_queue(dev, skb->len);
+
 	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
 
 	netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
@@ -2858,11 +2860,9 @@ mapping_error:
 
 
 /* Free resources associated with this reing element */
-static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
-			 u32 control)
+static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
+				 u32 control)
 {
-	struct pci_dev *pdev = skge->hw->pdev;
-
 	/* skb header vs. fragment */
 	if (control & BMU_STF)
 		pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
@@ -2872,13 +2872,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 		pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
 			       dma_unmap_len(e, maplen),
 			       PCI_DMA_TODEVICE);
-
-	if (control & BMU_EOF) {
-		netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
-			     "tx done slot %td\n", e - skge->tx_ring.start);
-
-		dev_kfree_skb(e->skb);
-	}
 }
 
 /* Free all buffers in transmit ring */
@@ -2889,10 +2882,15 @@ static void skge_tx_clean(struct net_device *dev)
 
 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
-		skge_tx_free(skge, e, td->control);
+
+		skge_tx_unmap(skge->hw->pdev, e, td->control);
+
+		if (td->control & BMU_EOF)
+			dev_kfree_skb(e->skb);
 		td->control = 0;
 	}
 
+	netdev_reset_queue(dev);
 	skge->tx_ring.to_clean = e;
 }
 
@@ -3157,6 +3155,7 @@ static void skge_tx_done(struct net_device *dev)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->tx_ring;
 	struct skge_element *e;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
@@ -3166,8 +3165,20 @@ static void skge_tx_done(struct net_device *dev)
 		if (control & BMU_OWN)
 			break;
 
-		skge_tx_free(skge, e, control);
+		skge_tx_unmap(skge->hw->pdev, e, control);
+
+		if (control & BMU_EOF) {
+			netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+				     "tx done slot %td\n",
+				     e - skge->tx_ring.start);
+
+			pkts_compl++;
+			bytes_compl += e->skb->len;
+
+			dev_kfree_skb(e->skb);
+		}
 	}
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 	skge->tx_ring.to_clean = e;
 
 	/* Can run lockless until we need to synchronize to restart queue. */