aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorStephen Hemminger <shemminger@linux-foundation.org>2007-03-16 17:01:30 -0400
committerJeff Garzik <jeff@garzik.org>2007-04-28 11:01:00 -0400
commit992c9623b148ba939e9cdba0d668eedd3af1a5d2 (patch)
tree4593ab3e61d2f58023d4af4397a0b23fefe65abc
parent7e0038a414c38085dfbf352f21006bcca4cd308b (diff)
skge: transmit locking improvements
Don't need to lock when processing transmit complete unless queue fills. Modeled after tg3. Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org> Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--drivers/net/skge.c28
1 files changed, 18 insertions, 10 deletions
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index f1a0e6c0fbdd..bc531fdfc8e2 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2621,6 +2621,7 @@ static int skge_down(struct net_device *dev)
 
 static inline int skge_avail(const struct skge_ring *ring)
 {
+	smp_mb();
 	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
 		+ (ring->to_clean - ring->to_use) - 1;
 }
@@ -2709,6 +2710,8 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		       dev->name, e - skge->tx_ring.start, skb->len);
 
 	skge->tx_ring.to_use = e->next;
+	smp_wmb();
+
 	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
 		pr_debug("%s: transmit queue full\n", dev->name);
 		netif_stop_queue(dev);
@@ -2726,8 +2729,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 {
 	struct pci_dev *pdev = skge->hw->pdev;
 
-	BUG_ON(!e->skb);
-
 	/* skb header vs. fragment */
 	if (control & BMU_STF)
 		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
@@ -2745,7 +2746,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 
 		dev_kfree_skb(e->skb);
 	}
-	e->skb = NULL;
 }
 
 /* Free all buffers in transmit ring */
@@ -3017,21 +3017,29 @@ static void skge_tx_done(struct net_device *dev)
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-	netif_tx_lock(dev);
 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
-		struct skge_tx_desc *td = e->desc;
+		u32 control = ((const struct skge_tx_desc *) e->desc)->control;
 
-		if (td->control & BMU_OWN)
+		if (control & BMU_OWN)
 			break;
 
-		skge_tx_free(skge, e, td->control);
+		skge_tx_free(skge, e, control);
 	}
 	skge->tx_ring.to_clean = e;
 
-	if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
-		netif_wake_queue(dev);
+	/* Can run lockless until we need to synchronize to restart queue. */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(dev) &&
+		     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+		netif_tx_lock(dev);
+		if (unlikely(netif_queue_stopped(dev) &&
+			     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+			netif_wake_queue(dev);
 
-	netif_tx_unlock(dev);
+		}
+		netif_tx_unlock(dev);
+	}
 }
 
 static int skge_poll(struct net_device *dev, int *budget)