Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--  drivers/net/skge.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a261766bc052..a6f42fc85738 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -847,8 +847,7 @@ static void skge_link_up(struct skge_port *skge)
 		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
 
 	netif_carrier_on(skge->netdev);
-	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
-		netif_wake_queue(skge->netdev);
+	netif_wake_queue(skge->netdev);
 
 	if (netif_msg_link(skge))
 		printk(KERN_INFO PFX
@@ -2190,8 +2189,6 @@ static int skge_up(struct net_device *dev)
 	if (err)
 		goto free_rx_ring;
 
-	skge->tx_avail = skge->tx_ring.count - 1;
-
 	/* Initialize MAC */
 	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2294,6 +2291,12 @@ static int skge_down(struct net_device *dev)
 	return 0;
 }
 
+static inline int skge_avail(const struct skge_ring *ring)
+{
+	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
+		+ (ring->to_clean - ring->to_use) - 1;
+}
+
 static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 {
 	struct skge_port *skge = netdev_priv(dev);
@@ -2314,7 +2317,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_LOCKED;
 	}
 
-	if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
+	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -2390,8 +2393,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		       dev->name, e - ring->start, skb->len);
 
 	ring->to_use = e->next;
-	skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1;
-	if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
+	if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
 		pr_debug("%s: transmit queue full\n", dev->name);
 		netif_stop_queue(dev);
 	}
@@ -2416,14 +2418,12 @@ static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
 		e->skb = NULL;
 		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
 				 skb_headlen(skb), PCI_DMA_TODEVICE);
-		++skge->tx_avail;
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			e = e->next;
 			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
 				       skb_shinfo(skb)->frags[i].size,
 				       PCI_DMA_TODEVICE);
-			++skge->tx_avail;
 		}
 
 		dev_kfree_skb(skb);
@@ -2688,7 +2688,7 @@ static void skge_tx_done(struct skge_port *skge)
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
+	if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
 		netif_wake_queue(skge->netdev);
 
 	spin_unlock(&skge->tx_lock);
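
For reference, below is a minimal userspace sketch of the availability arithmetic that the new skge_avail() helper introduces. The demo_ring structure, its index fields, and the sample values are hypothetical stand-ins (the real skge_ring tracks to_use and to_clean as element pointers, not indices); only the formula itself mirrors the patch.

/*
 * Sketch of the skge_avail() calculation on a simplified ring.
 * demo_ring and the values in main() are made up for illustration.
 */
#include <stdio.h>

struct demo_ring {
	int count;     /* total descriptors in the ring           */
	int to_use;    /* next descriptor the producer will fill  */
	int to_clean;  /* next descriptor the consumer will free  */
};

/* Same expression as the patched skge_avail(), on indices instead of
 * element pointers: free slots between producer and consumer, keeping
 * one slot unused so a full ring is distinguishable from an empty one. */
static int demo_avail(const struct demo_ring *ring)
{
	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
		+ (ring->to_clean - ring->to_use) - 1;
}

int main(void)
{
	struct demo_ring empty   = { .count = 8, .to_use = 3, .to_clean = 3 };
	struct demo_ring wrapped = { .count = 8, .to_use = 6, .to_clean = 2 };
	struct demo_ring full    = { .count = 8, .to_use = 2, .to_clean = 3 };

	printf("empty ring:   %d free\n", demo_avail(&empty));   /* 7 */
	printf("wrapped ring: %d free\n", demo_avail(&wrapped)); /* 3 */
	printf("full ring:    %d free\n", demo_avail(&full));    /* 0 */
	return 0;
}

Deriving the free-slot count from the producer/consumer positions this way makes a separately maintained counter unnecessary, which is why the patch also drops every place that set, incremented, or decremented skge->tx_avail.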