author     Stephen Hemminger <shemminger@osdl.org>    2006-03-23 14:07:27 -0500
committer  Jeff Garzik <jeff@garzik.org>              2006-03-23 17:13:54 -0500
commit     866b4f3e94a7568a1cb0018c061e19e120de6922
tree       ce9f82edb063145a46d5bc84d7e38d153272bc46
parent     4c180fc424550217344db6fe8960732dbd7feb0c

[PATCH] skge: don't free skb until multi-part transmit complete

Don't free transmit buffers until the whole set of transmit descriptors
has been marked as done. Otherwise, we risk freeing an skb before the
entire multi-part transmit has completed.

This changes the transmit completion handling from an incremental to a
two-pass algorithm: the first pass scans the ring and records the element
just past the last fully completed frame, and the second pass unmaps and
frees buffers up to that point.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--  drivers/net/skge.c | 73
1 file changed, 38 insertions(+), 35 deletions(-)
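
For orientation before the full diff: below is a minimal, self-contained
user-space sketch of the two-pass completion scheme described in the commit
message. The names and types here (struct elem, frame_id, tx_done(),
tx_complete(), the OWN/EOF_ flags) are simplified stand-ins invented for
illustration; they are not the driver's real skge_element/skge_tx_desc
structures or BMU_* bits, and the sketch omits DMA unmapping, locking, and
the tx_avail accounting the real patch performs.

#include <stdio.h>

#define RING_SIZE 8
#define OWN  0x1	/* models BMU_OWN: descriptor still owned by hardware */
#define EOF_ 0x2	/* models BMU_EOF: last descriptor of a multi-part frame */

struct elem {
	unsigned int control;	/* stands in for skge_tx_desc->control */
	int frame_id;		/* stands in for e->skb */
	struct elem *next;
};

static struct elem ring[RING_SIZE];
static struct elem *to_clean, *to_use;

/* Pass 2: release every element up to (but not including) 'last'. */
static void tx_complete(struct elem *last)
{
	struct elem *e;

	for (e = to_clean; e != last; e = e->next)
		printf("freeing slot %ld (frame %d)\n",
		       (long)(e - ring), e->frame_id);
	to_clean = e;
}

/* Pass 1: find the element just past the last fully completed frame. */
static void tx_done(void)
{
	struct elem *e, *last = to_clean;

	for (e = to_clean; e != to_use; e = e->next) {
		if (e->control & OWN)
			break;		/* hardware is not finished yet */
		if (e->control & EOF_)
			last = e->next;	/* whole frame is done */
	}
	tx_complete(last);
}

int main(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++)
		ring[i].next = &ring[(i + 1) % RING_SIZE];

	/*
	 * Frame 0 occupies slots 0-1 and is complete; frame 1 occupies
	 * slots 2-3 but its final descriptor is still hardware-owned,
	 * so none of frame 1 may be freed yet.
	 */
	ring[0].control = 0;		ring[0].frame_id = 0;
	ring[1].control = EOF_;		ring[1].frame_id = 0;
	ring[2].control = 0;		ring[2].frame_id = 1;
	ring[3].control = OWN | EOF_;	ring[3].frame_id = 1;

	to_clean = &ring[0];
	to_use = &ring[4];

	tx_done();	/* frees only slots 0 and 1 */
	return 0;
}

The point of the split is visible in main(): frame 1's final descriptor is
still hardware-owned, so the first pass stops before it and only frame 0's
two slots are released.
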
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index e15cbefcb6e3..a261766bc052 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2404,35 +2404,39 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
+static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
 {
-	/* This ring element can be skb or fragment */
-	if (e->skb) {
-		pci_unmap_single(hw->pdev,
-			       pci_unmap_addr(e, mapaddr),
-			       pci_unmap_len(e, maplen),
-			       PCI_DMA_TODEVICE);
-		dev_kfree_skb(e->skb);
-		e->skb = NULL;
-	} else {
-		pci_unmap_page(hw->pdev,
-			       pci_unmap_addr(e, mapaddr),
-			       pci_unmap_len(e, maplen),
-			       PCI_DMA_TODEVICE);
+	struct pci_dev *pdev = skge->hw->pdev;
+	struct skge_element *e;
+
+	for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
+		struct sk_buff *skb = e->skb;
+		int i;
+
+		e->skb = NULL;
+		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
+		++skge->tx_avail;
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			e = e->next;
+			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			++skge->tx_avail;
+		}
+
+		dev_kfree_skb(skb);
 	}
+	skge->tx_ring.to_clean = e;
 }
 
 static void skge_tx_clean(struct skge_port *skge)
 {
-	struct skge_ring *ring = &skge->tx_ring;
-	struct skge_element *e;
 
 	spin_lock_bh(&skge->tx_lock);
-	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
-		++skge->tx_avail;
-		skge_tx_free(skge->hw, e);
-	}
-	ring->to_clean = e;
+	skge_tx_complete(skge, skge->tx_ring.to_use);
+	netif_wake_queue(skge->netdev);
 	spin_unlock_bh(&skge->tx_lock);
 }
 
@@ -2662,27 +2666,26 @@ resubmit:
 static void skge_tx_done(struct skge_port *skge)
 {
 	struct skge_ring *ring = &skge->tx_ring;
-	struct skge_element *e;
+	struct skge_element *e, *last;
 
 	spin_lock(&skge->tx_lock);
-	for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
+	last = ring->to_clean;
+	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
-		u32 control;
 
-		rmb();
-		control = td->control;
-		if (control & BMU_OWN)
+		if (td->control & BMU_OWN)
 			break;
 
-		if (unlikely(netif_msg_tx_done(skge)))
-			printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
-			       skge->netdev->name, e - ring->start, td->status);
-
-		skge_tx_free(skge->hw, e);
-		e->skb = NULL;
-		++skge->tx_avail;
+		if (td->control & BMU_EOF) {
+			last = e->next;
+			if (unlikely(netif_msg_tx_done(skge)))
+				printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
+				       skge->netdev->name, e - ring->start);
+		}
 	}
-	ring->to_clean = e;
+
+	skge_tx_complete(skge, last);
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
 	if (skge->tx_avail > MAX_SKB_FRAGS + 1)