diff options
author | Lennert Buytenhek <buytenh@wantstofly.org> | 2008-09-12 22:16:15 -0400 |
---|---|---|
committer | Lennert Buytenhek <buytenh@marvell.com> | 2008-09-14 08:09:06 -0400 |
commit | a418950c1378c4a3722baecdbe98df78ea23f231 (patch) | |
tree | 6c92a9e59c85c82b306c55bdd0a5b7fc3ce89516 | |
parent | 8fd89211bf8e8e60415c66e5546c1478f5e8bc2b (diff) |
mv643xx_eth: avoid dropping tx lock during transmit reclaim
By moving DMA unmapping during transmit reclaim back under the netif
tx lock, we avoid the situation where we read the DMA address and buffer
length from the descriptor under the lock and then not do anything with
that data after dropping the lock on platforms where the DMA unmapping
routines are all NOPs (which is the case on at least all of the ARM
platforms that mv643xx_eth is used on).
This saves two uncached reads, which makes a small but measurable
performance difference in routing benchmarks.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
-rw-r--r-- | drivers/net/mv643xx_eth.c | 22 |
1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index d653b5a19e77..7410eca87823 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -866,8 +866,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
866 | struct tx_desc *desc; | 866 | struct tx_desc *desc; |
867 | u32 cmd_sts; | 867 | u32 cmd_sts; |
868 | struct sk_buff *skb; | 868 | struct sk_buff *skb; |
869 | dma_addr_t addr; | ||
870 | int count; | ||
871 | 869 | ||
872 | tx_index = txq->tx_used_desc; | 870 | tx_index = txq->tx_used_desc; |
873 | desc = &txq->tx_desc_area[tx_index]; | 871 | desc = &txq->tx_desc_area[tx_index]; |
@@ -886,8 +884,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
886 | reclaimed++; | 884 | reclaimed++; |
887 | txq->tx_desc_count--; | 885 | txq->tx_desc_count--; |
888 | 886 | ||
889 | addr = desc->buf_ptr; | ||
890 | count = desc->byte_cnt; | ||
891 | skb = txq->tx_skb[tx_index]; | 887 | skb = txq->tx_skb[tx_index]; |
892 | txq->tx_skb[tx_index] = NULL; | 888 | txq->tx_skb[tx_index] = NULL; |
893 | 889 | ||
@@ -896,20 +892,16 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
896 | mp->dev->stats.tx_errors++; | 892 | mp->dev->stats.tx_errors++; |
897 | } | 893 | } |
898 | 894 | ||
899 | /* | 895 | if (cmd_sts & TX_FIRST_DESC) { |
900 | * Drop tx queue lock while we free the skb. | 896 | dma_unmap_single(NULL, desc->buf_ptr, |
901 | */ | 897 | desc->byte_cnt, DMA_TO_DEVICE); |
902 | __netif_tx_unlock(nq); | 898 | } else { |
903 | 899 | dma_unmap_page(NULL, desc->buf_ptr, | |
904 | if (cmd_sts & TX_FIRST_DESC) | 900 | desc->byte_cnt, DMA_TO_DEVICE); |
905 | dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); | 901 | } |
906 | else | ||
907 | dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE); | ||
908 | 902 | ||
909 | if (skb) | 903 | if (skb) |
910 | dev_kfree_skb(skb); | 904 | dev_kfree_skb(skb); |
911 | |||
912 | __netif_tx_lock(nq, smp_processor_id()); | ||
913 | } | 905 | } |
914 | 906 | ||
915 | __netif_tx_unlock(nq); | 907 | __netif_tx_unlock(nq); |