author		Lennert Buytenhek <buytenh@wantstofly.org>	2008-08-28 02:26:28 -0400
committer	Lennert Buytenhek <buytenh@marvell.com>		2008-09-14 08:09:05 -0400
commit		8fd89211bf8e8e60415c66e5546c1478f5e8bc2b
tree		4600b72d8fec487f2e90d45a369bce52dc306eef	/drivers/net/mv643xx_eth.c
parent		1fa38c586e92cce4ce06bfc08ad3134b8445170b
mv643xx_eth: switch to netif tx queue lock, get rid of private spinlock

Since our ->hard_start_xmit() method is already called under spinlock
protection (the netif tx queue lock), we can simply make that lock cover
the private transmit state (descriptor ring indexes et al.) as well,
which avoids having to use a private lock to protect that state.

Since this was the last user of the driver-private spinlock, it can be
killed off.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
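For context, the pattern this patch relies on is that the networking core
already takes the per-queue tx lock around ->hard_start_xmit(), so any other
driver path that touches the same transmit ring only has to take that same
lock via __netif_tx_lock(). A minimal sketch of the idea follows; the
"struct my_priv" fields and my_txq_reclaim() are hypothetical names for
illustration, not code from mv643xx_eth:

/*
 * Sketch only: a hypothetical reclaim path reusing the netif tx queue
 * lock instead of a driver-private spinlock.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/smp.h>

struct my_priv {
	int tx_ring_size;		/* hypothetical ring bookkeeping */
	int tx_desc_count;
};

static void my_txq_reclaim(struct net_device *dev, struct my_priv *mp,
			   int queue)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

	/* Same lock the core holds around ->hard_start_xmit(). */
	__netif_tx_lock(nq, smp_processor_id());

	/* ... walk completed descriptors, decrement mp->tx_desc_count ... */

	if (netif_tx_queue_stopped(nq) &&
	    mp->tx_ring_size - mp->tx_desc_count >= MAX_SKB_FRAGS + 1)
		netif_tx_wake_queue(nq);

	__netif_tx_unlock(nq);
}

The upside over a private spinlock is that the hot transmit path pays no
extra locking cost, since it already holds the queue lock when it is called.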
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
 drivers/net/mv643xx_eth.c | 85
 1 file changed, 55 insertions(+), 30 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 3db422b6666b..d653b5a19e77 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -337,6 +337,10 @@ struct tx_queue {
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
 	struct sk_buff **tx_skb;
+
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+	unsigned long tx_dropped;
 };
 
 struct mv643xx_eth_private {
@@ -347,8 +351,6 @@ struct mv643xx_eth_private {
 
 	int phy_addr;
 
-	spinlock_t lock;
-
 	struct mib_counters mib_counters;
 	struct work_struct tx_timeout_task;
 	struct mii_if_info mii;
@@ -453,10 +455,12 @@ static void txq_maybe_wake(struct tx_queue *txq)
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
-	spin_lock(&mp->lock);
-	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
-		netif_tx_wake_queue(nq);
-	spin_unlock(&mp->lock);
+	if (netif_tx_queue_stopped(nq)) {
+		__netif_tx_lock(nq, smp_processor_id());
+		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+			netif_tx_wake_queue(nq);
+		__netif_tx_unlock(nq);
+	}
 }
 
 
@@ -785,28 +789,24 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
 	int queue;
 	struct tx_queue *txq;
 	struct netdev_queue *nq;
 	int entries_left;
 
+	queue = skb_get_queue_mapping(skb);
+	txq = mp->txq + queue;
+	nq = netdev_get_tx_queue(dev, queue);
+
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
-		stats->tx_dropped++;
+		txq->tx_dropped++;
 		dev_printk(KERN_DEBUG, &dev->dev,
 			   "failed to linearize skb with tiny "
 			   "unaligned fragment\n");
 		return NETDEV_TX_BUSY;
 	}
 
-	queue = skb_get_queue_mapping(skb);
-	txq = mp->txq + queue;
-	nq = netdev_get_tx_queue(dev, queue);
-
-	spin_lock(&mp->lock);
-
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
-		spin_unlock(&mp->lock);
 		if (net_ratelimit())
 			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
 		kfree_skb(skb);
@@ -814,16 +814,14 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	txq_submit_skb(txq, skb);
-	stats->tx_bytes += skb->len;
-	stats->tx_packets++;
+	txq->tx_bytes += skb->len;
+	txq->tx_packets++;
 	dev->trans_start = jiffies;
 
 	entries_left = txq->tx_ring_size - txq->tx_desc_count;
 	if (entries_left < MAX_SKB_FRAGS + 1)
 		netif_tx_stop_queue(nq);
 
-	spin_unlock(&mp->lock);
-
 	return NETDEV_TX_OK;
 }
 
@@ -832,10 +830,11 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 static void txq_kick(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 	u32 hw_desc_ptr;
 	u32 expected_ptr;
 
-	spin_lock(&mp->lock);
+	__netif_tx_lock(nq, smp_processor_id());
 
 	if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index))
 		goto out;
@@ -848,7 +847,7 @@ static void txq_kick(struct tx_queue *txq)
 		txq_enable(txq);
 
 out:
-	spin_unlock(&mp->lock);
+	__netif_tx_unlock(nq);
 
 	mp->work_tx_end &= ~(1 << txq->index);
 }
@@ -856,9 +855,10 @@ out:
 static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 	int reclaimed;
 
-	spin_lock(&mp->lock);
+	__netif_tx_lock(nq, smp_processor_id());
 
 	reclaimed = 0;
 	while (reclaimed < budget && txq->tx_desc_count > 0) {
@@ -897,9 +897,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		}
 
 		/*
-		 * Drop mp->lock while we free the skb.
+		 * Drop tx queue lock while we free the skb.
 		 */
-		spin_unlock(&mp->lock);
+		__netif_tx_unlock(nq);
 
 		if (cmd_sts & TX_FIRST_DESC)
 			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
@@ -909,14 +909,14 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		if (skb)
 			dev_kfree_skb(skb);
 
-		spin_lock(&mp->lock);
+		__netif_tx_lock(nq, smp_processor_id());
 	}
 
+	__netif_tx_unlock(nq);
+
 	if (reclaimed < budget)
 		mp->work_tx &= ~(1 << txq->index);
 
-	spin_unlock(&mp->lock);
-
 	return reclaimed;
 }
 
@@ -1123,7 +1123,31 @@ static int smi_reg_write(struct mv643xx_eth_private *mp, unsigned int addr,
 }
 
 
-/* mib counters *************************************************************/
+/* statistics ***************************************************************/
+static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	unsigned long tx_packets = 0;
+	unsigned long tx_bytes = 0;
+	unsigned long tx_dropped = 0;
+	int i;
+
+	for (i = 0; i < mp->txq_count; i++) {
+		struct tx_queue *txq = mp->txq + i;
+
+		tx_packets += txq->tx_packets;
+		tx_bytes += txq->tx_bytes;
+		tx_dropped += txq->tx_dropped;
+	}
+
+	stats->tx_packets = tx_packets;
+	stats->tx_bytes = tx_bytes;
+	stats->tx_dropped = tx_dropped;
+
+	return stats;
+}
+
 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
 {
 	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
@@ -1355,6 +1379,7 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int i;
 
+	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
 
 	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
@@ -2138,6 +2163,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	free_irq(dev->irq, dev);
 
 	port_reset(mp);
+	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
 
 	for (i = 0; i < mp->rxq_count; i++)
@@ -2585,8 +2611,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	set_params(mp, pd);
 	dev->real_num_tx_queues = mp->txq_count;
 
-	spin_lock_init(&mp->lock);
-
 	mib_counters_clear(mp);
 	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
 
@@ -2612,6 +2636,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	BUG_ON(!res);
 	dev->irq = res->start;
 
+	dev->get_stats = mv643xx_eth_get_stats;
 	dev->hard_start_xmit = mv643xx_eth_xmit;
 	dev->open = mv643xx_eth_open;
 	dev->stop = mv643xx_eth_stop;