author     Lennert Buytenhek <buytenh@wantstofly.org>   2008-08-28 00:26:23 -0400
committer  Lennert Buytenhek <buytenh@marvell.com>      2008-09-05 00:33:59 -0400
commit     e5ef1de198c7bf2ca10de82add06536ed6165f8b
tree       95d3cbb86ec768e6261e36e0228ad2abf44c55d9 /drivers/net/mv643xx_eth.c
parent     befefe2177d4bca07a64a2410333388c063a6f7d
mv643xx_eth: transmit multiqueue support
As all the infrastructure for multiple transmit queues already exists in the
driver, this patch is entirely trivial.  The individual transmit queues are
still serialised by the driver's per-port private spinlock, but that will
disappear (i.e. be replaced by the per-subqueue ->_xmit_lock) in a subsequent
patch.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
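For readers unfamiliar with the multiqueue TX API, the sketch below illustrates
the per-sub-queue flow-control pattern this patch adopts.  It is not code from
the driver: example_xmit() is a hypothetical hard_start_xmit handler, and the
mp->txq ring with its tx_ring_size/tx_desc_count accounting is borrowed from the
driver's structures for illustration.  The stack picks a sub-queue for each skb;
the driver looks it up with skb_get_queue_mapping()/netdev_get_tx_queue() and
stops or wakes only that struct netdev_queue rather than the whole device.  The
device also has to be allocated multiqueue-capable, which is what the
alloc_etherdev_mq(..., 8) change in the probe path below does.

/* Hypothetical hard_start_xmit handler -- illustration only. */
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int queue = skb_get_queue_mapping(skb);         /* sub-queue chosen by the stack */
	struct tx_queue *txq = mp->txq + queue;         /* driver's ring for that queue */
	struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

	/* ... queue the skb's descriptors on txq under the driver's lock ... */

	/* Flow-control this sub-queue only, not the whole device. */
	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

	/* Later, once descriptors are reclaimed: netif_tx_wake_queue(nq). */
	return NETDEV_TX_OK;
}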
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--  drivers/net/mv643xx_eth.c | 40
1 file changed, 18 insertions(+), 22 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index e592fac87068..1ceed8798618 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -449,15 +449,10 @@ static void txq_disable(struct tx_queue *txq)
 static void __txq_maybe_wake(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-
-	/*
-	 * netif_{stop,wake}_queue() flow control only applies to
-	 * the primary queue.
-	 */
-	BUG_ON(txq->index != 0);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
 	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
-		netif_wake_queue(mp->dev);
+		netif_tx_wake_queue(nq);
 }
 
 
@@ -827,8 +822,11 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
+	int queue;
 	struct tx_queue *txq;
+	struct netdev_queue *nq;
 	unsigned long flags;
+	int entries_left;
 
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
 		stats->tx_dropped++;
@@ -838,15 +836,16 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&mp->lock, flags);
+	queue = skb_get_queue_mapping(skb);
+	txq = mp->txq + queue;
+	nq = netdev_get_tx_queue(dev, queue);
 
-	txq = mp->txq;
+	spin_lock_irqsave(&mp->lock, flags);
 
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
 		spin_unlock_irqrestore(&mp->lock, flags);
-		if (txq->index == 0 && net_ratelimit())
-			dev_printk(KERN_ERR, &dev->dev,
-				   "primary tx queue full?!\n");
+		if (net_ratelimit())
+			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -856,13 +855,9 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 
-	if (txq->index == 0) {
-		int entries_left;
-
-		entries_left = txq->tx_ring_size - txq->tx_desc_count;
-		if (entries_left < MAX_SKB_FRAGS + 1)
-			netif_stop_queue(dev);
-	}
+	entries_left = txq->tx_ring_size - txq->tx_desc_count;
+	if (entries_left < MAX_SKB_FRAGS + 1)
+		netif_tx_stop_queue(nq);
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 
@@ -2169,10 +2164,10 @@ static void tx_timeout_task(struct work_struct *ugly)
 
 	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
 	if (netif_running(mp->dev)) {
-		netif_stop_queue(mp->dev);
+		netif_tx_stop_all_queues(mp->dev);
 		port_reset(mp);
 		port_start(mp);
-		netif_wake_queue(mp->dev);
+		netif_tx_wake_all_queues(mp->dev);
 	}
 }
 
@@ -2546,7 +2541,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
+	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
 	if (!dev)
 		return -ENOMEM;
 
@@ -2559,6 +2554,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	mp->dev = dev;
 
 	set_params(mp, pd);
+	dev->real_num_tx_queues = mp->txq_count;
 
 	spin_lock_init(&mp->lock);
 