-rw-r--r--  drivers/net/cpmac.c                |  20
-rw-r--r--  drivers/net/igb/igb_main.c         |  19
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  |  10
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c     |  15
-rw-r--r--  drivers/net/s2io.c                 |  48
-rw-r--r--  include/linux/netdevice.h          |  82
-rw-r--r--  include/net/pkt_sched.h            |   4
-rw-r--r--  net/core/dev.c                     |  28
-rw-r--r--  net/core/netpoll.c                 |  24
-rw-r--r--  net/core/pktgen.c                  |  69
-rw-r--r--  net/sched/sch_generic.c            |   5
-rw-r--r--  net/sched/sch_teql.c               |   6
12 files changed, 187 insertions(+), 143 deletions(-)
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 7c7b54e4828e..fbd4280c102c 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -544,7 +544,7 @@ fatal_error:
 
 	spin_unlock(&priv->rx_lock);
 	netif_rx_complete(priv->dev, napi);
-	netif_stop_queue(priv->dev);
+	netif_tx_stop_all_queues(priv->dev);
 	napi_disable(&priv->napi);
 
 	atomic_inc(&priv->reset_pending);
@@ -750,9 +750,7 @@ static void cpmac_hw_error(struct work_struct *work)
 	barrier();
 	atomic_dec(&priv->reset_pending);
 
-	for (i = 0; i < CPMAC_QUEUES; i++)
-		netif_wake_subqueue(priv->dev, i);
-	netif_wake_queue(priv->dev);
+	netif_tx_wake_all_queues(priv->dev);
 	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
 }
 
@@ -781,7 +779,7 @@ static void cpmac_check_status(struct net_device *dev)
 		       dev->name, tx_code, tx_channel, macstatus);
 	}
 
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	cpmac_hw_stop(dev);
 	if (schedule_work(&priv->reset_work))
 		atomic_inc(&priv->reset_pending);
@@ -842,9 +840,7 @@ static void cpmac_tx_timeout(struct net_device *dev)
 	barrier();
 	atomic_dec(&priv->reset_pending);
 
-	netif_wake_queue(priv->dev);
-	for (i = 0; i < CPMAC_QUEUES; i++)
-		netif_wake_subqueue(dev, i);
+	netif_tx_wake_all_queues(priv->dev);
 }
 
 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -935,7 +931,7 @@ static void cpmac_adjust_link(struct net_device *dev)
 
 	spin_lock(&priv->lock);
 	if (priv->phy->link) {
-		netif_start_queue(dev);
+		netif_tx_start_all_queues(dev);
 		if (priv->phy->duplex != priv->oldduplex) {
 			new_state = 1;
 			priv->oldduplex = priv->phy->duplex;
@@ -949,10 +945,10 @@ static void cpmac_adjust_link(struct net_device *dev)
 		if (!priv->oldlink) {
 			new_state = 1;
 			priv->oldlink = 1;
-			netif_schedule(dev);
+			netif_tx_schedule_all(dev);
 		}
 	} else if (priv->oldlink) {
-		netif_stop_queue(dev);
+		netif_tx_stop_all_queues(dev);
 		new_state = 1;
 		priv->oldlink = 0;
 		priv->oldspeed = 0;
@@ -1072,7 +1068,7 @@ static int cpmac_stop(struct net_device *dev)
 	struct cpmac_priv *priv = netdev_priv(dev);
 	struct resource *mem;
 
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 
 	cancel_work_sync(&priv->reset_work);
 	napi_disable(&priv->napi);
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 471c194cd54e..81bba6983dde 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -533,7 +533,7 @@ msi_only:
 	adapter->flags |= IGB_FLAG_HAS_MSI;
 
 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
-	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
+	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
 	return;
 }
 
@@ -821,9 +821,7 @@ void igb_down(struct igb_adapter *adapter)
 	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
 	/* flush and sleep below */
 
-	netif_stop_queue(netdev);
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		netif_stop_subqueue(netdev, i);
+	netif_tx_stop_all_queues(netdev);
 
 	/* disable transmits in the hardware */
 	tctl = rd32(E1000_TCTL);
@@ -1266,9 +1264,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 	/* tell the stack to leave us alone until igb_open() is called */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		netif_stop_subqueue(netdev, i);
+	netif_tx_stop_all_queues(netdev);
 
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
@@ -2315,7 +2311,6 @@ static void igb_watchdog_task(struct work_struct *work)
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 	u32 link;
 	s32 ret_val;
-	int i;
 
 	if ((netif_carrier_ok(netdev)) &&
 	    (rd32(E1000_STATUS) & E1000_STATUS_LU))
@@ -2371,9 +2366,7 @@ static void igb_watchdog_task(struct work_struct *work)
 			}
 
 			netif_carrier_on(netdev);
-			netif_wake_queue(netdev);
-			for (i = 0; i < adapter->num_tx_queues; i++)
-				netif_wake_subqueue(netdev, i);
+			netif_tx_wake_all_queues(netdev);
 
 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
@@ -2385,9 +2378,7 @@ static void igb_watchdog_task(struct work_struct *work)
 			adapter->link_duplex = 0;
 			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
-			for (i = 0; i < adapter->num_tx_queues; i++)
-				netif_stop_subqueue(netdev, i);
+			netif_tx_stop_all_queues(netdev);
 			if (!test_bit(__IGB_DOWN, &adapter->state))
 				mod_timer(&adapter->phy_info_timer,
 					  round_jiffies(jiffies + 2 * HZ));
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 81b769093d22..3efe5dda10af 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -252,16 +252,10 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data)
 		netdev->features |= NETIF_F_TSO;
 		netdev->features |= NETIF_F_TSO6;
 	} else {
-		struct ixgbe_adapter *adapter = netdev_priv(netdev);
-		int i;
-		netif_stop_queue(netdev);
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			netif_stop_subqueue(netdev, i);
+		netif_tx_stop_all_queues(netdev);
 		netdev->features &= ~NETIF_F_TSO;
 		netdev->features &= ~NETIF_F_TSO6;
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			netif_start_subqueue(netdev, i);
-		netif_start_queue(netdev);
+		netif_tx_start_all_queues(netdev);
 	}
 	return 0;
 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e6df9233f5ef..6af8fb5c4b5f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2013,7 +2013,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	del_timer_sync(&adapter->watchdog_timer);
 
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
+	netif_tx_stop_all_queues(netdev);
 
 	if (!pci_channel_offline(adapter->pdev))
 		ixgbe_reset(adapter);
@@ -2359,7 +2359,7 @@ try_msi:
 
 out:
 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
-	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
+	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
 
 	return err;
 }
@@ -2896,7 +2896,6 @@ static void ixgbe_watchdog(unsigned long data)
 	struct net_device *netdev = adapter->netdev;
 	bool link_up;
 	u32 link_speed = 0;
-	int i;
 
 	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
 
@@ -2917,9 +2916,7 @@ static void ixgbe_watchdog(unsigned long data)
 			         (FLOW_TX ? "TX" : "None"))));
 
 			netif_carrier_on(netdev);
-			netif_wake_queue(netdev);
-			for (i = 0; i < adapter->num_tx_queues; i++)
-				netif_wake_subqueue(netdev, i);
+			netif_tx_wake_all_queues(netdev);
 		} else {
 			/* Force detection of hung controller */
 			adapter->detect_tx_hung = true;
@@ -2928,7 +2925,7 @@ static void ixgbe_watchdog(unsigned long data)
 		if (netif_carrier_ok(netdev)) {
 			DPRINTK(LINK, INFO, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
+			netif_tx_stop_all_queues(netdev);
 		}
 	}
 
@@ -3631,9 +3628,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	ixgbe_start_hw(hw);
 
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		netif_stop_subqueue(netdev, i);
+	netif_tx_stop_all_queues(netdev);
 
 	ixgbe_napi_add_all(adapter);
 
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 5f0fcb04afff..9dae40ccf048 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -545,63 +545,53 @@ static struct pci_driver s2io_driver = {
 /* netqueue manipulation helper functions */
 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 {
-	int i;
-	if (sp->config.multiq) {
-		for (i = 0; i < sp->config.tx_fifo_num; i++)
-			netif_stop_subqueue(sp->dev, i);
-	} else {
+	if (!sp->config.multiq) {
+		int i;
+
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
-		netif_stop_queue(sp->dev);
 	}
+	netif_tx_stop_all_queues(sp->dev);
 }
 
 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 {
-	if (sp->config.multiq)
-		netif_stop_subqueue(sp->dev, fifo_no);
-	else {
+	if (!sp->config.multiq)
 		sp->mac_control.fifos[fifo_no].queue_state =
 			FIFO_QUEUE_STOP;
-		netif_stop_queue(sp->dev);
-	}
+
+	netif_tx_stop_all_queues(sp->dev);
 }
 
 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 {
-	int i;
-	if (sp->config.multiq) {
-		for (i = 0; i < sp->config.tx_fifo_num; i++)
-			netif_start_subqueue(sp->dev, i);
-	} else {
+	if (!sp->config.multiq) {
+		int i;
+
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
-		netif_start_queue(sp->dev);
 	}
+	netif_tx_start_all_queues(sp->dev);
 }
 
 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
 {
-	if (sp->config.multiq)
-		netif_start_subqueue(sp->dev, fifo_no);
-	else {
+	if (!sp->config.multiq)
 		sp->mac_control.fifos[fifo_no].queue_state =
 			FIFO_QUEUE_START;
-		netif_start_queue(sp->dev);
-	}
+
+	netif_tx_start_all_queues(sp->dev);
 }
 
 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 {
-	int i;
-	if (sp->config.multiq) {
-		for (i = 0; i < sp->config.tx_fifo_num; i++)
-			netif_wake_subqueue(sp->dev, i);
-	} else {
+	if (!sp->config.multiq) {
+		int i;
+
 		for (i = 0; i < sp->config.tx_fifo_num; i++)
 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
-		netif_wake_queue(sp->dev);
 	}
+	netif_tx_wake_all_queues(sp->dev);
 }
 
 static inline void s2io_wake_tx_queue(
@@ -8691,5 +8681,5 @@ static void s2io_io_resume(struct pci_dev *pdev)
 	}
 
 	netif_device_attach(netdev);
-	netif_wake_queue(netdev);
+	netif_tx_wake_all_queues(netdev);
 }
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c02227b9dd7b..b5c1e7df64fc 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -642,7 +642,13 @@ struct net_device
 	struct netdev_queue	rx_queue;
 
 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
+
+	/* Number of TX queues allocated at alloc_netdev_mq() time  */
 	unsigned int		num_tx_queues;
+
+	/* Number of TX queues currently active in device  */
+	unsigned int		real_num_tx_queues;
+
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
 /*
@@ -1000,6 +1006,14 @@ static inline void netif_schedule(struct net_device *dev)
 	netif_schedule_queue(netdev_get_tx_queue(dev, 0));
 }
 
+static inline void netif_tx_schedule_all(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		netif_schedule_queue(netdev_get_tx_queue(dev, i));
+}
+
 /**
  *	netif_start_queue - allow transmit
  *	@dev:	network device
@@ -1016,6 +1030,16 @@ static inline void netif_start_queue(struct net_device *dev)
 	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
 }
 
+static inline void netif_tx_start_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_start_queue(txq);
+	}
+}
+
 /**
  *	netif_wake_queue - restart transmit
  *	@dev:	network device
@@ -1040,6 +1064,16 @@ static inline void netif_wake_queue(struct net_device *dev)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
 }
 
+static inline void netif_tx_wake_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_wake_queue(txq);
+	}
+}
+
 /**
  *	netif_stop_queue - stop transmitted packets
  *	@dev:	network device
1044 * netif_stop_queue - stop transmitted packets 1078 * netif_stop_queue - stop transmitted packets
1045 * @dev: network device 1079 * @dev: network device
@@ -1057,6 +1091,16 @@ static inline void netif_stop_queue(struct net_device *dev)
1057 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); 1091 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
1058} 1092}
1059 1093
1094static inline void netif_tx_stop_all_queues(struct net_device *dev)
1095{
1096 unsigned int i;
1097
1098 for (i = 0; i < dev->num_tx_queues; i++) {
1099 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1100 netif_tx_stop_queue(txq);
1101 }
1102}
1103
1060/** 1104/**
1061 * netif_queue_stopped - test if transmit queue is flowblocked 1105 * netif_queue_stopped - test if transmit queue is flowblocked
1062 * @dev: network device 1106 * @dev: network device
@@ -1100,7 +1144,8 @@ static inline int netif_running(const struct net_device *dev)
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-	clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 /**
@@ -1112,11 +1157,12 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  */
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	set_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 /**
@@ -1129,8 +1175,8 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					   u16 queue_index)
 {
-	return test_bit(__QUEUE_STATE_XOFF,
-			&dev->egress_subqueue[queue_index].state);
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 static inline int netif_subqueue_stopped(const struct net_device *dev,
@@ -1148,13 +1194,13 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
  */
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
-			       &dev->egress_subqueue[queue_index].state))
-		__netif_schedule(netdev_get_tx_queue(dev, 0));
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+		__netif_schedule(txq);
 }
 
 /**
@@ -1198,7 +1244,8 @@ extern int dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
 extern int		dev_hard_start_xmit(struct sk_buff *skb,
-					    struct net_device *dev);
+					    struct net_device *dev,
+					    struct netdev_queue *txq);
 
 extern int		netdev_budget;
 
@@ -1447,6 +1494,12 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 	txq->xmit_lock_owner = cpu;
 }
 
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+	spin_lock_bh(&txq->_xmit_lock);
+	txq->xmit_lock_owner = smp_processor_id();
+}
+
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	int cpu = smp_processor_id();
@@ -1483,6 +1536,12 @@ static inline void __netif_tx_unlock(struct netdev_queue *txq)
 	spin_unlock(&txq->_xmit_lock);
 }
 
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock_bh(&txq->_xmit_lock);
+}
+
 static inline void netif_tx_unlock(struct net_device *dev)
 {
 	unsigned int i;
@@ -1514,8 +1573,13 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 
 static inline void netif_tx_disable(struct net_device *dev)
 {
+	unsigned int i;
+
 	netif_tx_lock_bh(dev);
-	netif_stop_queue(dev);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_stop_queue(txq);
+	}
 	netif_tx_unlock_bh(dev);
 }
 
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d58c1a5eb845..cb9527815606 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -88,9 +88,7 @@ extern void __qdisc_run(struct netdev_queue *txq);
 
 static inline void qdisc_run(struct netdev_queue *txq)
 {
-	struct net_device *dev = txq->dev;
-
-	if (!netif_queue_stopped(dev) &&
+	if (!netif_tx_queue_stopped(txq) &&
 	    !test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state))
 		__qdisc_run(txq);
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 69378f250695..f027a1ac4fbb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1598,7 +1598,8 @@ static int dev_gso_segment(struct sk_buff *skb)
 	return 0;
 }
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			struct netdev_queue *txq)
 {
 	if (likely(!skb->next)) {
 		if (!list_empty(&ptype_all))
@@ -1627,9 +1628,7 @@ gso:
 			skb->next = nskb;
 			return rc;
 		}
-		if (unlikely((netif_queue_stopped(dev) ||
-			     netif_subqueue_stopped(dev, skb)) &&
-			     skb->next))
+		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
 
@@ -1669,7 +1668,10 @@ out_kfree_skb:
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	return netdev_get_tx_queue(dev, 0);
+	u16 queue_index = 0;
+
+	skb_set_queue_mapping(skb, queue_index);
+	return netdev_get_tx_queue(dev, queue_index);
 }
 
 int dev_queue_xmit(struct sk_buff *skb)
@@ -1737,8 +1739,6 @@ gso:
 	spin_lock(&txq->lock);
 	q = txq->qdisc;
 	if (q->enqueue) {
-		/* reset queue_mapping to zero */
-		skb_set_queue_mapping(skb, 0);
 		rc = q->enqueue(skb, q);
 		qdisc_run(txq);
 		spin_unlock(&txq->lock);
@@ -1768,10 +1768,9 @@ gso:
 
 			HARD_TX_LOCK(dev, txq, cpu);
 
-			if (!netif_queue_stopped(dev) &&
-			    !netif_subqueue_stopped(dev, skb)) {
+			if (!netif_tx_queue_stopped(txq)) {
 				rc = 0;
-				if (!dev_hard_start_xmit(skb, dev)) {
+				if (!dev_hard_start_xmit(skb, dev, txq)) {
 					HARD_TX_UNLOCK(dev, txq);
 					goto out;
 				}
@@ -4160,8 +4159,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
-	alloc_size = sizeof(struct net_device) +
-		     sizeof(struct net_device_subqueue) * (queue_count - 1);
+	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
 		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
@@ -4191,16 +4189,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	dev->_tx = tx;
 	dev->num_tx_queues = queue_count;
+	dev->real_num_tx_queues = queue_count;
 
 	if (sizeof_priv) {
 		dev->priv = ((char *)dev +
-			     ((sizeof(struct net_device) +
-			       (sizeof(struct net_device_subqueue) *
-				(queue_count - 1)) + NETDEV_ALIGN_CONST)
+			     ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
 			      & ~NETDEV_ALIGN_CONST));
 	}
 
-	dev->egress_subqueue_count = queue_count;
 	dev->gso_max_size = GSO_MAX_SIZE;
 
 	netdev_init_queues(dev);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8fb134da0346..c12720895ecf 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -58,25 +58,27 @@ static void queue_process(struct work_struct *work)
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
+		struct netdev_queue *txq;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
 			__kfree_skb(skb);
 			continue;
 		}
 
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
 		local_irq_save(flags);
-		netif_tx_lock(dev);
-		if ((netif_queue_stopped(dev) ||
-		     netif_subqueue_stopped(dev, skb)) ||
-		     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) ||
+		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			netif_tx_unlock(dev);
+			__netif_tx_unlock(txq);
 			local_irq_restore(flags);
 
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		netif_tx_unlock(dev);
+		__netif_tx_unlock(txq);
 		local_irq_restore(flags);
 	}
 }
@@ -278,17 +280,19 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
+		struct netdev_queue *txq;
 		unsigned long flags;
 
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
 		local_irq_save(flags);
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (netif_tx_trylock(dev)) {
-				if (!netif_queue_stopped(dev) &&
-				    !netif_subqueue_stopped(dev, skb))
+			if (__netif_tx_trylock(txq)) {
+				if (!netif_tx_queue_stopped(txq))
 					status = dev->hard_start_xmit(skb, dev);
-				netif_tx_unlock(dev);
+				__netif_tx_unlock(txq);
 
 				if (status == NETDEV_TX_OK)
 					break;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fdf537707e51..906802db4ed4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2123,6 +2123,24 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
 	}
 }
 #endif
+static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
+{
+	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+		__u16 t;
+		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
+			t = random32() %
+				(pkt_dev->queue_map_max -
+				 pkt_dev->queue_map_min + 1)
+				+ pkt_dev->queue_map_min;
+		} else {
+			t = pkt_dev->cur_queue_map + 1;
+			if (t > pkt_dev->queue_map_max)
+				t = pkt_dev->queue_map_min;
+		}
+		pkt_dev->cur_queue_map = t;
+	}
+}
+
 /* Increment/randomize headers according to flags and current values
  * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
  */
@@ -2325,19 +2343,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		pkt_dev->cur_pkt_size = t;
 	}
 
-	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
-		__u16 t;
-		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-			t = random32() %
-				(pkt_dev->queue_map_max - pkt_dev->queue_map_min + 1)
-				+ pkt_dev->queue_map_min;
-		} else {
-			t = pkt_dev->cur_queue_map + 1;
-			if (t > pkt_dev->queue_map_max)
-				t = pkt_dev->queue_map_min;
-		}
-		pkt_dev->cur_queue_map = t;
-	}
+	set_cur_queue_map(pkt_dev);
 
 	pkt_dev->flows[flow].count++;
 }
@@ -2458,7 +2464,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
 	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
 	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
-
+	u16 queue_map;
 
 	if (pkt_dev->nr_labels)
 		protocol = htons(ETH_P_MPLS_UC);
@@ -2469,6 +2475,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
+	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
 
 	datalen = (odev->hard_header_len + 16) & ~0xf;
@@ -2507,7 +2514,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->network_header = skb->tail;
 	skb->transport_header = skb->network_header + sizeof(struct iphdr);
 	skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
-	skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
+	skb_set_queue_mapping(skb, queue_map);
 	iph = ip_hdr(skb);
 	udph = udp_hdr(skb);
 
@@ -2797,6 +2804,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
 	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
 	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
+	u16 queue_map;
 
 	if (pkt_dev->nr_labels)
 		protocol = htons(ETH_P_MPLS_UC);
@@ -2807,6 +2815,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
+	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
 
 	skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 +
@@ -2844,7 +2853,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->network_header = skb->tail;
 	skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
 	skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
-	skb_set_queue_mapping(skb, pkt_dev->cur_queue_map);
+	skb_set_queue_mapping(skb, queue_map);
 	iph = ipv6_hdr(skb);
 	udph = udp_hdr(skb);
 
@@ -3263,7 +3272,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
 	struct net_device *odev = NULL;
+	struct netdev_queue *txq;
 	__u64 idle_start = 0;
+	u16 queue_map;
 	int ret;
 
 	odev = pkt_dev->odev;
@@ -3285,9 +3296,15 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}
 
-	if ((netif_queue_stopped(odev) ||
-	     (pkt_dev->skb &&
-	      netif_subqueue_stopped(odev, pkt_dev->skb))) ||
+	if (!pkt_dev->skb) {
+		set_cur_queue_map(pkt_dev);
+		queue_map = pkt_dev->cur_queue_map;
+	} else {
+		queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	}
+
+	txq = netdev_get_tx_queue(odev, queue_map);
+	if (netif_tx_queue_stopped(txq) ||
 	    need_resched()) {
 		idle_start = getCurUs();
 
@@ -3303,8 +3320,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 		pkt_dev->idle_acc += getCurUs() - idle_start;
 
-		if (netif_queue_stopped(odev) ||
-		    netif_subqueue_stopped(odev, pkt_dev->skb)) {
+		if (netif_tx_queue_stopped(txq)) {
 			pkt_dev->next_tx_us = getCurUs();	/* TODO */
 			pkt_dev->next_tx_ns = 0;
 			goto out;	/* Try the next interface */
@@ -3331,9 +3347,12 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}
 
-	netif_tx_lock_bh(odev);
-	if (!netif_queue_stopped(odev) &&
-	    !netif_subqueue_stopped(odev, pkt_dev->skb)) {
+	/* fill_packet() might have changed the queue */
+	queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	txq = netdev_get_tx_queue(odev, queue_map);
+
+	__netif_tx_lock_bh(txq);
+	if (!netif_tx_queue_stopped(txq)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
 	retry_now:
@@ -3377,7 +3396,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->next_tx_ns = 0;
 	}
 
-	netif_tx_unlock_bh(odev);
+	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4e2b865cbba0..2f575b9017d1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -166,7 +166,7 @@ static inline int qdisc_restart(struct netdev_queue *txq)
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
-		ret = dev_hard_start_xmit(skb, dev);
+		ret = dev_hard_start_xmit(skb, dev, txq);
 	HARD_TX_UNLOCK(dev, txq);
 
 	spin_lock(&txq->lock);
@@ -198,11 +198,10 @@ static inline int qdisc_restart(struct netdev_queue *txq)
 
 void __qdisc_run(struct netdev_queue *txq)
 {
-	struct net_device *dev = txq->dev;
 	unsigned long start_time = jiffies;
 
 	while (qdisc_restart(txq)) {
-		if (netif_queue_stopped(dev))
+		if (netif_tx_queue_stopped(txq))
 			break;
 
 		/*
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 44a2c3451f4d..ade3372221c7 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -295,8 +295,7 @@ restart:
 		slave_txq = netdev_get_tx_queue(slave, 0);
 		if (slave_txq->qdisc_sleeping != q)
 			continue;
-		if (netif_queue_stopped(slave) ||
-		    __netif_subqueue_stopped(slave, subq) ||
+		if (__netif_subqueue_stopped(slave, subq) ||
 		    !netif_running(slave)) {
 			busy = 1;
 			continue;
@@ -305,8 +304,7 @@ restart:
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
 			if (netif_tx_trylock(slave)) {
-				if (!netif_queue_stopped(slave) &&
-				    !__netif_subqueue_stopped(slave, subq) &&
+				if (!__netif_subqueue_stopped(slave, subq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
 					netif_tx_unlock(slave);
 					master->slaves = NEXT_SLAVE(q);