aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--include/linux/etherdevice.h3
-rw-r--r--include/linux/netdevice.h80
-rw-r--r--include/linux/skbuff.h25
-rw-r--r--net/core/dev.c36
-rw-r--r--net/core/netpoll.c8
-rw-r--r--net/core/pktgen.c10
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/ethernet/eth.c9
-rw-r--r--net/sched/sch_teql.c6
10 files changed, 158 insertions, 30 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c251cca295c1..d4e39ff1545b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -25,6 +25,14 @@ menuconfig NETDEVICES
25# that for each of the symbols. 25# that for each of the symbols.
26if NETDEVICES 26if NETDEVICES
27 27
28config NETDEVICES_MULTIQUEUE
29 bool "Netdevice multiple hardware queue support"
30 ---help---
31 Say Y here if you want to allow the network stack to use multiple
32 hardware TX queues on an ethernet device.
33
34 Most people will say N here.
35
28config IFB 36config IFB
29 tristate "Intermediate Functional Block support" 37 tristate "Intermediate Functional Block support"
30 depends on NET_CLS_ACT 38 depends on NET_CLS_ACT
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index f48eb89efd0f..6cdb97365e47 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -39,7 +39,8 @@ extern void eth_header_cache_update(struct hh_cache *hh, struct net_device *dev
39extern int eth_header_cache(struct neighbour *neigh, 39extern int eth_header_cache(struct neighbour *neigh,
40 struct hh_cache *hh); 40 struct hh_cache *hh);
41 41
42extern struct net_device *alloc_etherdev(int sizeof_priv); 42extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
43#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
43 44
44/** 45/**
45 * is_zero_ether_addr - Determine if given Ethernet address is all zeros. 46
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2c0cc19edfb2..9817821729c4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -108,6 +108,14 @@ struct wireless_dev;
108#define MAX_HEADER (LL_MAX_HEADER + 48) 108#define MAX_HEADER (LL_MAX_HEADER + 48)
109#endif 109#endif
110 110
111struct net_device_subqueue
112{
113 /* Give a control state for each queue. This struct may contain
114 * per-queue locks in the future.
115 */
116 unsigned long state;
117};
118
111/* 119/*
112 * Network device statistics. Akin to the 2.0 ether stats but 120 * Network device statistics. Akin to the 2.0 ether stats but
113 * with byte counters. 121 * with byte counters.
@@ -331,6 +339,7 @@ struct net_device
331#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ 339#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
332#define NETIF_F_GSO 2048 /* Enable software GSO. */ 340#define NETIF_F_GSO 2048 /* Enable software GSO. */
333#define NETIF_F_LLTX 4096 /* LockLess TX */ 341#define NETIF_F_LLTX 4096 /* LockLess TX */
342#define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */
334 343
335 /* Segmentation offload features */ 344 /* Segmentation offload features */
336#define NETIF_F_GSO_SHIFT 16 345#define NETIF_F_GSO_SHIFT 16
@@ -557,6 +566,10 @@ struct net_device
557 566
558 /* rtnetlink link ops */ 567 /* rtnetlink link ops */
559 const struct rtnl_link_ops *rtnl_link_ops; 568 const struct rtnl_link_ops *rtnl_link_ops;
569
570 /* The TX queue control structures */
571 unsigned int egress_subqueue_count;
572 struct net_device_subqueue egress_subqueue[0];
560}; 573};
561#define to_net_dev(d) container_of(d, struct net_device, dev) 574#define to_net_dev(d) container_of(d, struct net_device, dev)
562 575
@@ -565,9 +578,7 @@ struct net_device
565 578
566static inline void *netdev_priv(const struct net_device *dev) 579static inline void *netdev_priv(const struct net_device *dev)
567{ 580{
568 return (char *)dev + ((sizeof(struct net_device) 581 return dev->priv;
569 + NETDEV_ALIGN_CONST)
570 & ~NETDEV_ALIGN_CONST);
571} 582}
572 583
573#define SET_MODULE_OWNER(dev) do { } while (0) 584#define SET_MODULE_OWNER(dev) do { } while (0)
@@ -719,6 +730,62 @@ static inline int netif_running(const struct net_device *dev)
719 return test_bit(__LINK_STATE_START, &dev->state); 730 return test_bit(__LINK_STATE_START, &dev->state);
720} 731}
721 732
733/*
734 * Routines to manage the subqueues on a device. We only need start,
735 * stop, and a check if it's stopped. All other device management is
736 * done at the overall netdevice level.
737 * Also test the device if we're multiqueue.
738 */
739static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
740{
741#ifdef CONFIG_NETDEVICES_MULTIQUEUE
742 clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
743#endif
744}
745
746static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
747{
748#ifdef CONFIG_NETDEVICES_MULTIQUEUE
749#ifdef CONFIG_NETPOLL_TRAP
750 if (netpoll_trap())
751 return;
752#endif
753 set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
754#endif
755}
756
757static inline int netif_subqueue_stopped(const struct net_device *dev,
758 u16 queue_index)
759{
760#ifdef CONFIG_NETDEVICES_MULTIQUEUE
761 return test_bit(__LINK_STATE_XOFF,
762 &dev->egress_subqueue[queue_index].state);
763#else
764 return 0;
765#endif
766}
767
768static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
769{
770#ifdef CONFIG_NETDEVICES_MULTIQUEUE
771#ifdef CONFIG_NETPOLL_TRAP
772 if (netpoll_trap())
773 return;
774#endif
775 if (test_and_clear_bit(__LINK_STATE_XOFF,
776 &dev->egress_subqueue[queue_index].state))
777 __netif_schedule(dev);
778#endif
779}
780
781static inline int netif_is_multiqueue(const struct net_device *dev)
782{
783#ifdef CONFIG_NETDEVICES_MULTIQUEUE
784 return (!!(NETIF_F_MULTI_QUEUE & dev->features));
785#else
786 return 0;
787#endif
788}
722 789
723/* Use this variant when it is known for sure that it 790/* Use this variant when it is known for sure that it
724 * is executing from interrupt context. 791 * is executing from interrupt context.
@@ -1009,8 +1076,11 @@ static inline void netif_tx_disable(struct net_device *dev)
1009extern void ether_setup(struct net_device *dev); 1076extern void ether_setup(struct net_device *dev);
1010 1077
1011/* Support for loadable net-drivers */ 1078/* Support for loadable net-drivers */
1012extern struct net_device *alloc_netdev(int sizeof_priv, const char *name, 1079extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
1013 void (*setup)(struct net_device *)); 1080 void (*setup)(struct net_device *),
1081 unsigned int queue_count);
1082#define alloc_netdev(sizeof_priv, name, setup) \
1083 alloc_netdev_mq(sizeof_priv, name, setup, 1)
1014extern int register_netdev(struct net_device *dev); 1084extern int register_netdev(struct net_device *dev);
1015extern void unregister_netdev(struct net_device *dev); 1085extern void unregister_netdev(struct net_device *dev);
1016/* Functions used for secondary unicast and multicast support */ 1086/* Functions used for secondary unicast and multicast support */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 881fe80f01d0..2d6a14f5f2f1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -196,7 +196,6 @@ typedef unsigned char *sk_buff_data_t;
196 * @sk: Socket we are owned by 196 * @sk: Socket we are owned by
197 * @tstamp: Time we arrived 197 * @tstamp: Time we arrived
198 * @dev: Device we arrived on/are leaving by 198 * @dev: Device we arrived on/are leaving by
199 * @iif: ifindex of device we arrived on
200 * @transport_header: Transport layer header 199 * @transport_header: Transport layer header
201 * @network_header: Network layer header 200 * @network_header: Network layer header
202 * @mac_header: Link layer header 201 * @mac_header: Link layer header
@@ -231,6 +230,8 @@ typedef unsigned char *sk_buff_data_t;
231 * @nfctinfo: Relationship of this skb to the connection 230 * @nfctinfo: Relationship of this skb to the connection
232 * @nfct_reasm: netfilter conntrack re-assembly pointer 231 * @nfct_reasm: netfilter conntrack re-assembly pointer
233 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c 232 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
233 * @iif: ifindex of device we arrived on
234 * @queue_mapping: Queue mapping for multiqueue devices
234 * @tc_index: Traffic control index 235 * @tc_index: Traffic control index
235 * @tc_verd: traffic control verdict 236 * @tc_verd: traffic control verdict
236 * @dma_cookie: a cookie to one of several possible DMA operations 237 * @dma_cookie: a cookie to one of several possible DMA operations
@@ -246,8 +247,6 @@ struct sk_buff {
246 struct sock *sk; 247 struct sock *sk;
247 ktime_t tstamp; 248 ktime_t tstamp;
248 struct net_device *dev; 249 struct net_device *dev;
249 int iif;
250 /* 4 byte hole on 64 bit*/
251 250
252 struct dst_entry *dst; 251 struct dst_entry *dst;
253 struct sec_path *sp; 252 struct sec_path *sp;
@@ -290,12 +289,18 @@ struct sk_buff {
290#ifdef CONFIG_BRIDGE_NETFILTER 289#ifdef CONFIG_BRIDGE_NETFILTER
291 struct nf_bridge_info *nf_bridge; 290 struct nf_bridge_info *nf_bridge;
292#endif 291#endif
292
293 int iif;
294 __u16 queue_mapping;
295
293#ifdef CONFIG_NET_SCHED 296#ifdef CONFIG_NET_SCHED
294 __u16 tc_index; /* traffic control index */ 297 __u16 tc_index; /* traffic control index */
295#ifdef CONFIG_NET_CLS_ACT 298#ifdef CONFIG_NET_CLS_ACT
296 __u16 tc_verd; /* traffic control verdict */ 299 __u16 tc_verd; /* traffic control verdict */
297#endif 300#endif
298#endif 301#endif
302 /* 2 byte hole */
303
299#ifdef CONFIG_NET_DMA 304#ifdef CONFIG_NET_DMA
300 dma_cookie_t dma_cookie; 305 dma_cookie_t dma_cookie;
301#endif 306#endif
@@ -1725,6 +1730,20 @@ static inline void skb_init_secmark(struct sk_buff *skb)
1725{ } 1730{ }
1726#endif 1731#endif
1727 1732
1733static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
1734{
1735#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1736 skb->queue_mapping = queue_mapping;
1737#endif
1738}
1739
1740static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
1741{
1742#ifdef CONFIG_NETDEVICES_MULTIQUEUE
1743 to->queue_mapping = from->queue_mapping;
1744#endif
1745}
1746
1728static inline int skb_is_gso(const struct sk_buff *skb) 1747static inline int skb_is_gso(const struct sk_buff *skb)
1729{ 1748{
1730 return skb_shinfo(skb)->gso_size; 1749 return skb_shinfo(skb)->gso_size;
diff --git a/net/core/dev.c b/net/core/dev.c
index 6dce9d2d46f2..7ddf66d0ad5e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1429,7 +1429,9 @@ gso:
1429 skb->next = nskb; 1429 skb->next = nskb;
1430 return rc; 1430 return rc;
1431 } 1431 }
1432 if (unlikely(netif_queue_stopped(dev) && skb->next)) 1432 if (unlikely((netif_queue_stopped(dev) ||
1433 netif_subqueue_stopped(dev, skb->queue_mapping)) &&
1434 skb->next))
1433 return NETDEV_TX_BUSY; 1435 return NETDEV_TX_BUSY;
1434 } while (skb->next); 1436 } while (skb->next);
1435 1437
@@ -1547,6 +1549,8 @@ gso:
1547 spin_lock(&dev->queue_lock); 1549 spin_lock(&dev->queue_lock);
1548 q = dev->qdisc; 1550 q = dev->qdisc;
1549 if (q->enqueue) { 1551 if (q->enqueue) {
1552 /* reset queue_mapping to zero */
1553 skb->queue_mapping = 0;
1550 rc = q->enqueue(skb, q); 1554 rc = q->enqueue(skb, q);
1551 qdisc_run(dev); 1555 qdisc_run(dev);
1552 spin_unlock(&dev->queue_lock); 1556 spin_unlock(&dev->queue_lock);
@@ -1576,7 +1580,8 @@ gso:
1576 1580
1577 HARD_TX_LOCK(dev, cpu); 1581 HARD_TX_LOCK(dev, cpu);
1578 1582
1579 if (!netif_queue_stopped(dev)) { 1583 if (!netif_queue_stopped(dev) &&
1584 !netif_subqueue_stopped(dev, skb->queue_mapping)) {
1580 rc = 0; 1585 rc = 0;
1581 if (!dev_hard_start_xmit(skb, dev)) { 1586 if (!dev_hard_start_xmit(skb, dev)) {
1582 HARD_TX_UNLOCK(dev); 1587 HARD_TX_UNLOCK(dev);
@@ -3539,16 +3544,18 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
3539} 3544}
3540 3545
3541/** 3546/**
3542 * alloc_netdev - allocate network device 3547 * alloc_netdev_mq - allocate network device
3543 * @sizeof_priv: size of private data to allocate space for 3548 * @sizeof_priv: size of private data to allocate space for
3544 * @name: device name format string 3549 * @name: device name format string
3545 * @setup: callback to initialize device 3550 * @setup: callback to initialize device
3551 * @queue_count: the number of subqueues to allocate
3546 * 3552 *
3547 * Allocates a struct net_device with private data area for driver use 3553 * Allocates a struct net_device with private data area for driver use
3548 * and performs basic initialization. 3554 * and performs basic initialization. Also allocates subqueue structs
3555 * for each queue on the device at the end of the netdevice.
3549 */ 3556 */
3550struct net_device *alloc_netdev(int sizeof_priv, const char *name, 3557struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
3551 void (*setup)(struct net_device *)) 3558 void (*setup)(struct net_device *), unsigned int queue_count)
3552{ 3559{
3553 void *p; 3560 void *p;
3554 struct net_device *dev; 3561 struct net_device *dev;
@@ -3557,7 +3564,9 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
3557 BUG_ON(strlen(name) >= sizeof(dev->name)); 3564 BUG_ON(strlen(name) >= sizeof(dev->name));
3558 3565
3559 /* ensure 32-byte alignment of both the device and private area */ 3566 /* ensure 32-byte alignment of both the device and private area */
3560 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; 3567 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
3568 (sizeof(struct net_device_subqueue) * queue_count)) &
3569 ~NETDEV_ALIGN_CONST;
3561 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST; 3570 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
3562 3571
3563 p = kzalloc(alloc_size, GFP_KERNEL); 3572 p = kzalloc(alloc_size, GFP_KERNEL);
@@ -3570,15 +3579,22 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
3570 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); 3579 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3571 dev->padded = (char *)dev - (char *)p; 3580 dev->padded = (char *)dev - (char *)p;
3572 3581
3573 if (sizeof_priv) 3582 if (sizeof_priv) {
3574 dev->priv = netdev_priv(dev); 3583 dev->priv = ((char *)dev +
3584 ((sizeof(struct net_device) +
3585 (sizeof(struct net_device_subqueue) *
3586 queue_count) + NETDEV_ALIGN_CONST)
3587 & ~NETDEV_ALIGN_CONST));
3588 }
3589
3590 dev->egress_subqueue_count = queue_count;
3575 3591
3576 dev->get_stats = internal_stats; 3592 dev->get_stats = internal_stats;
3577 setup(dev); 3593 setup(dev);
3578 strcpy(dev->name, name); 3594 strcpy(dev->name, name);
3579 return dev; 3595 return dev;
3580} 3596}
3581EXPORT_SYMBOL(alloc_netdev); 3597EXPORT_SYMBOL(alloc_netdev_mq);
3582 3598
3583/** 3599/**
3584 * free_netdev - free network device 3600 * free_netdev - free network device
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a0efdd7a6b37..4b06d1936375 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -66,8 +66,9 @@ static void queue_process(struct work_struct *work)
66 66
67 local_irq_save(flags); 67 local_irq_save(flags);
68 netif_tx_lock(dev); 68 netif_tx_lock(dev);
69 if (netif_queue_stopped(dev) || 69 if ((netif_queue_stopped(dev) ||
70 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { 70 netif_subqueue_stopped(dev, skb->queue_mapping)) ||
71 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
71 skb_queue_head(&npinfo->txq, skb); 72 skb_queue_head(&npinfo->txq, skb);
72 netif_tx_unlock(dev); 73 netif_tx_unlock(dev);
73 local_irq_restore(flags); 74 local_irq_restore(flags);
@@ -254,7 +255,8 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
254 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; 255 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
255 tries > 0; --tries) { 256 tries > 0; --tries) {
256 if (netif_tx_trylock(dev)) { 257 if (netif_tx_trylock(dev)) {
257 if (!netif_queue_stopped(dev)) 258 if (!netif_queue_stopped(dev) &&
259 !netif_subqueue_stopped(dev, skb->queue_mapping))
258 status = dev->hard_start_xmit(skb, dev); 260 status = dev->hard_start_xmit(skb, dev);
259 netif_tx_unlock(dev); 261 netif_tx_unlock(dev);
260 262
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9cd3a1cb60ef..dffe067e7a7b 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3139,7 +3139,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3139 } 3139 }
3140 } 3140 }
3141 3141
3142 if (netif_queue_stopped(odev) || need_resched()) { 3142 if ((netif_queue_stopped(odev) ||
3143 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
3144 need_resched()) {
3143 idle_start = getCurUs(); 3145 idle_start = getCurUs();
3144 3146
3145 if (!netif_running(odev)) { 3147 if (!netif_running(odev)) {
@@ -3154,7 +3156,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3154 3156
3155 pkt_dev->idle_acc += getCurUs() - idle_start; 3157 pkt_dev->idle_acc += getCurUs() - idle_start;
3156 3158
3157 if (netif_queue_stopped(odev)) { 3159 if (netif_queue_stopped(odev) ||
3160 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
3158 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 3161 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3159 pkt_dev->next_tx_ns = 0; 3162 pkt_dev->next_tx_ns = 0;
3160 goto out; /* Try the next interface */ 3163 goto out; /* Try the next interface */
@@ -3181,7 +3184,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3181 } 3184 }
3182 3185
3183 netif_tx_lock_bh(odev); 3186 netif_tx_lock_bh(odev);
3184 if (!netif_queue_stopped(odev)) { 3187 if (!netif_queue_stopped(odev) &&
3188 !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
3185 3189
3186 atomic_inc(&(pkt_dev->skb->users)); 3190 atomic_inc(&(pkt_dev->skb->users));
3187 retry_now: 3191 retry_now:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c989c3a0f907..6a41b96b3d37 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -419,6 +419,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
419 n->nohdr = 0; 419 n->nohdr = 0;
420 C(pkt_type); 420 C(pkt_type);
421 C(ip_summed); 421 C(ip_summed);
422 skb_copy_queue_mapping(n, skb);
422 C(priority); 423 C(priority);
423#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 424#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
424 C(ipvs_property); 425 C(ipvs_property);
@@ -460,6 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
460#endif 461#endif
461 new->sk = NULL; 462 new->sk = NULL;
462 new->dev = old->dev; 463 new->dev = old->dev;
464 skb_copy_queue_mapping(new, old);
463 new->priority = old->priority; 465 new->priority = old->priority;
464 new->protocol = old->protocol; 466 new->protocol = old->protocol;
465 new->dst = dst_clone(old->dst); 467 new->dst = dst_clone(old->dst);
@@ -1932,6 +1934,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
1932 tail = nskb; 1934 tail = nskb;
1933 1935
1934 nskb->dev = skb->dev; 1936 nskb->dev = skb->dev;
1937 skb_copy_queue_mapping(nskb, skb);
1935 nskb->priority = skb->priority; 1938 nskb->priority = skb->priority;
1936 nskb->protocol = skb->protocol; 1939 nskb->protocol = skb->protocol;
1937 nskb->dst = dst_clone(skb->dst); 1940 nskb->dst = dst_clone(skb->dst);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 0ac2524f3b68..1387e5411f77 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -316,9 +316,10 @@ void ether_setup(struct net_device *dev)
316EXPORT_SYMBOL(ether_setup); 316EXPORT_SYMBOL(ether_setup);
317 317
318/** 318/**
319 * alloc_etherdev - Allocates and sets up an Ethernet device 319 * alloc_etherdev_mq - Allocates and sets up an Ethernet device
320 * @sizeof_priv: Size of additional driver-private structure to be allocated 320 * @sizeof_priv: Size of additional driver-private structure to be allocated
321 * for this Ethernet device 321 * for this Ethernet device
322 * @queue_count: The number of queues this device has.
322 * 323 *
323 * Fill in the fields of the device structure with Ethernet-generic 324 * Fill in the fields of the device structure with Ethernet-generic
324 * values. Basically does everything except registering the device. 325 * values. Basically does everything except registering the device.
@@ -328,8 +329,8 @@ EXPORT_SYMBOL(ether_setup);
328 * this private data area. 329 * this private data area.
329 */ 330 */
330 331
331struct net_device *alloc_etherdev(int sizeof_priv) 332struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
332{ 333{
333 return alloc_netdev(sizeof_priv, "eth%d", ether_setup); 334 return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
334} 335}
335EXPORT_SYMBOL(alloc_etherdev); 336EXPORT_SYMBOL(alloc_etherdev_mq);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index f05ad9a30b4c..dfe7e4520988 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -277,6 +277,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
277 int busy; 277 int busy;
278 int nores; 278 int nores;
279 int len = skb->len; 279 int len = skb->len;
280 int subq = skb->queue_mapping;
280 struct sk_buff *skb_res = NULL; 281 struct sk_buff *skb_res = NULL;
281 282
282 start = master->slaves; 283 start = master->slaves;
@@ -293,7 +294,9 @@ restart:
293 294
294 if (slave->qdisc_sleeping != q) 295 if (slave->qdisc_sleeping != q)
295 continue; 296 continue;
296 if (netif_queue_stopped(slave) || ! netif_running(slave)) { 297 if (netif_queue_stopped(slave) ||
298 netif_subqueue_stopped(slave, subq) ||
299 !netif_running(slave)) {
297 busy = 1; 300 busy = 1;
298 continue; 301 continue;
299 } 302 }
@@ -302,6 +305,7 @@ restart:
302 case 0: 305 case 0:
303 if (netif_tx_trylock(slave)) { 306 if (netif_tx_trylock(slave)) {
304 if (!netif_queue_stopped(slave) && 307 if (!netif_queue_stopped(slave) &&
308 !netif_subqueue_stopped(slave, subq) &&
305 slave->hard_start_xmit(skb, slave) == 0) { 309 slave->hard_start_xmit(skb, slave) == 0) {
306 netif_tx_unlock(slave); 310 netif_tx_unlock(slave);
307 master->slaves = NEXT_SLAVE(q); 311 master->slaves = NEXT_SLAVE(q);