Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c        36
-rw-r--r--  net/core/netpoll.c     8
-rw-r--r--  net/core/pktgen.c     10
-rw-r--r--  net/core/skbuff.c      3
-rw-r--r--  net/ethernet/eth.c     9
-rw-r--r--  net/sched/sch_teql.c   6
6 files changed, 51 insertions, 21 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 6dce9d2d46..7ddf66d0ad 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1429,7 +1429,9 @@ gso:
 			skb->next = nskb;
 			return rc;
 		}
-		if (unlikely(netif_queue_stopped(dev) && skb->next))
+		if (unlikely((netif_queue_stopped(dev) ||
+			     netif_subqueue_stopped(dev, skb->queue_mapping)) &&
+			     skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
 
@@ -1547,6 +1549,8 @@ gso:
 		spin_lock(&dev->queue_lock);
 		q = dev->qdisc;
 		if (q->enqueue) {
+			/* reset queue_mapping to zero */
+			skb->queue_mapping = 0;
 			rc = q->enqueue(skb, q);
 			qdisc_run(dev);
 			spin_unlock(&dev->queue_lock);
@@ -1576,7 +1580,8 @@ gso:
 
 			HARD_TX_LOCK(dev, cpu);
 
-			if (!netif_queue_stopped(dev)) {
+			if (!netif_queue_stopped(dev) &&
+			    !netif_subqueue_stopped(dev, skb->queue_mapping)) {
 				rc = 0;
 				if (!dev_hard_start_xmit(skb, dev)) {
 					HARD_TX_UNLOCK(dev);
@@ -3539,16 +3544,18 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
 }
 
 /**
- *	alloc_netdev - allocate network device
+ *	alloc_netdev_mq - allocate network device
  *	@sizeof_priv:	size of private data to allocate space for
  *	@name:		device name format string
  *	@setup:		callback to initialize device
+ *	@queue_count:	the number of subqueues to allocate
  *
  *	Allocates a struct net_device with private data area for driver use
- *	and performs basic initialization.
+ *	and performs basic initialization.  Also allocates subqueue structs
+ *	for each queue on the device at the end of the netdevice.
  */
-struct net_device *alloc_netdev(int sizeof_priv, const char *name,
-		void (*setup)(struct net_device *))
+struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+		void (*setup)(struct net_device *), unsigned int queue_count)
 {
 	void *p;
 	struct net_device *dev;
@@ -3557,7 +3564,9 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
 	/* ensure 32-byte alignment of both the device and private area */
-	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
+		      (sizeof(struct net_device_subqueue) * queue_count)) &
+		     ~NETDEV_ALIGN_CONST;
 	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
@@ -3570,15 +3579,22 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
 		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
 	dev->padded = (char *)dev - (char *)p;
 
-	if (sizeof_priv)
-		dev->priv = netdev_priv(dev);
+	if (sizeof_priv) {
+		dev->priv = ((char *)dev +
+			     ((sizeof(struct net_device) +
+			       (sizeof(struct net_device_subqueue) *
+				queue_count) + NETDEV_ALIGN_CONST)
+			      & ~NETDEV_ALIGN_CONST));
+	}
+
+	dev->egress_subqueue_count = queue_count;
 
 	dev->get_stats = internal_stats;
 	setup(dev);
 	strcpy(dev->name, name);
 	return dev;
 }
-EXPORT_SYMBOL(alloc_netdev);
+EXPORT_SYMBOL(alloc_netdev_mq);
 
 /**
  *	free_netdev - free network device
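The netif_subqueue_stopped() checks added above in dev_hard_start_xmit() and dev_queue_xmit() come from the companion include/linux/netdevice.h change, which this net/-only diff does not include. A minimal sketch of the per-queue state those checks are assumed to test follows; the struct layout and the egress_subqueue field name are assumptions, not taken from this diff.

/*
 * Sketch only, for reading the hunks above.  The real definitions live in
 * the companion include/linux/netdevice.h patch; the egress_subqueue array
 * and the state field are assumed names.
 */
struct net_device_subqueue {
	/* per-queue transmit state, mirroring the device-wide queue state */
	unsigned long state;
};

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 u16 queue_index)
{
	/* assumed: the same __LINK_STATE_XOFF bit netif_queue_stopped()
	 * tests, kept per subqueue in the array alloc_netdev_mq() lays out
	 * behind the struct net_device
	 */
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
}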
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a0efdd7a6b..4b06d19363 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -66,8 +66,9 @@ static void queue_process(struct work_struct *work)
 
 		local_irq_save(flags);
 		netif_tx_lock(dev);
-		if (netif_queue_stopped(dev) ||
-		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+		if ((netif_queue_stopped(dev) ||
+		     netif_subqueue_stopped(dev, skb->queue_mapping)) ||
+		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			netif_tx_unlock(dev);
 			local_irq_restore(flags);
@@ -254,7 +255,8 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
 			if (netif_tx_trylock(dev)) {
-				if (!netif_queue_stopped(dev))
+				if (!netif_queue_stopped(dev) &&
+				    !netif_subqueue_stopped(dev, skb->queue_mapping))
 					status = dev->hard_start_xmit(skb, dev);
 				netif_tx_unlock(dev);
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9cd3a1cb60..dffe067e7a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3139,7 +3139,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		}
 	}
 
-	if (netif_queue_stopped(odev) || need_resched()) {
+	if ((netif_queue_stopped(odev) ||
+	     netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
+	    need_resched()) {
 		idle_start = getCurUs();
 
 		if (!netif_running(odev)) {
@@ -3154,7 +3156,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 		pkt_dev->idle_acc += getCurUs() - idle_start;
 
-		if (netif_queue_stopped(odev)) {
+		if (netif_queue_stopped(odev) ||
+		    netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
 			pkt_dev->next_tx_us = getCurUs();	/* TODO */
 			pkt_dev->next_tx_ns = 0;
 			goto out;	/* Try the next interface */
@@ -3181,7 +3184,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	}
 
 	netif_tx_lock_bh(odev);
-	if (!netif_queue_stopped(odev)) {
+	if (!netif_queue_stopped(odev) &&
+	    !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
 retry_now:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c989c3a0f9..6a41b96b3d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -419,6 +419,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	n->nohdr = 0;
 	C(pkt_type);
 	C(ip_summed);
+	skb_copy_queue_mapping(n, skb);
 	C(priority);
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	C(ipvs_property);
@@ -460,6 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
 	new->sk = NULL;
 	new->dev = old->dev;
+	skb_copy_queue_mapping(new, old);
 	new->priority = old->priority;
 	new->protocol = old->protocol;
 	new->dst = dst_clone(old->dst);
@@ -1932,6 +1934,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		tail = nskb;
 
 		nskb->dev = skb->dev;
+		skb_copy_queue_mapping(nskb, skb);
 		nskb->priority = skb->priority;
 		nskb->protocol = skb->protocol;
 		nskb->dst = dst_clone(skb->dst);
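skb_copy_queue_mapping(), called in the three hunks above, is declared in the companion include/linux/skbuff.h change rather than in this diff. A hedged sketch of its assumed behavior: a plain copy of the new queue_mapping field, so clones, copies and GSO segments inherit the originating skb's transmit queue and stay subject to the same per-subqueue flow control.

/*
 * Assumed definition, for reading the hunks above; the real helper may be
 * guarded by the multiqueue config option.
 */
static inline void skb_copy_queue_mapping(struct sk_buff *to,
					  const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}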
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 0ac2524f3b..1387e5411f 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -316,9 +316,10 @@ void ether_setup(struct net_device *dev)
 EXPORT_SYMBOL(ether_setup);
 
 /**
- * alloc_etherdev - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mq - Allocates and sets up an Ethernet device
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *	for this Ethernet device
+ * @queue_count: The number of queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
@@ -328,8 +329,8 @@ EXPORT_SYMBOL(ether_setup);
  * this private data area.
  */
 
-struct net_device *alloc_etherdev(int sizeof_priv)
+struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
 {
-	return alloc_netdev(sizeof_priv, "eth%d", ether_setup);
+	return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
 }
-EXPORT_SYMBOL(alloc_etherdev);
+EXPORT_SYMBOL(alloc_etherdev_mq);
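A hedged sketch of how a driver might consume the new allocator together with per-queue flow control. The mydrv_* names and MYDRV_TX_QUEUES are hypothetical, and netif_stop_subqueue() is assumed to come from the companion netdevice.h change, not from this diff.

/* Hypothetical driver code, not part of this patch. */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MYDRV_TX_QUEUES 4

struct mydrv_priv {
	unsigned int tx_fill[MYDRV_TX_QUEUES];	/* descriptors in flight per ring */
};

static int mydrv_ring_full(struct mydrv_priv *priv, u16 ring)
{
	return priv->tx_fill[ring] >= 256;	/* hypothetical ring depth */
}

static int mydrv_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);
	u16 ring = skb->queue_mapping;	/* chosen by the stack before ->hard_start_xmit() */

	if (mydrv_ring_full(priv, ring)) {
		/* stop only this subqueue; the other rings keep transmitting */
		netif_stop_subqueue(dev, ring);
		return NETDEV_TX_BUSY;
	}

	priv->tx_fill[ring]++;
	/* ... hand skb to hardware ring 'ring' ... */
	return NETDEV_TX_OK;
}

static struct net_device *mydrv_alloc(void)
{
	/* subqueue state is laid out behind the net_device, priv behind that */
	return alloc_etherdev_mq(sizeof(struct mydrv_priv), MYDRV_TX_QUEUES);
}

Stopping a single subqueue instead of the whole device is the point of the patch: the checks added in dev.c, netpoll.c and pktgen.c only skip transmission when the specific queue an skb is mapped to is full.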
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index f05ad9a30b..dfe7e45209 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -277,6 +277,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 	int busy;
 	int nores;
 	int len = skb->len;
+	int subq = skb->queue_mapping;
 	struct sk_buff *skb_res = NULL;
 
 	start = master->slaves;
@@ -293,7 +294,9 @@ restart:
 
 		if (slave->qdisc_sleeping != q)
 			continue;
-		if (netif_queue_stopped(slave) || ! netif_running(slave)) {
+		if (netif_queue_stopped(slave) ||
+		    netif_subqueue_stopped(slave, subq) ||
+		    !netif_running(slave)) {
 			busy = 1;
 			continue;
 		}
@@ -302,6 +305,7 @@ restart:
 		case 0:
 			if (netif_tx_trylock(slave)) {
 				if (!netif_queue_stopped(slave) &&
+				    !netif_subqueue_stopped(slave, subq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
 					netif_tx_unlock(slave);
 					master->slaves = NEXT_SLAVE(q);
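For the wake side that clears the stopped state tested by teql, pktgen and netpoll above, a hedged completion-path sketch; the mydrv_* helpers are hypothetical and netif_wake_subqueue() is assumed from the companion netdevice.h change.

/* Hypothetical TX-completion handler, not part of this patch. */
static int mydrv_ring_has_room(struct net_device *dev, u16 ring);	/* hypothetical */
static void mydrv_reap_ring(struct net_device *dev, u16 ring);		/* hypothetical */

static void mydrv_tx_clean(struct net_device *dev, u16 ring)
{
	mydrv_reap_ring(dev, ring);		/* free completed descriptors */

	/* wake only the ring that drained; other subqueues are left alone,
	 * which is what the per-subqueue checks above rely on
	 */
	if (netif_subqueue_stopped(dev, ring) && mydrv_ring_has_room(dev, ring))
		netif_wake_subqueue(dev, ring);
}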