path: root/net/core/dev.c
author		Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>	2007-07-06 16:36:20 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-07-11 01:16:21 -0400
commit		f25f4e44808f0f6c9875d94ef1c41ef86c288eb2 (patch)
tree		d7809dd5e957f1626185326d0c3438ff9a04d350 /net/core/dev.c
parent		a093bf006e09a305e95ff0938c0a18b7520aef67 (diff)
[CORE] Stack changes to add multiqueue hardware support API
Add the multiqueue hardware device support API to the core network stack.
Allow drivers to allocate multiple queues and manage them at the netdev
level if they choose to do so.

Added a new field to sk_buff, namely queue_mapping, for drivers to know
which tx_ring to select based on OS classification of the flow.

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
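For illustration, a minimal sketch of the allocation side of this API as a driver might use it. Only alloc_netdev_mq(), its queue_count argument and skb->queue_mapping come from this patch series; the "my_*" names, the ring structure and MY_TX_QUEUES are hypothetical:

/* hypothetical multiqueue Ethernet driver: one subqueue per hardware Tx ring */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define MY_TX_QUEUES	4

struct my_ring;			/* driver-defined hardware Tx ring state */

struct my_priv {
	struct my_ring *tx_ring[MY_TX_QUEUES];
};

static struct net_device *my_create_netdev(void)
{
	struct net_device *dev;

	/* ask the stack for MY_TX_QUEUES egress subqueues */
	dev = alloc_netdev_mq(sizeof(struct my_priv), "eth%d",
			      ether_setup, MY_TX_QUEUES);
	if (!dev)
		return NULL;

	/* the driver private area is still reached via netdev_priv();
	 * the per-queue state now sits between struct net_device and it */
	return dev;
}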
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	36
1 files changed, 26 insertions, 10 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 6dce9d2d46f2..7ddf66d0ad5e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1429,7 +1429,9 @@ gso:
 			skb->next = nskb;
 			return rc;
 		}
-		if (unlikely(netif_queue_stopped(dev) && skb->next))
+		if (unlikely((netif_queue_stopped(dev) ||
+			     netif_subqueue_stopped(dev, skb->queue_mapping)) &&
+			     skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
 
@@ -1547,6 +1549,8 @@ gso:
 		spin_lock(&dev->queue_lock);
 		q = dev->qdisc;
 		if (q->enqueue) {
+			/* reset queue_mapping to zero */
+			skb->queue_mapping = 0;
 			rc = q->enqueue(skb, q);
 			qdisc_run(dev);
 			spin_unlock(&dev->queue_lock);
@@ -1576,7 +1580,8 @@ gso:
 
 			HARD_TX_LOCK(dev, cpu);
 
-			if (!netif_queue_stopped(dev)) {
+			if (!netif_queue_stopped(dev) &&
+			    !netif_subqueue_stopped(dev, skb->queue_mapping)) {
 				rc = 0;
 				if (!dev_hard_start_xmit(skb, dev)) {
 					HARD_TX_UNLOCK(dev);
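The checks above only pay off if drivers stop and wake individual subqueues as their rings fill and drain. A hedged sketch of that driver side, reusing the hypothetical names from the earlier example; only skb->queue_mapping, netif_subqueue_stopped() and the stop/wake subqueue helpers introduced alongside this change are taken from the patch series:

/* hypothetical hard_start_xmit: honour the stack's queue selection */
static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned int queue = skb->queue_mapping;
	struct my_ring *ring = priv->tx_ring[queue];

	/* ... post skb onto ring's descriptors ... */

	if (my_ring_is_full(ring))	/* my_ring_is_full() is made up */
		/* the core Tx path will now avoid this subqueue */
		netif_stop_subqueue(dev, queue);

	return NETDEV_TX_OK;
}

/* Tx completion: reopen the subqueue once descriptors are reclaimed */
static void my_tx_clean(struct net_device *dev, unsigned int queue)
{
	if (netif_subqueue_stopped(dev, queue))
		netif_wake_subqueue(dev, queue);
}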
@@ -3539,16 +3544,18 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
 }
 
 /**
- *	alloc_netdev - allocate network device
+ *	alloc_netdev_mq - allocate network device
  *	@sizeof_priv:	size of private data to allocate space for
  *	@name:		device name format string
  *	@setup:		callback to initialize device
+ *	@queue_count:	the number of subqueues to allocate
  *
  *	Allocates a struct net_device with private data area for driver use
- *	and performs basic initialization.
+ *	and performs basic initialization.  Also allocates subquue structs
+ *	for each queue on the device at the end of the netdevice.
  */
-struct net_device *alloc_netdev(int sizeof_priv, const char *name,
-		void (*setup)(struct net_device *))
+struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+		void (*setup)(struct net_device *), unsigned int queue_count)
 {
 	void *p;
 	struct net_device *dev;
@@ -3557,7 +3564,9 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
 	/* ensure 32-byte alignment of both the device and private area */
-	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
+		     (sizeof(struct net_device_subqueue) * queue_count)) &
+		     ~NETDEV_ALIGN_CONST;
 	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
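Taken together with the dev->priv computation in the following hunk, the single kzalloc() now has to cover the device, the per-queue state and the private area. A rough sketch of the resulting layout, with padding rounding each boundary up to the 32-byte alignment the comment above asks for:

	p (kzalloc return)
	  +- padding up to a 32-byte boundary          (recorded in dev->padded)
	  +- struct net_device                         (dev)
	  +- struct net_device_subqueue[queue_count]
	  +- padding up to the next 32-byte boundary
	  +- driver private area, sizeof_priv bytes    (dev->priv)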
@@ -3570,15 +3579,22 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
 		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
 	dev->padded = (char *)dev - (char *)p;
 
-	if (sizeof_priv)
-		dev->priv = netdev_priv(dev);
+	if (sizeof_priv) {
+		dev->priv = ((char *)dev +
+			     ((sizeof(struct net_device) +
+			       (sizeof(struct net_device_subqueue) *
+				queue_count) + NETDEV_ALIGN_CONST)
+			      & ~NETDEV_ALIGN_CONST));
+	}
+
+	dev->egress_subqueue_count = queue_count;
 
 	dev->get_stats = internal_stats;
 	setup(dev);
 	strcpy(dev->name, name);
 	return dev;
 }
-EXPORT_SYMBOL(alloc_netdev);
+EXPORT_SYMBOL(alloc_netdev_mq);
 
 /**
  *	free_netdev - free network device