about summary refs log tree commit diff stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-17 03:34:19 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-17 22:21:00 -0400
commite8a0464cc950972824e2e128028ae3db666ec1ed (patch)
tree5022b95396c0f3b313531bc39b19543c03551b9a /net/core/dev.c
parent070825b3840a743e21ebcc44f8279708a4fed977 (diff)
netdev: Allocate multiple queues for TX.
alloc_netdev_mq() now allocates an array of netdev_queue structures for TX, based upon the queue_count argument. Furthermore, all accesses to the TX queues are now vectored through the netdev_get_tx_queue() and netdev_for_each_tx_queue() interfaces. This makes it easy to grep the tree for all things that want to get to a TX queue of a net device. Problem spots which are not really multiqueue aware yet, and only work with one queue, can easily be spotted by grepping for all netdev_get_tx_queue() calls that pass in a zero index. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c40
1 file changed, 31 insertions, 9 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 9b49f74a9820..69378f250695 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1666,6 +1666,12 @@ out_kfree_skb:
1666 * --BLG 1666 * --BLG
1667 */ 1667 */
1668 1668
1669static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1670 struct sk_buff *skb)
1671{
1672 return netdev_get_tx_queue(dev, 0);
1673}
1674
1669int dev_queue_xmit(struct sk_buff *skb) 1675int dev_queue_xmit(struct sk_buff *skb)
1670{ 1676{
1671 struct net_device *dev = skb->dev; 1677 struct net_device *dev = skb->dev;
@@ -1702,7 +1708,7 @@ int dev_queue_xmit(struct sk_buff *skb)
1702 } 1708 }
1703 1709
1704gso: 1710gso:
1705 txq = &dev->tx_queue; 1711 txq = dev_pick_tx(dev, skb);
1706 spin_lock_prefetch(&txq->lock); 1712 spin_lock_prefetch(&txq->lock);
1707 1713
1708 /* Disable soft irqs for various locks below. Also 1714 /* Disable soft irqs for various locks below. Also
@@ -3788,8 +3794,9 @@ static void rollback_registered(struct net_device *dev)
3788 dev_put(dev); 3794 dev_put(dev);
3789} 3795}
3790 3796
3791static void __netdev_init_queue_locks_one(struct netdev_queue *dev_queue, 3797static void __netdev_init_queue_locks_one(struct net_device *dev,
3792 struct net_device *dev) 3798 struct netdev_queue *dev_queue,
3799 void *_unused)
3793{ 3800{
3794 spin_lock_init(&dev_queue->_xmit_lock); 3801 spin_lock_init(&dev_queue->_xmit_lock);
3795 netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type); 3802 netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
@@ -3798,8 +3805,8 @@ static void __netdev_init_queue_locks_one(struct netdev_queue *dev_queue,
3798 3805
3799static void netdev_init_queue_locks(struct net_device *dev) 3806static void netdev_init_queue_locks(struct net_device *dev)
3800{ 3807{
3801 __netdev_init_queue_locks_one(&dev->tx_queue, dev); 3808 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3802 __netdev_init_queue_locks_one(&dev->rx_queue, dev); 3809 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
3803} 3810}
3804 3811
3805/** 3812/**
@@ -4119,7 +4126,8 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
4119} 4126}
4120 4127
4121static void netdev_init_one_queue(struct net_device *dev, 4128static void netdev_init_one_queue(struct net_device *dev,
4122 struct netdev_queue *queue) 4129 struct netdev_queue *queue,
4130 void *_unused)
4123{ 4131{
4124 spin_lock_init(&queue->lock); 4132 spin_lock_init(&queue->lock);
4125 queue->dev = dev; 4133 queue->dev = dev;
@@ -4127,8 +4135,8 @@ static void netdev_init_one_queue(struct net_device *dev,
4127 4135
4128static void netdev_init_queues(struct net_device *dev) 4136static void netdev_init_queues(struct net_device *dev)
4129{ 4137{
4130 netdev_init_one_queue(dev, &dev->rx_queue); 4138 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4131 netdev_init_one_queue(dev, &dev->tx_queue); 4139 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4132} 4140}
4133 4141
4134/** 4142/**
@@ -4145,9 +4153,10 @@ static void netdev_init_queues(struct net_device *dev)
4145struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, 4153struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4146 void (*setup)(struct net_device *), unsigned int queue_count) 4154 void (*setup)(struct net_device *), unsigned int queue_count)
4147{ 4155{
4148 void *p; 4156 struct netdev_queue *tx;
4149 struct net_device *dev; 4157 struct net_device *dev;
4150 int alloc_size; 4158 int alloc_size;
4159 void *p;
4151 4160
4152 BUG_ON(strlen(name) >= sizeof(dev->name)); 4161 BUG_ON(strlen(name) >= sizeof(dev->name));
4153 4162
@@ -4167,11 +4176,22 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4167 return NULL; 4176 return NULL;
4168 } 4177 }
4169 4178
4179 tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
4180 if (!tx) {
4181 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4182 "tx qdiscs.\n");
4183 kfree(p);
4184 return NULL;
4185 }
4186
4170 dev = (struct net_device *) 4187 dev = (struct net_device *)
4171 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); 4188 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4172 dev->padded = (char *)dev - (char *)p; 4189 dev->padded = (char *)dev - (char *)p;
4173 dev_net_set(dev, &init_net); 4190 dev_net_set(dev, &init_net);
4174 4191
4192 dev->_tx = tx;
4193 dev->num_tx_queues = queue_count;
4194
4175 if (sizeof_priv) { 4195 if (sizeof_priv) {
4176 dev->priv = ((char *)dev + 4196 dev->priv = ((char *)dev +
4177 ((sizeof(struct net_device) + 4197 ((sizeof(struct net_device) +
@@ -4205,6 +4225,8 @@ void free_netdev(struct net_device *dev)
4205{ 4225{
4206 release_net(dev_net(dev)); 4226 release_net(dev_net(dev));
4207 4227
4228 kfree(dev->_tx);
4229
4208 /* Compatibility with error handling in drivers */ 4230 /* Compatibility with error handling in drivers */
4209 if (dev->reg_state == NETREG_UNINITIALIZED) { 4231 if (dev->reg_state == NETREG_UNINITIALIZED) {
4210 kfree((char *)dev - dev->padded); 4232 kfree((char *)dev - dev->padded);