diff options
author | Tom Herbert <therbert@google.com> | 2010-10-18 14:04:39 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-10-20 05:27:59 -0400 |
commit | e6484930d7c73d324bccda7d43d131088da697b9 (patch) | |
tree | afa528185a8f679730275722cbd19f660101af50 | |
parent | bd25fa7ba59cd26094319dfba0011b48465f7355 (diff) |
net: allocate tx queues in register_netdevice
This patch introduces netif_alloc_netdev_queues which is called from
register_netdevice instead of alloc_netdev_mq. This makes TX queue
allocation symmetric with RX allocation. Also, queue locks allocation
is done in netdev_init_one_queue. Change set_real_num_tx_queues to
fail if requested number < 1 or greater than number of allocated
queues.
Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | include/linux/netdevice.h | 4 | ||||
-rw-r--r-- | net/core/dev.c | 106 |
2 files changed, 55 insertions, 55 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 14fbb04c459d..880d56565828 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1696,8 +1696,8 @@ static inline int netif_is_multiqueue(const struct net_device *dev) | |||
1696 | return dev->num_tx_queues > 1; | 1696 | return dev->num_tx_queues > 1; |
1697 | } | 1697 | } |
1698 | 1698 | ||
1699 | extern void netif_set_real_num_tx_queues(struct net_device *dev, | 1699 | extern int netif_set_real_num_tx_queues(struct net_device *dev, |
1700 | unsigned int txq); | 1700 | unsigned int txq); |
1701 | 1701 | ||
1702 | #ifdef CONFIG_RPS | 1702 | #ifdef CONFIG_RPS |
1703 | extern int netif_set_real_num_rx_queues(struct net_device *dev, | 1703 | extern int netif_set_real_num_rx_queues(struct net_device *dev, |
diff --git a/net/core/dev.c b/net/core/dev.c index d33adecec44b..4c3ac53e4b16 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1553,18 +1553,20 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) | |||
1553 | * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues | 1553 | * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues |
1554 | * greater then real_num_tx_queues stale skbs on the qdisc must be flushed. | 1554 | * greater then real_num_tx_queues stale skbs on the qdisc must be flushed. |
1555 | */ | 1555 | */ |
1556 | void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) | 1556 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) |
1557 | { | 1557 | { |
1558 | unsigned int real_num = dev->real_num_tx_queues; | 1558 | if (txq < 1 || txq > dev->num_tx_queues) |
1559 | return -EINVAL; | ||
1559 | 1560 | ||
1560 | if (unlikely(txq > dev->num_tx_queues)) | 1561 | if (dev->reg_state == NETREG_REGISTERED) { |
1561 | ; | 1562 | ASSERT_RTNL(); |
1562 | else if (txq > real_num) | 1563 | |
1563 | dev->real_num_tx_queues = txq; | 1564 | if (txq < dev->real_num_tx_queues) |
1564 | else if (txq < real_num) { | 1565 | qdisc_reset_all_tx_gt(dev, txq); |
1565 | dev->real_num_tx_queues = txq; | ||
1566 | qdisc_reset_all_tx_gt(dev, txq); | ||
1567 | } | 1566 | } |
1567 | |||
1568 | dev->real_num_tx_queues = txq; | ||
1569 | return 0; | ||
1568 | } | 1570 | } |
1569 | EXPORT_SYMBOL(netif_set_real_num_tx_queues); | 1571 | EXPORT_SYMBOL(netif_set_real_num_tx_queues); |
1570 | 1572 | ||
@@ -4928,20 +4930,6 @@ static void rollback_registered(struct net_device *dev) | |||
4928 | rollback_registered_many(&single); | 4930 | rollback_registered_many(&single); |
4929 | } | 4931 | } |
4930 | 4932 | ||
4931 | static void __netdev_init_queue_locks_one(struct net_device *dev, | ||
4932 | struct netdev_queue *dev_queue, | ||
4933 | void *_unused) | ||
4934 | { | ||
4935 | spin_lock_init(&dev_queue->_xmit_lock); | ||
4936 | netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type); | ||
4937 | dev_queue->xmit_lock_owner = -1; | ||
4938 | } | ||
4939 | |||
4940 | static void netdev_init_queue_locks(struct net_device *dev) | ||
4941 | { | ||
4942 | netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL); | ||
4943 | } | ||
4944 | |||
4945 | unsigned long netdev_fix_features(unsigned long features, const char *name) | 4933 | unsigned long netdev_fix_features(unsigned long features, const char *name) |
4946 | { | 4934 | { |
4947 | /* Fix illegal SG+CSUM combinations. */ | 4935 | /* Fix illegal SG+CSUM combinations. */ |
@@ -5034,6 +5022,41 @@ static int netif_alloc_rx_queues(struct net_device *dev) | |||
5034 | return 0; | 5022 | return 0; |
5035 | } | 5023 | } |
5036 | 5024 | ||
5025 | static int netif_alloc_netdev_queues(struct net_device *dev) | ||
5026 | { | ||
5027 | unsigned int count = dev->num_tx_queues; | ||
5028 | struct netdev_queue *tx; | ||
5029 | |||
5030 | BUG_ON(count < 1); | ||
5031 | |||
5032 | tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL); | ||
5033 | if (!tx) { | ||
5034 | pr_err("netdev: Unable to allocate %u tx queues.\n", | ||
5035 | count); | ||
5036 | return -ENOMEM; | ||
5037 | } | ||
5038 | dev->_tx = tx; | ||
5039 | return 0; | ||
5040 | } | ||
5041 | |||
5042 | static void netdev_init_one_queue(struct net_device *dev, | ||
5043 | struct netdev_queue *queue, | ||
5044 | void *_unused) | ||
5045 | { | ||
5046 | queue->dev = dev; | ||
5047 | |||
5048 | /* Initialize queue lock */ | ||
5049 | spin_lock_init(&queue->_xmit_lock); | ||
5050 | netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); | ||
5051 | queue->xmit_lock_owner = -1; | ||
5052 | } | ||
5053 | |||
5054 | static void netdev_init_queues(struct net_device *dev) | ||
5055 | { | ||
5056 | netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); | ||
5057 | spin_lock_init(&dev->tx_global_lock); | ||
5058 | } | ||
5059 | |||
5037 | /** | 5060 | /** |
5038 | * register_netdevice - register a network device | 5061 | * register_netdevice - register a network device |
5039 | * @dev: device to register | 5062 | * @dev: device to register |
@@ -5067,7 +5090,6 @@ int register_netdevice(struct net_device *dev) | |||
5067 | 5090 | ||
5068 | spin_lock_init(&dev->addr_list_lock); | 5091 | spin_lock_init(&dev->addr_list_lock); |
5069 | netdev_set_addr_lockdep_class(dev); | 5092 | netdev_set_addr_lockdep_class(dev); |
5070 | netdev_init_queue_locks(dev); | ||
5071 | 5093 | ||
5072 | dev->iflink = -1; | 5094 | dev->iflink = -1; |
5073 | 5095 | ||
@@ -5075,6 +5097,12 @@ int register_netdevice(struct net_device *dev) | |||
5075 | if (ret) | 5097 | if (ret) |
5076 | goto out; | 5098 | goto out; |
5077 | 5099 | ||
5100 | ret = netif_alloc_netdev_queues(dev); | ||
5101 | if (ret) | ||
5102 | goto out; | ||
5103 | |||
5104 | netdev_init_queues(dev); | ||
5105 | |||
5078 | /* Init, if this function is available */ | 5106 | /* Init, if this function is available */ |
5079 | if (dev->netdev_ops->ndo_init) { | 5107 | if (dev->netdev_ops->ndo_init) { |
5080 | ret = dev->netdev_ops->ndo_init(dev); | 5108 | ret = dev->netdev_ops->ndo_init(dev); |
@@ -5456,19 +5484,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | |||
5456 | } | 5484 | } |
5457 | EXPORT_SYMBOL(dev_get_stats); | 5485 | EXPORT_SYMBOL(dev_get_stats); |
5458 | 5486 | ||
5459 | static void netdev_init_one_queue(struct net_device *dev, | ||
5460 | struct netdev_queue *queue, | ||
5461 | void *_unused) | ||
5462 | { | ||
5463 | queue->dev = dev; | ||
5464 | } | ||
5465 | |||
5466 | static void netdev_init_queues(struct net_device *dev) | ||
5467 | { | ||
5468 | netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); | ||
5469 | spin_lock_init(&dev->tx_global_lock); | ||
5470 | } | ||
5471 | |||
5472 | struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) | 5487 | struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) |
5473 | { | 5488 | { |
5474 | struct netdev_queue *queue = dev_ingress_queue(dev); | 5489 | struct netdev_queue *queue = dev_ingress_queue(dev); |
@@ -5480,7 +5495,6 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) | |||
5480 | if (!queue) | 5495 | if (!queue) |
5481 | return NULL; | 5496 | return NULL; |
5482 | netdev_init_one_queue(dev, queue, NULL); | 5497 | netdev_init_one_queue(dev, queue, NULL); |
5483 | __netdev_init_queue_locks_one(dev, queue, NULL); | ||
5484 | queue->qdisc = &noop_qdisc; | 5498 | queue->qdisc = &noop_qdisc; |
5485 | queue->qdisc_sleeping = &noop_qdisc; | 5499 | queue->qdisc_sleeping = &noop_qdisc; |
5486 | rcu_assign_pointer(dev->ingress_queue, queue); | 5500 | rcu_assign_pointer(dev->ingress_queue, queue); |
@@ -5502,7 +5516,6 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) | |||
5502 | struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | 5516 | struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, |
5503 | void (*setup)(struct net_device *), unsigned int queue_count) | 5517 | void (*setup)(struct net_device *), unsigned int queue_count) |
5504 | { | 5518 | { |
5505 | struct netdev_queue *tx; | ||
5506 | struct net_device *dev; | 5519 | struct net_device *dev; |
5507 | size_t alloc_size; | 5520 | size_t alloc_size; |
5508 | struct net_device *p; | 5521 | struct net_device *p; |
@@ -5530,20 +5543,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5530 | return NULL; | 5543 | return NULL; |
5531 | } | 5544 | } |
5532 | 5545 | ||
5533 | tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL); | ||
5534 | if (!tx) { | ||
5535 | printk(KERN_ERR "alloc_netdev: Unable to allocate " | ||
5536 | "tx qdiscs.\n"); | ||
5537 | goto free_p; | ||
5538 | } | ||
5539 | |||
5540 | |||
5541 | dev = PTR_ALIGN(p, NETDEV_ALIGN); | 5546 | dev = PTR_ALIGN(p, NETDEV_ALIGN); |
5542 | dev->padded = (char *)dev - (char *)p; | 5547 | dev->padded = (char *)dev - (char *)p; |
5543 | 5548 | ||
5544 | dev->pcpu_refcnt = alloc_percpu(int); | 5549 | dev->pcpu_refcnt = alloc_percpu(int); |
5545 | if (!dev->pcpu_refcnt) | 5550 | if (!dev->pcpu_refcnt) |
5546 | goto free_tx; | 5551 | goto free_p; |
5547 | 5552 | ||
5548 | if (dev_addr_init(dev)) | 5553 | if (dev_addr_init(dev)) |
5549 | goto free_pcpu; | 5554 | goto free_pcpu; |
@@ -5553,7 +5558,6 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5553 | 5558 | ||
5554 | dev_net_set(dev, &init_net); | 5559 | dev_net_set(dev, &init_net); |
5555 | 5560 | ||
5556 | dev->_tx = tx; | ||
5557 | dev->num_tx_queues = queue_count; | 5561 | dev->num_tx_queues = queue_count; |
5558 | dev->real_num_tx_queues = queue_count; | 5562 | dev->real_num_tx_queues = queue_count; |
5559 | 5563 | ||
@@ -5564,8 +5568,6 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5564 | 5568 | ||
5565 | dev->gso_max_size = GSO_MAX_SIZE; | 5569 | dev->gso_max_size = GSO_MAX_SIZE; |
5566 | 5570 | ||
5567 | netdev_init_queues(dev); | ||
5568 | |||
5569 | INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list); | 5571 | INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list); |
5570 | dev->ethtool_ntuple_list.count = 0; | 5572 | dev->ethtool_ntuple_list.count = 0; |
5571 | INIT_LIST_HEAD(&dev->napi_list); | 5573 | INIT_LIST_HEAD(&dev->napi_list); |
@@ -5576,8 +5578,6 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5576 | strcpy(dev->name, name); | 5578 | strcpy(dev->name, name); |
5577 | return dev; | 5579 | return dev; |
5578 | 5580 | ||
5579 | free_tx: | ||
5580 | kfree(tx); | ||
5581 | free_pcpu: | 5581 | free_pcpu: |
5582 | free_percpu(dev->pcpu_refcnt); | 5582 | free_percpu(dev->pcpu_refcnt); |
5583 | free_p: | 5583 | free_p: |