author     Eric Dumazet <eric.dumazet@gmail.com>     2010-03-24 15:13:54 -0400
committer  David S. Miller <davem@davemloft.net>     2010-03-25 15:07:00 -0400
commit     df3345457a7a174dfb5872a070af80d456985038 (patch)
tree       d8d2d2a86d0b3473783ea2709ff242817e78ed54 /net
parent     2381a55c88453d3f29fe62d235579a05fc20b7b3 (diff)
rps: add CONFIG_RPS
RPS currently depends on SMP and SYSFS.

Adding a CONFIG_RPS symbol makes sense in case this requirement changes in
the future. This patch saves about 1500 bytes of kernel text when SMP is
enabled but SYSFS is not.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
 net/Kconfig    |  5 +++++
 net/core/dev.c | 29 +++++++++++++++++++----------
 2 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/net/Kconfig b/net/Kconfig
index 041c35edb763..68514644ce91 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -203,6 +203,11 @@ source "net/ieee802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
 
+config RPS
+	boolean
+	depends on SMP && SYSFS
+	default y
+
 menu "Network testing"
 
 config NET_PKTGEN
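The new symbol has no prompt, so it never appears in menuconfig: Kconfig enables it automatically (default y) whenever both SMP and SYSFS are set, and leaves it unset otherwise. For an enabled bool symbol the build also emits a matching CONFIG_RPS preprocessor macro, which is what the #ifdef guards in net/core/dev.c below test. A minimal, self-contained C sketch (hypothetical names, not kernel code; build with or without -DCONFIG_RPS=1) shows why guarded fields and statements disappear entirely from the object file when the option is off, which is where the quoted ~1500 byte text saving comes from:

/* Illustrative only: pretend this is a driver-private struct and helper
 * that are only useful when RPS is available.  Compile with
 * -DCONFIG_RPS=1 to mimic an enabled Kconfig bool, or without it to
 * mimic a disabled one. */
#include <stdio.h>

struct rx_stats {
	unsigned long packets;
#ifdef CONFIG_RPS
	unsigned long steered;	/* field exists only in RPS builds */
#endif
};

static void count_packet(struct rx_stats *s, int steered_to_other_cpu)
{
	s->packets++;
#ifdef CONFIG_RPS
	if (steered_to_other_cpu)
		s->steered++;	/* compiled out when CONFIG_RPS is unset */
#else
	(void)steered_to_other_cpu;
#endif
}

int main(void)
{
	struct rx_stats s = { 0 };

	count_packet(&s, 1);
	printf("packets=%lu sizeof(stats)=%zu\n", s.packets, sizeof(s));
	return 0;
}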
diff --git a/net/core/dev.c b/net/core/dev.c
index 5e3dc28cbf5a..bcb3ed26af1c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2177,7 +2177,7 @@ int weight_p __read_mostly = 64; /* old backlog weight */
 
 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 /*
  * get_rps_cpu is called from netif_receive_skb and returns the target
  * CPU from the RPS map of the receiving queue for a given skb.
@@ -2325,7 +2325,7 @@ enqueue:
 
 	/* Schedule NAPI for backlog device */
 	if (napi_schedule_prep(&queue->backlog)) {
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 		if (cpu != smp_processor_id()) {
 			struct rps_remote_softirq_cpus *rcpus =
 			    &__get_cpu_var(rps_remote_softirq_cpus);
@@ -2376,7 +2376,7 @@ int netif_rx(struct sk_buff *skb)
 	if (!skb->tstamp.tv64)
 		net_timestamp(skb);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 	cpu = get_rps_cpu(skb->dev, skb);
 	if (cpu < 0)
 		cpu = smp_processor_id();
@@ -2750,7 +2750,7 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 	int cpu;
 
 	cpu = get_rps_cpu(skb->dev, skb);
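The netif_rx() and netif_receive_skb() hunks above show the dispatch pattern every one of these guards protects: ask get_rps_cpu() for a target CPU and fall back to the local CPU when it returns a negative value. A simplified, self-contained model of that decision (hypothetical map and hash, not the kernel's get_rps_cpu()):

/* Hypothetical, simplified model of "steer by flow hash, fall back to
 * the local CPU" -- illustrative only, not net/core/dev.c. */
#include <stdio.h>

#define MAP_LEN 4

struct rps_map_sketch {
	unsigned int len;
	int cpus[MAP_LEN];
};

/* Returns a target CPU, or -1 when no map is configured. */
static int pick_rps_cpu(const struct rps_map_sketch *map, unsigned int flow_hash)
{
	if (!map || map->len == 0)
		return -1;
	return map->cpus[flow_hash % map->len];
}

int main(void)
{
	struct rps_map_sketch map = { .len = 2, .cpus = { 1, 3 } };
	int local_cpu = 0;

	int cpu = pick_rps_cpu(&map, 0xdeadbeef);
	if (cpu < 0)
		cpu = local_cpu;	/* same fallback as the guarded code above */

	printf("deliver on cpu %d\n", cpu);
	return 0;
}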
@@ -3189,7 +3189,7 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 /*
  * net_rps_action sends any pending IPI's for rps. This is only called from
  * softirq and interrupts must be enabled.
@@ -3214,7 +3214,7 @@ static void net_rx_action(struct softirq_action *h)
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
 	void *have;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 	int select;
 	struct rps_remote_softirq_cpus *rcpus;
 #endif
@@ -3280,7 +3280,7 @@ static void net_rx_action(struct softirq_action *h)
 		netpoll_poll_unlock(have);
 	}
 out:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 	rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
 	select = rcpus->select;
 	rcpus->select ^= 1;
@@ -5277,6 +5277,7 @@ int register_netdevice(struct net_device *dev)
 
 	dev->iflink = -1;
 
+#ifdef CONFIG_RPS
 	if (!dev->num_rx_queues) {
 		/*
 		 * Allocate a single RX queue if driver never called
@@ -5293,7 +5294,7 @@ int register_netdevice(struct net_device *dev)
 		atomic_set(&dev->_rx->count, 1);
 		dev->num_rx_queues = 1;
 	}
-
+#endif
 	/* Init, if this function is available */
 	if (dev->netdev_ops->ndo_init) {
 		ret = dev->netdev_ops->ndo_init(dev);
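register_netdevice() only needs its fallback RX-queue allocation when RPS can actually use the queue, so that block is now compiled out together with the rest. The shape of the fallback, "if the driver never set up RX queues, give the device a single default one", is easy to model on its own; the sketch below uses hypothetical types and is not the kernel code:

/* Hypothetical, simplified model of "allocate one default RX queue if the
 * driver did not provide any" -- illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct rx_queue_sketch {
	int id;
};

struct netdev_sketch {
	unsigned int num_rx_queues;
	struct rx_queue_sketch *rx;
};

static int ensure_default_rx_queue(struct netdev_sketch *dev)
{
#ifdef CONFIG_RPS
	if (!dev->num_rx_queues) {
		dev->rx = calloc(1, sizeof(*dev->rx));
		if (!dev->rx)
			return -1;
		dev->num_rx_queues = 1;	/* single default queue */
	}
#else
	(void)dev;			/* whole block compiles away */
#endif
	return 0;
}

int main(void)
{
	struct netdev_sketch dev = { 0 };

	if (ensure_default_rx_queue(&dev))
		return 1;
	printf("rx queues: %u\n", dev.num_rx_queues);
	free(dev.rx);
	return 0;
}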
@@ -5653,11 +5654,13 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		void (*setup)(struct net_device *), unsigned int queue_count)
 {
 	struct netdev_queue *tx;
-	struct netdev_rx_queue *rx;
 	struct net_device *dev;
 	size_t alloc_size;
 	struct net_device *p;
+#ifdef CONFIG_RPS
+	struct netdev_rx_queue *rx;
 	int i;
+#endif
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
@@ -5683,6 +5686,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		goto free_p;
 	}
 
+#ifdef CONFIG_RPS
 	rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
 	if (!rx) {
 		printk(KERN_ERR "alloc_netdev: Unable to allocate "
@@ -5698,6 +5702,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	 */
 	for (i = 0; i < queue_count; i++)
 		rx[i].first = rx;
+#endif
 
 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
 	dev->padded = (char *)dev - (char *)p;
@@ -5713,8 +5718,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	dev->num_tx_queues = queue_count;
 	dev->real_num_tx_queues = queue_count;
 
+#ifdef CONFIG_RPS
 	dev->_rx = rx;
 	dev->num_rx_queues = queue_count;
+#endif
 
 	dev->gso_max_size = GSO_MAX_SIZE;
 
@@ -5731,8 +5738,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	return dev;
 
 free_rx:
+#ifdef CONFIG_RPS
 	kfree(rx);
 free_tx:
+#endif
 	kfree(tx);
 free_p:
 	kfree(p);
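Note the arrangement of the unwind labels in the last hunk: free_rx: stays outside the guard, so later failure paths can keep jumping to it in either configuration, while kfree(rx) and the free_tx: label move inside it, free_tx being needed only by the guarded rx allocation failure path. A compact sketch of the same conditional goto-unwind layout (plain C with malloc/free, hypothetical resources, not alloc_netdev_mq() itself):

/* Hypothetical sketch of the conditional error-unwind layout above --
 * illustrative only. */
#include <stdio.h>
#include <stdlib.h>

static int setup_sketch(int fail_late)
{
	void *tx;
#ifdef CONFIG_RPS
	void *rx;
#endif
	int ret = 0;

	tx = malloc(64);
	if (!tx)
		return -1;

#ifdef CONFIG_RPS
	rx = malloc(64);
	if (!rx) {
		ret = -1;
		goto free_tx;	/* this jump only exists in RPS builds */
	}
#endif

	if (fail_late) {	/* later failures unwind via free_rx in any build */
		ret = -1;
		goto free_rx;
	}

	/* ...a real function would hand tx (and rx) off to a caller here;
	 * the sketch just falls through and releases them... */

free_rx:
#ifdef CONFIG_RPS
	free(rx);
free_tx:
#endif
	free(tx);
	return ret;
}

int main(void)
{
	printf("ok=%d failed=%d\n", setup_sketch(0), setup_sketch(1));
	return 0;
}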
@@ -6236,7 +6245,7 @@ static int __init net_dev_init(void)
 		queue->completion_queue = NULL;
 		INIT_LIST_HEAD(&queue->poll_list);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 		queue->csd.func = trigger_softirq;
 		queue->csd.info = queue;
 		queue->csd.flags = 0;