author		Tom Herbert <therbert@google.com>	2010-03-18 20:45:44 -0400
committer	David S. Miller <davem@davemloft.net>	2010-03-18 20:45:44 -0400
commit		1e94d72feab025b8f7c55d07020602f82f3a97dd (patch)
tree		f8d38b4fa5b51046e1bbcaf8f3fc88edc2052cae /net/core/dev.c
parent		a034016287236f435dbb8f1f57aee906f22b4598 (diff)
rps: Fixed build with CONFIG_SMP not enabled.
Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
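
The RPS receive path added by the preceding patch is only meaningful on SMP kernels: it picks a target CPU per packet and kicks remote CPUs with IPIs (see the net_rps_action() comment in the diff below), so on a uniprocessor build the new code does not compile. This patch compiles those paths out under #ifdef CONFIG_SMP and falls back to purely local processing. As a rough illustration of that pattern, here is a minimal, self-contained userspace sketch; every demo_* name is a hypothetical stand-in, not a kernel API, and building it with and without -DCONFIG_SMP mimics the two kernel configurations:

/*
 * Sketch of the CONFIG_SMP guard pattern used by this patch: the
 * steering helper exists only in SMP builds, and the caller falls
 * back to the local CPU otherwise.  Hypothetical demo_* names only.
 */
#include <stdio.h>

struct demo_skb { unsigned int rxhash; };

static int demo_current_cpu(void)
{
	return 0;			/* stand-in for smp_processor_id() */
}

#ifdef CONFIG_SMP
/* Only compiled when more than one CPU can exist. */
static int demo_get_rps_cpu(const struct demo_skb *skb)
{
	return skb->rxhash % 4;		/* pretend there are 4 CPUs */
}
#endif

static int demo_netif_rx(const struct demo_skb *skb)
{
	int cpu;

#ifdef CONFIG_SMP
	cpu = demo_get_rps_cpu(skb);
	if (cpu < 0)
		cpu = demo_current_cpu();
#else
	cpu = demo_current_cpu();	/* UP build: no steering possible */
#endif
	return cpu;
}

int main(void)
{
	struct demo_skb skb = { .rxhash = 7 };

	printf("packet queued to backlog of CPU %d\n", demo_netif_rx(&skb));
	return 0;
}

Compiling once plainly and once with -DCONFIG_SMP exercises both branches, which is exactly what the patch has to keep building in the kernel.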
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	24
1 file changed, 24 insertions, 0 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 17b168671501..1a7e1d1d5ad9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2174,6 +2174,7 @@ int weight_p __read_mostly = 64; /* old backlog weight */
 
 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
+#ifdef CONFIG_SMP
 /*
  * get_rps_cpu is called from netif_receive_skb and returns the target
  * CPU from the RPS map of the receiving queue for a given skb.
@@ -2293,6 +2294,7 @@ static void trigger_softirq(void *data)
 	__napi_schedule(&queue->backlog);
 	__get_cpu_var(netdev_rx_stat).received_rps++;
 }
+#endif /* CONFIG_SMP */
 
 /*
  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
@@ -2320,6 +2322,7 @@ enqueue:
 
 	/* Schedule NAPI for backlog device */
 	if (napi_schedule_prep(&queue->backlog)) {
+#ifdef CONFIG_SMP
 		if (cpu != smp_processor_id()) {
 			struct rps_remote_softirq_cpus *rcpus =
 			    &__get_cpu_var(rps_remote_softirq_cpus);
@@ -2328,6 +2331,9 @@ enqueue:
 			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 		} else
 			__napi_schedule(&queue->backlog);
+#else
+		__napi_schedule(&queue->backlog);
+#endif
 	}
 	goto enqueue;
 }
@@ -2367,9 +2373,13 @@ int netif_rx(struct sk_buff *skb)
 	if (!skb->tstamp.tv64)
 		net_timestamp(skb);
 
+#ifdef CONFIG_SMP
 	cpu = get_rps_cpu(skb->dev, skb);
 	if (cpu < 0)
 		cpu = smp_processor_id();
+#else
+	cpu = smp_processor_id();
+#endif
 
 	return enqueue_to_backlog(skb, cpu);
 }
@@ -2735,6 +2745,7 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
+#ifdef CONFIG_SMP
 	int cpu;
 
 	cpu = get_rps_cpu(skb->dev, skb);
@@ -2743,6 +2754,9 @@ int netif_receive_skb(struct sk_buff *skb)
 		return __netif_receive_skb(skb);
 	else
 		return enqueue_to_backlog(skb, cpu);
+#else
+	return __netif_receive_skb(skb);
+#endif
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
@@ -3168,6 +3182,7 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+#ifdef CONFIG_SMP
 /*
  * net_rps_action sends any pending IPI's for rps. This is only called from
  * softirq and interrupts must be enabled.
@@ -3184,6 +3199,7 @@ static void net_rps_action(cpumask_t *mask)
 	}
 	cpus_clear(*mask);
 }
+#endif
 
 static void net_rx_action(struct softirq_action *h)
 {
@@ -3191,8 +3207,10 @@ static void net_rx_action(struct softirq_action *h)
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
 	void *have;
+#ifdef CONFIG_SMP
 	int select;
 	struct rps_remote_softirq_cpus *rcpus;
+#endif
 
 	local_irq_disable();
 
@@ -3255,6 +3273,7 @@ static void net_rx_action(struct softirq_action *h)
 		netpoll_poll_unlock(have);
 	}
 out:
+#ifdef CONFIG_SMP
 	rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
 	select = rcpus->select;
 	rcpus->select ^= 1;
@@ -3262,6 +3281,9 @@ out:
 	local_irq_enable();
 
 	net_rps_action(&rcpus->mask[select]);
+#else
+	local_irq_enable();
+#endif
 
 #ifdef CONFIG_NET_DMA
 	/*
@@ -6204,9 +6226,11 @@ static int __init net_dev_init(void)
 		queue->completion_queue = NULL;
 		INIT_LIST_HEAD(&queue->poll_list);
 
+#ifdef CONFIG_SMP
 		queue->csd.func = trigger_softirq;
 		queue->csd.info = queue;
 		queue->csd.flags = 0;
+#endif
 
 		queue->backlog.poll = process_backlog;
 		queue->backlog.weight = weight_p;
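
For context on the SMP-only state that the new guards protect: the net_rx_action() hunks above keep two masks in rps_remote_softirq_cpus, flip the select index while interrupts are still disabled, and then drain the previously filled mask (sending the pending RPS IPIs) once interrupts are back on. The following stand-alone userspace sketch shows only that double-buffer flip; the demo_* names, the plain bitmask, and printf are illustrative stand-ins for the kernel's cpumask_t and real IPIs, not the actual dev.c code:

/*
 * Double-buffered "remote CPUs to kick" mask, as used by the SMP-only
 * tail of net_rx_action() in the diff above.  Hypothetical demo_* names.
 */
#include <stdio.h>

#define DEMO_NR_CPUS 8

struct demo_remote_softirq_cpus {
	unsigned long mask[2];	/* one mask is filled while the other drains */
	int select;		/* which mask new entries go into */
};

static struct demo_remote_softirq_cpus demo_rcpus;

/* Enqueue side: remember that @cpu needs a softirq kick. */
static void demo_mark_remote_cpu(int cpu)
{
	demo_rcpus.mask[demo_rcpus.select] |= 1UL << cpu;
}

/* Softirq side: flip buffers (the kernel does this with IRQs off),
 * then drain the old buffer with interrupts re-enabled. */
static void demo_net_rps_action(void)
{
	int select = demo_rcpus.select;
	int cpu;

	demo_rcpus.select ^= 1;		/* new marks now go to the other mask */

	for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		if (demo_rcpus.mask[select] & (1UL << cpu))
			printf("would IPI cpu %d\n", cpu);
	demo_rcpus.mask[select] = 0;
}

int main(void)
{
	demo_mark_remote_cpu(2);
	demo_mark_remote_cpu(5);
	demo_net_rps_action();
	return 0;
}

On a uniprocessor kernel there is no remote CPU to kick, which is why the #else branch of net_rx_action() in this patch reduces to a bare local_irq_enable().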