 include/linux/netdevice.h |  9 ++++++---
 net/core/dev.c            | 79 ++++++++++++++-----------------
 2 files changed, 38 insertions(+), 50 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 649a0252686e..83ab3da149ad 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1381,17 +1381,20 @@ static inline int unregister_gifconf(unsigned int family)
 }
 
 /*
- * Incoming packets are placed on per-cpu queues so that
- * no locking is needed.
+ * Incoming packets are placed on per-cpu queues
  */
 struct softnet_data {
         struct Qdisc *output_queue;
         struct list_head poll_list;
         struct sk_buff *completion_queue;
 
-        /* Elements below can be accessed between CPUs for RPS */
 #ifdef CONFIG_RPS
+        struct softnet_data *rps_ipi_list;
+
+        /* Elements below can be accessed between CPUs for RPS */
         struct call_single_data csd ____cacheline_aligned_in_smp;
+        struct softnet_data *rps_ipi_next;
+        unsigned int cpu;
         unsigned int input_queue_head;
 #endif
         struct sk_buff_head input_pkt_queue;
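
The three fields added here let each per-cpu softnet_data act both as a list head and as a list node: rps_ipi_list anchors the chain of remote queues this CPU has agreed to kick, rps_ipi_next links a queue into some other CPU's chain, and cpu records which processor to signal. A minimal user-space model of that layout (a sketch, not kernel code; the struct name and main() wiring are illustrative):

/* Stand-in for the fields added to softnet_data above.  Because the
 * per-cpu instance is itself the node, queuing an IPI never allocates.
 */
struct sd {
        struct sd *rps_ipi_list;   /* head: remote queues this CPU must kick */
        struct sd *rps_ipi_next;   /* link while on another CPU's chain */
        unsigned int cpu;          /* IPI target for this queue */
};

int main(void)
{
        struct sd cpu0 = { .cpu = 0 }, cpu2 = { .cpu = 2 };

        /* cpu0 decides cpu2 needs an IPI: cpu2's own softnet_data
         * becomes the node on cpu0's chain.
         */
        cpu2.rps_ipi_next = cpu0.rps_ipi_list;   /* NULL here */
        cpu0.rps_ipi_list = &cpu2;
        return !(cpu0.rps_ipi_list->cpu == 2);   /* exits 0 on success */
}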
diff --git a/net/core/dev.c b/net/core/dev.c
index 8eb50e2292fb..05a2b294906b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2345,21 +2345,6 @@ done:
         return cpu;
 }
 
-/*
- * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled
- * to be sent to kick remote softirq processing. There are two masks since
- * the sending of IPIs must be done with interrupts enabled. The select field
- * indicates the current mask that enqueue_backlog uses to schedule IPIs.
- * select is flipped before net_rps_action is called while still under lock,
- * net_rps_action then uses the non-selected mask to send the IPIs and clears
- * it without conflicting with enqueue_backlog operation.
- */
-struct rps_remote_softirq_cpus {
-        cpumask_t mask[2];
-        int select;
-};
-static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus);
-
 /* Called from hardirq (IPI) context */
 static void trigger_softirq(void *data)
 {
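
For contrast, the structure deleted here double-buffered a cpumask: the enqueue path set bits in mask[select] with interrupts off, and the softirq flipped select while still locked, so it could later send IPIs from the now-private mask with interrupts enabled. A rough user-space model of that handoff (illustrative names, a plain bitmask in place of cpumask_t):

#include <stdio.h>

/* Model of the retired double-buffer: flipping 'select' with irqs
 * off means the enqueue side and the IPI sender never touch the
 * same mask concurrently.
 */
struct remote_cpus {
        unsigned long mask[2];
        int select;
};

static void mark_cpu(struct remote_cpus *r, int cpu)
{
        r->mask[r->select] |= 1UL << cpu;     /* cpu_set(), irqs off */
}

static void send_ipis(struct remote_cpus *r)
{
        int cpu, old = r->select;

        r->select ^= 1;                       /* flipped while still irq-off */
        /* local_irq_enable() happened here in the old code */
        for (cpu = 0; cpu < 64; cpu++)
                if (r->mask[old] & (1UL << cpu))
                        printf("IPI cpu %d\n", cpu);
        r->mask[old] = 0;                     /* cpus_clear() */
}

int main(void)
{
        struct remote_cpus r = { { 0, 0 }, 0 };

        mark_cpu(&r, 2);
        mark_cpu(&r, 5);
        send_ipis(&r);    /* prints cpu 2, then cpu 5 */
        return 0;
}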
@@ -2402,10 +2387,12 @@ enqueue:
         if (napi_schedule_prep(&queue->backlog)) {
 #ifdef CONFIG_RPS
                 if (cpu != smp_processor_id()) {
-                        struct rps_remote_softirq_cpus *rcpus =
-                            &__get_cpu_var(rps_remote_softirq_cpus);
+                        struct softnet_data *myqueue;
+
+                        myqueue = &__get_cpu_var(softnet_data);
+                        queue->rps_ipi_next = myqueue->rps_ipi_list;
+                        myqueue->rps_ipi_list = queue;
 
-                        cpu_set(cpu, rcpus->mask[rcpus->select]);
                         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
                         goto enqueue;
                 }
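
Two properties of this hunk are worth spelling out: the push is a plain two-store head insert, safe because the whole path runs with local interrupts disabled, and the list cannot collect duplicates, because a remote backlog is linked only when napi_schedule_prep() wins the 0-to-1 transition of its scheduled bit. A user-space sketch of that guard (a model under those assumptions, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

struct sd {
        struct sd *rps_ipi_list;
        struct sd *rps_ipi_next;
        bool sched;                /* stand-in for the NAPI scheduled bit */
        unsigned int cpu;
};

/* Models napi_schedule_prep(): true only on the 0->1 transition. */
static bool schedule_prep(struct sd *q)
{
        bool was = q->sched;

        q->sched = true;
        return !was;
}

static void enqueue(struct sd *local, struct sd *remote)
{
        if (schedule_prep(remote)) {     /* links at most once */
                remote->rps_ipi_next = local->rps_ipi_list;
                local->rps_ipi_list = remote;
                /* __raise_softirq_irqoff(NET_RX_SOFTIRQ) in the kernel */
        }
}

int main(void)
{
        struct sd cpu0 = { .cpu = 0 }, cpu2 = { .cpu = 2 };

        enqueue(&cpu0, &cpu2);
        enqueue(&cpu0, &cpu2);     /* second call must not re-link */
        printf("head=cpu%u, relinked=%s\n", cpu0.rps_ipi_list->cpu,
               cpu2.rps_ipi_next ? "yes" : "no");
        return 0;
}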
@@ -2910,7 +2897,9 @@ int netif_receive_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
-/* Network device is going away, flush any packets still pending */
+/* Network device is going away, flush any packets still pending
+ * Called with irqs disabled.
+ */
 static void flush_backlog(void *arg)
 {
         struct net_device *dev = arg;
@@ -3338,24 +3327,33 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
-#ifdef CONFIG_RPS
 /*
- * net_rps_action sends any pending IPI's for rps. This is only called from
- * softirq and interrupts must be enabled.
+ * net_rps_action sends any pending IPI's for rps.
+ * Note: called with local irq disabled, but exits with local irq enabled.
  */
-static void net_rps_action(cpumask_t *mask)
+static void net_rps_action(void)
 {
-        int cpu;
+#ifdef CONFIG_RPS
+        struct softnet_data *locqueue = &__get_cpu_var(softnet_data);
+        struct softnet_data *remqueue = locqueue->rps_ipi_list;
 
-        /* Send pending IPI's to kick RPS processing on remote cpus. */
-        for_each_cpu_mask_nr(cpu, *mask) {
-                struct softnet_data *queue = &per_cpu(softnet_data, cpu);
-                if (cpu_online(cpu))
-                        __smp_call_function_single(cpu, &queue->csd, 0);
-        }
-        cpus_clear(*mask);
-}
+        if (remqueue) {
+                locqueue->rps_ipi_list = NULL;
+
+                local_irq_enable();
+
+                /* Send pending IPI's to kick RPS processing on remote cpus. */
+                while (remqueue) {
+                        struct softnet_data *next = remqueue->rps_ipi_next;
+                        if (cpu_online(remqueue->cpu))
+                                __smp_call_function_single(remqueue->cpu,
+                                                           &remqueue->csd, 0);
+                        remqueue = next;
+                }
+        } else
 #endif
+                local_irq_enable();
+}
 
 static void net_rx_action(struct softirq_action *h)
 {
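
The rewritten net_rps_action() resolves the constraint the old double mask existed for: IPIs must go out with interrupts enabled, yet the list is built with them disabled. It detaches the whole chain while still irq-off, so a concurrent enqueue simply starts a fresh list, and only then re-enables interrupts and walks the now-private chain. A user-space model of that flow (printf stands in for __smp_call_function_single(); a sketch, not the kernel code):

#include <stdio.h>

struct sd {
        struct sd *rps_ipi_list;
        struct sd *rps_ipi_next;
        unsigned int cpu;
};

static void rps_action(struct sd *local)
{
        struct sd *rem = local->rps_ipi_list;

        if (rem) {
                local->rps_ipi_list = NULL;   /* detach before irq enable */
                /* local_irq_enable() here in the kernel */
                while (rem) {
                        struct sd *next = rem->rps_ipi_next;

                        printf("IPI cpu %u\n", rem->cpu);
                        rem = next;
                }
        }
        /* else branch: just local_irq_enable() */
}

int main(void)
{
        struct sd cpu0 = { .cpu = 0 }, cpu1 = { .cpu = 1 }, cpu3 = { .cpu = 3 };

        cpu1.rps_ipi_next = &cpu3;            /* cpu0 owes IPIs to 1 and 3 */
        cpu0.rps_ipi_list = &cpu1;
        rps_action(&cpu0);                    /* prints cpu 1, then cpu 3 */
        return 0;
}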
@@ -3363,10 +3361,6 @@ static void net_rx_action(struct softirq_action *h)
         unsigned long time_limit = jiffies + 2;
         int budget = netdev_budget;
         void *have;
-#ifdef CONFIG_RPS
-        int select;
-        struct rps_remote_softirq_cpus *rcpus;
-#endif
 
         local_irq_disable();
 
@@ -3429,17 +3423,7 @@ static void net_rx_action(struct softirq_action *h)
                 netpoll_poll_unlock(have);
         }
 out:
-#ifdef CONFIG_RPS
-        rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
-        select = rcpus->select;
-        rcpus->select ^= 1;
-
-        local_irq_enable();
-
-        net_rps_action(&rcpus->mask[select]);
-#else
-        local_irq_enable();
-#endif
+        net_rps_action();
 
 #ifdef CONFIG_NET_DMA
         /*
@@ -5839,6 +5823,7 @@ static int __init net_dev_init(void)
                 queue->csd.func = trigger_softirq;
                 queue->csd.info = queue;
                 queue->csd.flags = 0;
+                queue->cpu = i;
 #endif
 
                 queue->backlog.poll = process_backlog;