 include/linux/netdevice.h |   4
 net/core/dev.c            | 149
 2 files changed, 82 insertions(+), 71 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 83ab3da149ad..3c5ed5f5274e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1401,10 +1401,10 @@ struct softnet_data {
 	struct napi_struct	backlog;
 };
 
-static inline void incr_input_queue_head(struct softnet_data *queue)
+static inline void input_queue_head_incr(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	queue->input_queue_head++;
+	sd->input_queue_head++;
 #endif
 }
 
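The helper is renamed from incr_input_queue_head() to input_queue_head_incr(), matching the kernel's object-first/verb-last naming convention, and the parameter becomes sd like everywhere else in the patch. Since the body is guarded by CONFIG_RPS, calls compile away entirely on non-RPS builds. A minimal user-space sketch of that compile-out pattern (all names here are hypothetical, not kernel API):

/* The counter increment only exists when the feature macro is defined,
 * so callers never need their own #ifdefs around each call site. */
#include <stdio.h>

struct stats {
	unsigned int head;	/* consumed-entry counter, feature-gated */
};

static inline void stats_head_incr(struct stats *s)
{
#ifdef MYFEATURE
	s->head++;		/* real work only when the feature is on */
#endif
	(void)s;		/* silence unused warning when it is off */
}

int main(void)
{
	struct stats s = { 0 };

	stats_head_incr(&s);	/* no-op unless built with -DMYFEATURE */
	printf("head = %u\n", s.head);
	return 0;
}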
diff --git a/net/core/dev.c b/net/core/dev.c
index 05a2b294906b..7f5755b0a57c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -208,17 +208,17 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 }
 
-static inline void rps_lock(struct softnet_data *queue)
+static inline void rps_lock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_lock(&queue->input_pkt_queue.lock);
+	spin_lock(&sd->input_pkt_queue.lock);
 #endif
 }
 
-static inline void rps_unlock(struct softnet_data *queue)
+static inline void rps_unlock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_unlock(&queue->input_pkt_queue.lock);
+	spin_unlock(&sd->input_pkt_queue.lock);
 #endif
 }
 
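rps_lock()/rps_unlock() take input_pkt_queue.lock only under CONFIG_RPS: with RPS enabled another CPU may enqueue into this CPU's input_pkt_queue, while without it the queue is strictly CPU-local and disabling interrupts already suffices. A standalone sketch of the same conditional-locking idea, with a pthread mutex standing in for the spinlock and hypothetical names throughout:

#include <pthread.h>
#include <stdio.h>

#define CROSS_THREAD_ACCESS 1	/* analogue of CONFIG_RPS */

struct pkt_queue {
	int len;
	pthread_mutex_t lock;
};

static inline void queue_lock(struct pkt_queue *q)
{
#if CROSS_THREAD_ACCESS
	pthread_mutex_lock(&q->lock);	/* only real when others can touch q */
#endif
}

static inline void queue_unlock(struct pkt_queue *q)
{
#if CROSS_THREAD_ACCESS
	pthread_mutex_unlock(&q->lock);
#endif
}

int main(void)
{
	struct pkt_queue q = { 0, PTHREAD_MUTEX_INITIALIZER };

	queue_lock(&q);
	q.len++;		/* critical section */
	queue_unlock(&q);
	printf("qlen = %d\n", q.len);
	return 0;
}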
@@ -2346,63 +2346,74 @@ done:
 }
 
 /* Called from hardirq (IPI) context */
-static void trigger_softirq(void *data)
+static void rps_trigger_softirq(void *data)
 {
-	struct softnet_data *queue = data;
-	__napi_schedule(&queue->backlog);
+	struct softnet_data *sd = data;
+
+	__napi_schedule(&sd->backlog);
 	__get_cpu_var(netdev_rx_stat).received_rps++;
 }
+
 #endif /* CONFIG_RPS */
 
 /*
+ * Check if this softnet_data structure is another cpu one
+ * If yes, queue it to our IPI list and return 1
+ * If no, return 0
+ */
+static int rps_ipi_queued(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+
+	if (sd != mysd) {
+		sd->rps_ipi_next = mysd->rps_ipi_list;
+		mysd->rps_ipi_list = sd;
+
+		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+		return 1;
+	}
+#endif /* CONFIG_RPS */
+	return 0;
+}
+
+/*
  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
  * queue (may be a remote CPU queue).
  */
 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 			      unsigned int *qtail)
 {
-	struct softnet_data *queue;
+	struct softnet_data *sd;
 	unsigned long flags;
 
-	queue = &per_cpu(softnet_data, cpu);
+	sd = &per_cpu(softnet_data, cpu);
 
 	local_irq_save(flags);
 	__get_cpu_var(netdev_rx_stat).total++;
 
-	rps_lock(queue);
-	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
-		if (queue->input_pkt_queue.qlen) {
+	rps_lock(sd);
+	if (sd->input_pkt_queue.qlen <= netdev_max_backlog) {
+		if (sd->input_pkt_queue.qlen) {
 enqueue:
-			__skb_queue_tail(&queue->input_pkt_queue, skb);
+			__skb_queue_tail(&sd->input_pkt_queue, skb);
 #ifdef CONFIG_RPS
-			*qtail = queue->input_queue_head +
-				 queue->input_pkt_queue.qlen;
+			*qtail = sd->input_queue_head + sd->input_pkt_queue.qlen;
 #endif
-			rps_unlock(queue);
+			rps_unlock(sd);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
 		}
 
 		/* Schedule NAPI for backlog device */
-		if (napi_schedule_prep(&queue->backlog)) {
-#ifdef CONFIG_RPS
-			if (cpu != smp_processor_id()) {
-				struct softnet_data *myqueue;
-
-				myqueue = &__get_cpu_var(softnet_data);
-				queue->rps_ipi_next = myqueue->rps_ipi_list;
-				myqueue->rps_ipi_list = queue;
-
-				__raise_softirq_irqoff(NET_RX_SOFTIRQ);
-				goto enqueue;
-			}
-#endif
-			__napi_schedule(&queue->backlog);
+		if (napi_schedule_prep(&sd->backlog)) {
+			if (!rps_ipi_queued(sd))
+				__napi_schedule(&sd->backlog);
 		}
 		goto enqueue;
 	}
 
-	rps_unlock(queue);
+	rps_unlock(sd);
 
 	__get_cpu_var(netdev_rx_stat).dropped++;
 	local_irq_restore(flags);
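The one functional-looking change in this hunk: the open-coded "chain this remote softnet_data onto our IPI list" block inside enqueue_to_backlog() moves into the rps_ipi_queued() helper, so the call site reduces to "if it wasn't queued for a remote kick, schedule the local backlog". A user-space sketch of that queue-or-handle-locally decision on an intrusive singly linked list (hypothetical names, not kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct worker {
	int id;
	struct worker *ipi_next;	/* intrusive list link */
};

static struct worker *local_worker;	/* "this CPU" */
static struct worker *ipi_list;		/* deferred notifications */

/* Returns true if wk is remote and was queued for a deferred kick. */
static bool ipi_queued(struct worker *wk)
{
	if (wk != local_worker) {
		wk->ipi_next = ipi_list;	/* push front, O(1) */
		ipi_list = wk;
		return true;
	}
	return false;				/* local: caller handles it */
}

int main(void)
{
	struct worker a = { 0, NULL }, b = { 1, NULL };

	local_worker = &a;
	if (!ipi_queued(&a))
		printf("worker %d handled locally\n", a.id);
	if (ipi_queued(&b))
		printf("worker %d queued for a deferred kick\n", b.id);
	return 0;
}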
@@ -2903,17 +2914,17 @@ EXPORT_SYMBOL(netif_receive_skb);
 static void flush_backlog(void *arg)
 {
 	struct net_device *dev = arg;
-	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 	struct sk_buff *skb, *tmp;
 
-	rps_lock(queue);
-	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+	rps_lock(sd);
+	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp)
 		if (skb->dev == dev) {
-			__skb_unlink(skb, &queue->input_pkt_queue);
+			__skb_unlink(skb, &sd->input_pkt_queue);
 			kfree_skb(skb);
-			incr_input_queue_head(queue);
+			input_queue_head_incr(sd);
 		}
-	rps_unlock(queue);
+	rps_unlock(sd);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
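flush_backlog() is only touched by the rename, but it leans on skb_queue_walk_safe(), which caches the next pointer so the current skb can be unlinked and freed mid-walk. The same walk-safe idiom on a plain singly linked list, as a hedged standalone sketch (not the kernel's skb API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int dev_id;
	struct node *next;
};

/* Remove and free every node whose dev_id matches. */
static void flush_matching(struct node **head, int dev_id)
{
	struct node **pp = head;
	struct node *n, *tmp;

	for (n = *head; n; n = tmp) {
		tmp = n->next;			/* saved before any free() */
		if (n->dev_id == dev_id) {
			*pp = tmp;		/* unlink */
			free(n);
		} else {
			pp = &n->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL;
	int ids[] = { 1, 2, 1 };

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->dev_id = ids[i];
		n->next = head;
		head = n;
	}
	flush_matching(&head, 1);	/* drop packets for "device 1" */
	for (struct node *n = head; n; n = n->next)
		printf("kept dev %d\n", n->dev_id);
	return 0;
}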
@@ -3219,23 +3230,23 @@ EXPORT_SYMBOL(napi_gro_frags);
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
-	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
 
 		local_irq_disable();
-		rps_lock(queue);
-		skb = __skb_dequeue(&queue->input_pkt_queue);
+		rps_lock(sd);
+		skb = __skb_dequeue(&sd->input_pkt_queue);
 		if (!skb) {
 			__napi_complete(napi);
-			rps_unlock(queue);
+			rps_unlock(sd);
 			local_irq_enable();
 			break;
 		}
-		incr_input_queue_head(queue);
-		rps_unlock(queue);
+		input_queue_head_incr(sd);
+		rps_unlock(sd);
 		local_irq_enable();
 
 		__netif_receive_skb(skb);
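Again a pure rename, but the shape of process_backlog() is worth noting: interrupts are disabled and the queue lock is held only around each dequeue, and __netif_receive_skb() runs with both released. A rough pthread analogue of that short-critical-section discipline (hypothetical names; a mutex stands in for the irq-off + spinlock pair):

#include <pthread.h>
#include <stdio.h>

#define QLEN 8

static int queue[QLEN];
static int head, tail;			/* head == tail means empty */
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static void handle(int pkt)		/* slow work, lock not held */
{
	printf("handled %d\n", pkt);
}

static void drain(int quota)
{
	while (quota-- > 0) {
		int pkt;

		pthread_mutex_lock(&qlock);
		if (head == tail) {		/* empty: stop polling */
			pthread_mutex_unlock(&qlock);
			break;
		}
		pkt = queue[head++ % QLEN];	/* short critical section */
		pthread_mutex_unlock(&qlock);

		handle(pkt);			/* outside the lock */
	}
}

int main(void)
{
	queue[tail++ % QLEN] = 42;
	queue[tail++ % QLEN] = 43;
	drain(4);
	return 0;
}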
@@ -3331,24 +3342,25 @@ EXPORT_SYMBOL(netif_napi_del);
  * net_rps_action sends any pending IPI's for rps.
  * Note: called with local irq disabled, but exits with local irq enabled.
  */
-static void net_rps_action(void)
+static void net_rps_action_and_irq_disable(void)
 {
 #ifdef CONFIG_RPS
-	struct softnet_data *locqueue = &__get_cpu_var(softnet_data);
-	struct softnet_data *remqueue = locqueue->rps_ipi_list;
+	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *remsd = sd->rps_ipi_list;
 
-	if (remqueue) {
-		locqueue->rps_ipi_list = NULL;
+	if (remsd) {
+		sd->rps_ipi_list = NULL;
 
 		local_irq_enable();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
-		while (remqueue) {
-			struct softnet_data *next = remqueue->rps_ipi_next;
-			if (cpu_online(remqueue->cpu))
-				__smp_call_function_single(remqueue->cpu,
-							   &remqueue->csd, 0);
-			remqueue = next;
+		while (remsd) {
+			struct softnet_data *next = remsd->rps_ipi_next;
+
+			if (cpu_online(remsd->cpu))
+				__smp_call_function_single(remsd->cpu,
+							   &remsd->csd, 0);
+			remsd = next;
 		}
 	} else
 #endif
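This is the consumer of the list rps_ipi_queued() builds: detach rps_ipi_list in one assignment, re-enable interrupts, then walk the private copy sending one IPI per remote CPU. A self-contained sketch of that detach-then-drain pattern (printf stands in for __smp_call_function_single(); names are hypothetical):

#include <stddef.h>
#include <stdio.h>

struct pending {
	int cpu;
	struct pending *next;
};

static struct pending *pending_list;	/* filled by the enqueue side */

static void send_pending_kicks(void)
{
	struct pending *remp = pending_list;

	pending_list = NULL;	/* detach; new arrivals start a fresh list */

	while (remp) {
		struct pending *next = remp->next;	/* read before kick */

		printf("IPI to cpu %d\n", remp->cpu);
		remp = next;
	}
}

int main(void)
{
	struct pending a = { 2, NULL }, b = { 5, &a };

	pending_list = &b;
	send_pending_kicks();
	return 0;
}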
@@ -3423,7 +3435,7 @@ static void net_rx_action(struct softirq_action *h)
 		netpoll_poll_unlock(have);
 	}
 out:
-	net_rps_action();
+	net_rps_action_and_irq_disable();
 
 #ifdef CONFIG_NET_DMA
 	/*
@@ -5595,7 +5607,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
 		netif_rx(skb);
-		incr_input_queue_head(oldsd);
+		input_queue_head_incr(oldsd);
 	}
 
 	return NOTIFY_OK;
@@ -5812,24 +5824,23 @@ static int __init net_dev_init(void)
 	 */
 
 	for_each_possible_cpu(i) {
-		struct softnet_data *queue;
+		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
-		queue = &per_cpu(softnet_data, i);
-		skb_queue_head_init(&queue->input_pkt_queue);
-		queue->completion_queue = NULL;
-		INIT_LIST_HEAD(&queue->poll_list);
+		skb_queue_head_init(&sd->input_pkt_queue);
+		sd->completion_queue = NULL;
+		INIT_LIST_HEAD(&sd->poll_list);
 
 #ifdef CONFIG_RPS
-		queue->csd.func = trigger_softirq;
-		queue->csd.info = queue;
-		queue->csd.flags = 0;
-		queue->cpu = i;
+		sd->csd.func = rps_trigger_softirq;
+		sd->csd.info = sd;
+		sd->csd.flags = 0;
+		sd->cpu = i;
 #endif
 
-		queue->backlog.poll = process_backlog;
-		queue->backlog.weight = weight_p;
-		queue->backlog.gro_list = NULL;
-		queue->backlog.gro_count = 0;
+		sd->backlog.poll = process_backlog;
+		sd->backlog.weight = weight_p;
+		sd->backlog.gro_list = NULL;
+		sd->backlog.gro_count = 0;
 	}
 
 	dev_boot_phase = 0;
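The init loop now declares sd directly from per_cpu() and wires csd.func/csd.info so that the IPI handler, rps_trigger_softirq(), can recover the owning softnet_data from its void * argument. A small sketch of that func/info callback wiring (hypothetical names, not the kernel's call_single_data API):

#include <stdio.h>

struct call_desc {
	void (*func)(void *info);
	void *info;
};

struct worker_state {
	int cpu;
	struct call_desc csd;
};

static void trigger(void *info)
{
	struct worker_state *ws = info;	/* recover the owner */

	printf("triggered on cpu %d\n", ws->cpu);
}

int main(void)
{
	struct worker_state ws = { .cpu = 3 };

	ws.csd.func = trigger;		/* mirrors sd->csd.func = rps_trigger_softirq */
	ws.csd.info = &ws;		/* mirrors sd->csd.info = sd */

	ws.csd.func(ws.csd.info);	/* what the IPI path eventually does */
	return 0;
}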