author    Eric Dumazet <eric.dumazet@gmail.com>    2010-04-22 03:22:45 -0400
committer David S. Miller <davem@davemloft.net>    2010-04-22 03:22:45 -0400
commit    e326bed2f47d0365da5a8faaf8ee93ed2d86325b
tree      46ee31550c49efa4c06c857b039ab6fdabf08a9c
parent    de498c89254b5b89f676e9c9292396d5ebf52bf2
rps: immediate send IPI in process_backlog()
If some skbs are queued to our backlog, we currently delay the IPI send until the end of net_rx_action(), increasing latencies. This defeats the queueing, since we want to quickly dispatch packets to the pool of worker cpus and then deeply process our own packets. It's better to send the IPIs before processing our packets in the upper layers, i.e. from process_backlog().

Change the _and_irq_disable suffix to _and_irq_enable, since we enable local irqs in net_rps_action(); sorry for the confusion.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
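The commit message describes a simple reordering: flush the pending cross-CPU notifications (IPIs) before chewing through the local backlog, so remote cpus can start polling in parallel instead of idling until the whole net_rx_action() batch is finished. The toy userspace program below is only an illustrative sketch of that ordering under that assumption; the names (toy_send_pending_ipis, toy_drain_backlog, the counters) are made up for this sketch and are not kernel APIs.

/*
 * Toy model of the ordering change (illustration only, not kernel code).
 * The patched path kicks remote cpus first, then drains the local backlog.
 */
#include <stdio.h>

static int pending_ipis = 2;    /* notifications queued for remote cpus */
static int backlog_packets = 3; /* packets sitting in our own backlog   */

static void toy_send_pending_ipis(void)
{
        while (pending_ipis > 0) {
                printf("IPI -> remote cpu (it can start polling now)\n");
                pending_ipis--;
        }
}

static void toy_drain_backlog(void)
{
        while (backlog_packets > 0) {
                printf("local cpu deep-processes one backlog packet\n");
                backlog_packets--;
        }
}

int main(void)
{
        /* Patched ordering: wake the remote cpus before local processing,
         * so they work in parallel instead of waiting for our whole batch.
         */
        toy_send_pending_ipis();
        toy_drain_backlog();
        return 0;
}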
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c | 76
1 file changed, 42 insertions(+), 34 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 9bf1cccb067e..3ba774b6091c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3242,11 +3242,48 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_frags);
 
+/*
+ * net_rps_action sends any pending IPI's for rps.
+ * Note: called with local irq disabled, but exits with local irq enabled.
+ */
+static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+	struct softnet_data *remsd = sd->rps_ipi_list;
+
+	if (remsd) {
+		sd->rps_ipi_list = NULL;
+
+		local_irq_enable();
+
+		/* Send pending IPI's to kick RPS processing on remote cpus. */
+		while (remsd) {
+			struct softnet_data *next = remsd->rps_ipi_next;
+
+			if (cpu_online(remsd->cpu))
+				__smp_call_function_single(remsd->cpu,
+							   &remsd->csd, 0);
+			remsd = next;
+		}
+	} else
+#endif
+		local_irq_enable();
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 
+#ifdef CONFIG_RPS
+	/* Check if we have pending ipi, its better to send them now,
+	 * not waiting net_rx_action() end.
+	 */
+	if (sd->rps_ipi_list) {
+		local_irq_disable();
+		net_rps_action_and_irq_enable(sd);
+	}
+#endif
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
@@ -3353,45 +3390,16 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
-/*
- * net_rps_action sends any pending IPI's for rps.
- * Note: called with local irq disabled, but exits with local irq enabled.
- */
-static void net_rps_action_and_irq_disable(void)
-{
-#ifdef CONFIG_RPS
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
-	struct softnet_data *remsd = sd->rps_ipi_list;
-
-	if (remsd) {
-		sd->rps_ipi_list = NULL;
-
-		local_irq_enable();
-
-		/* Send pending IPI's to kick RPS processing on remote cpus. */
-		while (remsd) {
-			struct softnet_data *next = remsd->rps_ipi_next;
-
-			if (cpu_online(remsd->cpu))
-				__smp_call_function_single(remsd->cpu,
-							   &remsd->csd, 0);
-			remsd = next;
-		}
-	} else
-#endif
-		local_irq_enable();
-}
-
 static void net_rx_action(struct softirq_action *h)
 {
-	struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
+	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
 	void *have;
 
 	local_irq_disable();
 
-	while (!list_empty(list)) {
+	while (!list_empty(&sd->poll_list)) {
 		struct napi_struct *n;
 		int work, weight;
 
@@ -3409,7 +3417,7 @@ static void net_rx_action(struct softirq_action *h)
 		 * entries to the tail of this list, and only ->poll()
 		 * calls can remove this head entry from the list.
 		 */
-		n = list_first_entry(list, struct napi_struct, poll_list);
+		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
 
 		have = netpoll_poll_lock(n);
 
@@ -3444,13 +3452,13 @@ static void net_rx_action(struct softirq_action *h)
 				napi_complete(n);
 				local_irq_disable();
 			} else
-				list_move_tail(&n->poll_list, list);
+				list_move_tail(&n->poll_list, &sd->poll_list);
 		}
 
 		netpoll_poll_unlock(have);
 	}
 out:
-	net_rps_action_and_irq_disable();
+	net_rps_action_and_irq_enable(sd);
 
 #ifdef CONFIG_NET_DMA
 	/*