about summary refs log tree commit diff stats
path: root/net/core
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2010-05-06 19:51:21 -0400
committerDavid S. Miller <davem@davemloft.net>2010-05-17 20:18:50 -0400
commitebda37c27d0c768947e9b058332d7ea798210cf8 (patch)
tree1c34bd9f9c2a87dcd150ad1fcc46a3adc6bb7ca2 /net/core
parent3f78d1f210ff89af77f042ab7f4a8fee39feb1c9 (diff)
rps: avoid one atomic in enqueue_to_backlog
If CONFIG_SMP=y, then we own a queue spinlock, so we can avoid the atomic test_and_set_bit() from napi_schedule_prep(). We now have the same number of atomic ops per netif_rx() call as with the pre-RPS kernel.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/dev.c6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 988e42912e72..cdcb9cbedf41 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2432,8 +2432,10 @@ enqueue:
 		return NET_RX_SUCCESS;
 	}
 
-	/* Schedule NAPI for backlog device */
-	if (napi_schedule_prep(&sd->backlog)) {
+	/* Schedule NAPI for backlog device
+	 * We can use non atomic operation since we own the queue lock
+	 */
+	if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
 		if (!rps_ipi_queued(sd))
 			____napi_schedule(sd, &sd->backlog);
 	}