author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-06-17 18:53:19 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-09-29 00:38:12 -0400
commit     29c00b4a1d9e277786120032aa8364631820d863
tree       23bf0c8cdb5268ef92ef9ed007639705e922b1b0 /kernel/rcutree.c
parent     9d68197c05201d8edc70d58bd1d5dad05d8455e8
rcu: Add event-tracing for RCU callback invocation
There was recently some controversy about the overhead of invoking RCU
callbacks. Add TRACE_EVENT()s to obtain fine-grained timings for the
start and stop of a batch of callbacks and also for each callback invoked.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
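The TRACE_EVENT() definitions themselves sit behind the new <trace/events/rcu.h> include and are not part of this file-limited diff. As rough orientation, a minimal sketch of what the two batch tracepoints could look like follows; only the argument lists (callback-queue length and batch limit at start, number of callbacks invoked at end) follow from the call sites in the patch, while the field names and TP_printk() formats are illustrative assumptions.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu

#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RCU_H

#include <linux/tracepoint.h>

/*
 * Illustrative sketch only: the prototypes match the call sites in this
 * patch (trace_rcu_batch_start(qlen, blimit), trace_rcu_batch_end(count));
 * field names and format strings are assumptions, not copied from it.
 */
TRACE_EVENT(rcu_batch_start,

	TP_PROTO(long qlen, int blimit),

	TP_ARGS(qlen, blimit),

	TP_STRUCT__entry(
		__field(long, qlen)
		__field(int, blimit)
	),

	TP_fast_assign(
		__entry->qlen = qlen;
		__entry->blimit = blimit;
	),

	TP_printk("CBs=%ld bl=%d", __entry->qlen, __entry->blimit)
);

TRACE_EVENT(rcu_batch_end,

	TP_PROTO(int callbacks_invoked),

	TP_ARGS(callbacks_invoked),

	TP_STRUCT__entry(
		__field(int, callbacks_invoked)
	),

	TP_fast_assign(
		__entry->callbacks_invoked = callbacks_invoked;
	),

	TP_printk("CBs-invoked=%d", __entry->callbacks_invoked)
);

#endif /* _TRACE_RCU_H */

/* This part must be outside the multi-read protection. */
#include <trace/define_trace.h>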
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index a7c6bce1af83..45dcc2036a1e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -52,6 +52,9 @@
 #include <linux/prefetch.h>
 
 #include "rcutree.h"
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
 
 /* Data structures. */
 
@@ -1190,17 +1193,22 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
-	int count;
+	int bl, count;
 
 	/* If no callbacks are ready, just return.*/
-	if (!cpu_has_callbacks_ready_to_invoke(rdp))
+	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
+		trace_rcu_batch_start(0, 0);
+		trace_rcu_batch_end(0);
 		return;
+	}
 
 	/*
 	 * Extract the list of ready callbacks, disabling to prevent
 	 * races with call_rcu() from interrupt handlers.
 	 */
 	local_irq_save(flags);
+	bl = rdp->blimit;
+	trace_rcu_batch_start(rdp->qlen, bl);
 	list = rdp->nxtlist;
 	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
 	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
@@ -1218,11 +1226,12 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 		debug_rcu_head_unqueue(list);
 		__rcu_reclaim(list);
 		list = next;
-		if (++count >= rdp->blimit)
+		if (++count >= bl)
 			break;
 	}
 
 	local_irq_save(flags);
+	trace_rcu_batch_end(count);
 
 	/* Update count, and requeue any remaining callbacks. */
 	rdp->qlen -= count;
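Two usage notes on the instrumentation above. First, the patch snapshots rdp->blimit into the local bl with interrupts disabled, so the limit reported by trace_rcu_batch_start() is exactly the one the invocation loop enforces, even if rdp->blimit changes while callbacks run. Second, assuming the tracepoint names sketched earlier and debugfs mounted at /sys/kernel/debug, the events would be enabled and read through the standard ftrace interface:

# enable the batch start/end events and read the trace buffer
echo 1 > /sys/kernel/debug/tracing/events/rcu/rcu_batch_start/enable
echo 1 > /sys/kernel/debug/tracing/events/rcu/rcu_batch_end/enable
cat /sys/kernel/debug/tracing/trace

The delta between matching start/end timestamps, divided by the reported callback count, yields the per-callback invocation overhead that motivated the patch.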