aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul E. McKenney <paul.mckenney@linaro.org>2012-05-23 21:47:05 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-07-02 15:33:23 -0400
commita83eff0a82a7f3f14fea477fd41e6c082e7fc96a (patch)
tree9fb65b061716c622962c42b8318f06fd0d2919cd
parentcf3a9c4842b1e097dbe0854933c471d43dd24f69 (diff)
rcu: Add tracing for _rcu_barrier()
This commit adds event tracing for _rcu_barrier() execution. This is defined only if RCU_TRACE=y. Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Reviewed-by: Josh Triplett <josh@joshtriplett.org>
-rw-r--r--include/trace/events/rcu.h45
-rw-r--r--kernel/rcutree.c29
2 files changed, 73 insertions, 1 deletion
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d274734b2aa4..5bde94d8585b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -541,6 +541,50 @@ TRACE_EVENT(rcu_torture_read,
541 __entry->rcutorturename, __entry->rhp) 541 __entry->rcutorturename, __entry->rhp)
542); 542);
543 543
544/*
545 * Tracepoint for _rcu_barrier() execution. The string "s" describes
546 * the _rcu_barrier phase:
547 * "Begin": _rcu_barrier() started.
548 * "Check": _rcu_barrier() checking for piggybacking.
549 * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
550 * "Inc1": _rcu_barrier() piggyback check counter incremented.
551 * "Offline": _rcu_barrier() found offline CPU.
552 * "OnlineQ": _rcu_barrier() found online CPU with callbacks.
553 * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
554 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
555 * "CB": An rcu_barrier_callback() invoked a callback, not the last.
556 * "LastCB": An rcu_barrier_callback() invoked the last callback.
557 * "Inc2": _rcu_barrier() piggyback check counter incremented.
558 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
559 * is the count of remaining callbacks, and "done" is the piggybacking count.
560 */
561TRACE_EVENT(rcu_barrier,
562
563 TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
564
565 TP_ARGS(rcuname, s, cpu, cnt, done),
566
567 TP_STRUCT__entry(
568 __field(char *, rcuname)
569 __field(char *, s)
570 __field(int, cpu)
571 __field(int, cnt)
572 __field(unsigned long, done)
573 ),
574
575 TP_fast_assign(
576 __entry->rcuname = rcuname;
577 __entry->s = s;
578 __entry->cpu = cpu;
579 __entry->cnt = cnt;
580 __entry->done = done;
581 ),
582
583 TP_printk("%s %s cpu %d remaining %d # %lu",
584 __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
585 __entry->done)
586);
587
544#else /* #ifdef CONFIG_RCU_TRACE */ 588#else /* #ifdef CONFIG_RCU_TRACE */
545 589
546#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) 590#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
@@ -564,6 +608,7 @@ TRACE_EVENT(rcu_torture_read,
564#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ 608#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
565 do { } while (0) 609 do { } while (0)
566#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) 610#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
611#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
567 612
568#endif /* #else #ifdef CONFIG_RCU_TRACE */ 613#endif /* #else #ifdef CONFIG_RCU_TRACE */
569 614
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6bb5d562253f..dda43d826504 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2260,6 +2260,17 @@ static int rcu_cpu_has_callbacks(int cpu)
2260} 2260}
2261 2261
2262/* 2262/*
2263 * Helper function for _rcu_barrier() tracing. If tracing is disabled,
2264 * the compiler is expected to optimize this away.
2265 */
2266static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
2267 int cpu, unsigned long done)
2268{
2269 trace_rcu_barrier(rsp->name, s, cpu,
2270 atomic_read(&rsp->barrier_cpu_count), done);
2271}
2272
2273/*
2263 * RCU callback function for _rcu_barrier(). If we are last, wake 2274 * RCU callback function for _rcu_barrier(). If we are last, wake
2264 * up the task executing _rcu_barrier(). 2275 * up the task executing _rcu_barrier().
2265 */ 2276 */
@@ -2268,8 +2279,12 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
2268 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head); 2279 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
2269 struct rcu_state *rsp = rdp->rsp; 2280 struct rcu_state *rsp = rdp->rsp;
2270 2281
2271 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) 2282 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
2283 _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
2272 complete(&rsp->barrier_completion); 2284 complete(&rsp->barrier_completion);
2285 } else {
2286 _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
2287 }
2273} 2288}
2274 2289
2275/* 2290/*
@@ -2280,6 +2295,7 @@ static void rcu_barrier_func(void *type)
2280 struct rcu_state *rsp = type; 2295 struct rcu_state *rsp = type;
2281 struct rcu_data *rdp = __this_cpu_ptr(rsp->rda); 2296 struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
2282 2297
2298 _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
2283 atomic_inc(&rsp->barrier_cpu_count); 2299 atomic_inc(&rsp->barrier_cpu_count);
2284 rsp->call(&rdp->barrier_head, rcu_barrier_callback); 2300 rsp->call(&rdp->barrier_head, rcu_barrier_callback);
2285} 2301}
@@ -2298,6 +2314,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
2298 unsigned long snap_done; 2314 unsigned long snap_done;
2299 2315
2300 init_rcu_head_on_stack(&rd.barrier_head); 2316 init_rcu_head_on_stack(&rd.barrier_head);
2317 _rcu_barrier_trace(rsp, "Begin", -1, snap);
2301 2318
2302 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 2319 /* Take mutex to serialize concurrent rcu_barrier() requests. */
2303 mutex_lock(&rsp->barrier_mutex); 2320 mutex_lock(&rsp->barrier_mutex);
@@ -2315,7 +2332,9 @@ static void _rcu_barrier(struct rcu_state *rsp)
2315 * value up to the next even number and adds two before comparing. 2332 * value up to the next even number and adds two before comparing.
2316 */ 2333 */
2317 snap_done = ACCESS_ONCE(rsp->n_barrier_done); 2334 snap_done = ACCESS_ONCE(rsp->n_barrier_done);
2335 _rcu_barrier_trace(rsp, "Check", -1, snap_done);
2318 if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) { 2336 if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
2337 _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
2319 smp_mb(); /* caller's subsequent code after above check. */ 2338 smp_mb(); /* caller's subsequent code after above check. */
2320 mutex_unlock(&rsp->barrier_mutex); 2339 mutex_unlock(&rsp->barrier_mutex);
2321 return; 2340 return;
@@ -2328,6 +2347,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
2328 */ 2347 */
2329 ACCESS_ONCE(rsp->n_barrier_done)++; 2348 ACCESS_ONCE(rsp->n_barrier_done)++;
2330 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1); 2349 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
2350 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
2331 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */ 2351 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
2332 2352
2333 /* 2353 /*
@@ -2364,13 +2384,19 @@ static void _rcu_barrier(struct rcu_state *rsp)
2364 preempt_disable(); 2384 preempt_disable();
2365 rdp = per_cpu_ptr(rsp->rda, cpu); 2385 rdp = per_cpu_ptr(rsp->rda, cpu);
2366 if (cpu_is_offline(cpu)) { 2386 if (cpu_is_offline(cpu)) {
2387 _rcu_barrier_trace(rsp, "Offline", cpu,
2388 rsp->n_barrier_done);
2367 preempt_enable(); 2389 preempt_enable();
2368 while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen)) 2390 while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
2369 schedule_timeout_interruptible(1); 2391 schedule_timeout_interruptible(1);
2370 } else if (ACCESS_ONCE(rdp->qlen)) { 2392 } else if (ACCESS_ONCE(rdp->qlen)) {
2393 _rcu_barrier_trace(rsp, "OnlineQ", cpu,
2394 rsp->n_barrier_done);
2371 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); 2395 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
2372 preempt_enable(); 2396 preempt_enable();
2373 } else { 2397 } else {
2398 _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
2399 rsp->n_barrier_done);
2374 preempt_enable(); 2400 preempt_enable();
2375 } 2401 }
2376 } 2402 }
@@ -2403,6 +2429,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
2403 smp_mb(); /* Keep increment after above mechanism. */ 2429 smp_mb(); /* Keep increment after above mechanism. */
2404 ACCESS_ONCE(rsp->n_barrier_done)++; 2430 ACCESS_ONCE(rsp->n_barrier_done)++;
2405 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0); 2431 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
2432 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
2406 smp_mb(); /* Keep increment before caller's subsequent code. */ 2433 smp_mb(); /* Keep increment before caller's subsequent code. */
2407 2434
2408 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 2435 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */