author      Paul E. McKenney <paul.mckenney@linaro.org>    2012-05-23 21:47:05 -0400
committer   Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-07-02 15:33:23 -0400
commit      a83eff0a82a7f3f14fea477fd41e6c082e7fc96a (patch)
tree        9fb65b061716c622962c42b8318f06fd0d2919cd /kernel/rcutree.c
parent      cf3a9c4842b1e097dbe0854933c471d43dd24f69 (diff)
rcu: Add tracing for _rcu_barrier()
This commit adds event tracing for _rcu_barrier() execution. The trace
events are defined only if RCU_TRACE=y.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--   kernel/rcutree.c   29
1 file changed, 28 insertions(+), 1 deletion(-)
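
All of the events added by this patch funnel through the new _rcu_barrier_trace()
helper, which simply forwards its arguments to the trace_rcu_barrier() tracepoint.
For orientation, a tracepoint definition matching that call site would look roughly
like the sketch below, written in the usual include/trace/events/*.h style. The
field names, comments, and format string here are illustrative guesses reconstructed
from the call trace_rcu_barrier(rsp->name, s, cpu, atomic_read(&rsp->barrier_cpu_count), done),
not text taken from the tree.

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM rcu

    #include <linux/tracepoint.h>

    /*
     * Sketch of a tracepoint compatible with _rcu_barrier_trace().  The
     * strings passed in (rsp->name and the event tag s) are static, so
     * the sketch records the pointers rather than copying the strings.
     */
    TRACE_EVENT(rcu_barrier,

            TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),

            TP_ARGS(rcuname, s, cpu, cnt, done),

            TP_STRUCT__entry(
                    __field(char *, rcuname)        /* RCU flavor, e.g. "rcu_sched" */
                    __field(char *, s)              /* event tag, e.g. "Begin", "LastCB" */
                    __field(int, cpu)               /* CPU involved, or -1 for none */
                    __field(int, cnt)               /* outstanding barrier callbacks */
                    __field(unsigned long, done)    /* ->n_barrier_done value */
            ),

            TP_fast_assign(
                    __entry->rcuname = rcuname;
                    __entry->s = s;
                    __entry->cpu = cpu;
                    __entry->cnt = cnt;
                    __entry->done = done;
            ),

            TP_printk("%s %s cpu %d remaining %d # %lu",
                      __entry->rcuname, __entry->s, __entry->cpu,
                      __entry->cnt, __entry->done)
    );

When RCU_TRACE=n the tracepoint call becomes a no-op, which is why the new
helper's comment says the compiler is expected to optimize it away.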
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6bb5d562253f..dda43d826504 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2260,6 +2260,17 @@ static int rcu_cpu_has_callbacks(int cpu)
 }
 
 /*
+ * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
+ * the compiler is expected to optimize this away.
+ */
+static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
+                               int cpu, unsigned long done)
+{
+        trace_rcu_barrier(rsp->name, s, cpu,
+                          atomic_read(&rsp->barrier_cpu_count), done);
+}
+
+/*
  * RCU callback function for _rcu_barrier().  If we are last, wake
  * up the task executing _rcu_barrier().
  */
@@ -2268,8 +2279,12 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
         struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
         struct rcu_state *rsp = rdp->rsp;
 
-        if (atomic_dec_and_test(&rsp->barrier_cpu_count))
+        if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
+                _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
                 complete(&rsp->barrier_completion);
+        } else {
+                _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
+        }
 }
 
 /*
@@ -2280,6 +2295,7 @@ static void rcu_barrier_func(void *type)
         struct rcu_state *rsp = type;
         struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
+        _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
         atomic_inc(&rsp->barrier_cpu_count);
         rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 }
@@ -2298,6 +2314,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
         unsigned long snap_done;
 
         init_rcu_head_on_stack(&rd.barrier_head);
+        _rcu_barrier_trace(rsp, "Begin", -1, snap);
 
         /* Take mutex to serialize concurrent rcu_barrier() requests. */
         mutex_lock(&rsp->barrier_mutex);
@@ -2315,7 +2332,9 @@ static void _rcu_barrier(struct rcu_state *rsp)
          * value up to the next even number and adds two before comparing.
          */
         snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+        _rcu_barrier_trace(rsp, "Check", -1, snap_done);
         if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+                _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
                 smp_mb(); /* caller's subsequent code after above check. */
                 mutex_unlock(&rsp->barrier_mutex);
                 return;
@@ -2328,6 +2347,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
          */
         ACCESS_ONCE(rsp->n_barrier_done)++;
         WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
+        _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
         smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
 
         /*
@@ -2364,13 +2384,19 @@ static void _rcu_barrier(struct rcu_state *rsp)
                 preempt_disable();
                 rdp = per_cpu_ptr(rsp->rda, cpu);
                 if (cpu_is_offline(cpu)) {
+                        _rcu_barrier_trace(rsp, "Offline", cpu,
+                                           rsp->n_barrier_done);
                         preempt_enable();
                         while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
                                 schedule_timeout_interruptible(1);
                 } else if (ACCESS_ONCE(rdp->qlen)) {
+                        _rcu_barrier_trace(rsp, "OnlineQ", cpu,
+                                           rsp->n_barrier_done);
                         smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
                         preempt_enable();
                 } else {
+                        _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
+                                           rsp->n_barrier_done);
                         preempt_enable();
                 }
         }
@@ -2403,6 +2429,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
         smp_mb(); /* Keep increment after above mechanism. */
         ACCESS_ONCE(rsp->n_barrier_done)++;
         WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
+        _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
         smp_mb(); /* Keep increment before caller's subsequent code. */
 
         /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
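
One non-obvious piece of context for the "Check" and "EarlyExit" events above:
->n_barrier_done is kept odd while a barrier is in flight ("Inc1") and even once
it has completed ("Inc2"), so the early-exit test rounds the caller's pre-mutex
snapshot up to the next even value and adds two, the first counter value that
proves a complete rcu_barrier() both started and finished after the snapshot was
taken. The following userspace sketch exercises just that comparison;
barrier_already_done() is a hypothetical name used here for illustration, not a
kernel function.

    #include <stdio.h>

    /*
     * Userspace illustration of the _rcu_barrier() early-exit test.
     * The kernel's ULONG_CMP_GE(a, b) is essentially (long)(a - b) >= 0,
     * which keeps working across counter wraparound.
     */
    static int barrier_already_done(unsigned long snap, unsigned long done_now)
    {
            unsigned long needed = ((snap + 1) & ~0x1UL) + 2;

            return (long)(done_now - needed) >= 0;
    }

    int main(void)
    {
            /* Snapshot 3 is odd (a barrier was in flight); it rounds up to 4, plus 2 gives 6. */
            printf("%d\n", barrier_already_done(3, 5));     /* 0: no full barrier since the snapshot */
            printf("%d\n", barrier_already_done(3, 6));     /* 1: safe to skip this rcu_barrier() */
            return 0;
    }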