author		Paul E. McKenney <paul.mckenney@linaro.org>	2012-10-11 19:18:09 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-11-08 14:50:13 -0500
commit		a30489c5228fba6f16b4c740a0292879ef13371e (patch)
tree		2a28a2d6180f8315a8c1cee12cdb415a90b167f0
parent		40694d6644d5cca28531707559466122eb212d8b (diff)
rcu: Instrument synchronize_rcu_expedited() for debugfs tracing
This commit adds the counters to rcu_state and updates them in
synchronize_sched_expedited() to provide the data needed for debugfs
tracing.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
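The debugfs read-out of these counters is wired up by a separate RCU
tracing patch, not by this commit. As a rough sketch only, a seq_file
handler dumping the new fields might look like the following (the
function name and output format are illustrative assumptions, not this
commit's code):

#include <linux/atomic.h>
#include <linux/seq_file.h>

/* Hypothetical debugfs show function; the real consumer lives in the
 * RCU tracing code.  Dumps each expedited counter added below. */
static int show_rcuexp(struct seq_file *m, void *unused)
{
	struct rcu_state *rsp = m->private;

	seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
		   atomic_long_read(&rsp->expedited_start),
		   atomic_long_read(&rsp->expedited_done),
		   atomic_long_read(&rsp->expedited_wrap),
		   atomic_long_read(&rsp->expedited_tryfail),
		   atomic_long_read(&rsp->expedited_workdone1),
		   atomic_long_read(&rsp->expedited_workdone2),
		   atomic_long_read(&rsp->expedited_normal),
		   atomic_long_read(&rsp->expedited_stoppedcpus),
		   atomic_long_read(&rsp->expedited_done_tries),
		   atomic_long_read(&rsp->expedited_done_lost),
		   atomic_long_read(&rsp->expedited_done_exit));
	return 0;
}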
-rw-r--r--	kernel/rcutree.c	18
-rw-r--r--	kernel/rcutree.h	9
2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3c72e5e5528c..b966d56ebb51 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2321,6 +2321,7 @@ void synchronize_sched_expedited(void)
 			 (ulong)atomic_long_read(&rsp->expedited_done) +
 			 ULONG_MAX / 8)) {
 		synchronize_sched();
+		atomic_long_inc(&rsp->expedited_wrap);
 		return;
 	}
 
@@ -2341,11 +2342,14 @@ void synchronize_sched_expedited(void)
 			     synchronize_sched_expedited_cpu_stop,
 			     NULL) == -EAGAIN) {
 		put_online_cpus();
+		atomic_long_inc(&rsp->expedited_tryfail);
 
 		/* Check to see if someone else did our work for us. */
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
+			/* ensure test happens before caller kfree */
+			smp_mb__before_atomic_inc(); /* ^^^ */
+			atomic_long_inc(&rsp->expedited_workdone1);
 			return;
 		}
 
@@ -2354,13 +2358,16 @@ void synchronize_sched_expedited(void)
 			udelay(trycount * num_online_cpus());
 		} else {
 			synchronize_sched();
+			atomic_long_inc(&rsp->expedited_normal);
 			return;
 		}
 
 		/* Recheck to see if someone else did our work for us. */
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
+			/* ensure test happens before caller kfree */
+			smp_mb__before_atomic_inc(); /* ^^^ */
+			atomic_long_inc(&rsp->expedited_workdone2);
 			return;
 		}
 
@@ -2375,6 +2382,7 @@ void synchronize_sched_expedited(void)
 		snap = atomic_long_read(&rsp->expedited_start);
 		smp_mb(); /* ensure read is before try_stop_cpus(). */
 	}
+	atomic_long_inc(&rsp->expedited_stoppedcpus);
 
 	/*
 	 * Everyone up to our most recent fetch is covered by our grace
@@ -2383,12 +2391,16 @@ void synchronize_sched_expedited(void)
 	 * than we did already did their update.
 	 */
 	do {
+		atomic_long_inc(&rsp->expedited_done_tries);
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
+			/* ensure test happens before caller kfree */
+			smp_mb__before_atomic_inc(); /* ^^^ */
+			atomic_long_inc(&rsp->expedited_done_lost);
 			break;
 		}
 	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
+	atomic_long_inc(&rsp->expedited_done_exit);
 
 	put_online_cpus();
 }
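One detail worth calling out in the hunks above: on each early-exit
path the patch replaces a bare smp_mb() with smp_mb__before_atomic_inc()
followed by the counter increment. The explicit barrier is still needed
because atomic_long_inc() does not imply a full memory barrier on all
architectures, but where the atomic increment already provides ordering
the combined form is cheaper than a standalone smp_mb(). A simplified
before/after sketch (the condition name is illustrative, not from the
patch):

	/* Before: standalone full barrier on the early-exit path. */
	if (done_by_others) {
		smp_mb(); /* ensure test happens before caller kfree */
		return;
	}

	/* After: the barrier is ordered ahead of the new counter
	 * increment, preserving the test-before-kfree guarantee while
	 * also counting the event. */
	if (done_by_others) {
		/* ensure test happens before caller kfree */
		smp_mb__before_atomic_inc(); /* ^^^ */
		atomic_long_inc(&rsp->expedited_workdone1);
		return;
	}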
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 88f3d9d5971d..d274af357210 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -406,6 +406,15 @@ struct rcu_state {
 
 	atomic_long_t expedited_start;		/* Starting ticket. */
 	atomic_long_t expedited_done;		/* Done ticket. */
+	atomic_long_t expedited_wrap;		/* # near-wrap incidents. */
+	atomic_long_t expedited_tryfail;	/* # acquisition failures. */
+	atomic_long_t expedited_workdone1;	/* # done by others #1. */
+	atomic_long_t expedited_workdone2;	/* # done by others #2. */
+	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
+	atomic_long_t expedited_stoppedcpus;	/* # successful stop_cpus. */
+	atomic_long_t expedited_done_tries;	/* # tries to update _done. */
+	atomic_long_t expedited_done_lost;	/* # times beaten to _done. */
+	atomic_long_t expedited_done_exit;	/* # times exited _done loop. */
 
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
 						/*  force_quiescent_state(). */