author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-11-21 20:10:16 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-01-10 22:08:02 -0500
commit     917963d0b30f9c4153c372c165178501d97b6b55
tree       e7d7d5f6000521527ba8e094eb176ab8936b1e3f  /kernel/rcu
parent     f9103c390257d06c162d9e3c2a90d2bdedadfe17
rcutorture: Check from beginning to end of grace period
Currently, rcutorture's Reader Batch checks measure from the end of
the previous grace period to the end of the current one. This commit
tightens up these checks by measuring from the start and end of the same
grace period. This involves adding rcu_batches_started() and friends
corresponding to the existing rcu_batches_completed() and friends.
We leave SRCU alone for the moment, as it does not yet have a way of
tracking both ends of its grace periods.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
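
In outline, each read-side check now brackets the critical section with both counters. The following condensed sketch is illustrative only (variable handling abbreviated); the real code is in rcu_torture_timer() and rcu_torture_reader() in the diff below:

	idx = cur_ops->readlock();
	if (cur_ops->started)			/* flavor tracks grace-period starts */
		started = cur_ops->started();
	else					/* e.g. SRCU: fall back to the old scheme */
		started = cur_ops->completed();

	/* ... read-side critical section, pipe_count sampling ... */

	completed = cur_ops->completed();	/* grace periods completed by now */
	completed = completed - started;
	if (cur_ops->started)
		completed++;	/* include the grace period under way at readlock time */
	cur_ops->readunlock(idx);
	/* "completed" now feeds the Reader Batch histogram. */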
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/rcutorture.c   | 37
-rw-r--r--  kernel/rcu/tree.c         | 40
-rw-r--r--  kernel/rcu/tree_plugin.h  | 28
3 files changed, 65 insertions, 40 deletions
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index aadbc072ccf4..24142c200901 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -244,6 +244,7 @@ struct rcu_torture_ops {
 	int (*readlock)(void);
 	void (*read_delay)(struct torture_random_state *rrsp);
 	void (*readunlock)(int idx);
+	unsigned long (*started)(void);
 	unsigned long (*completed)(void);
 	void (*deferred_free)(struct rcu_torture *p);
 	void (*sync)(void);
@@ -372,6 +373,7 @@ static struct rcu_torture_ops rcu_ops = {
 	.readlock = rcu_torture_read_lock,
 	.read_delay = rcu_read_delay,
 	.readunlock = rcu_torture_read_unlock,
+	.started = rcu_batches_started,
 	.completed = rcu_batches_completed,
 	.deferred_free = rcu_torture_deferred_free,
 	.sync = synchronize_rcu,
@@ -413,6 +415,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
 	.readlock = rcu_bh_torture_read_lock,
 	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
 	.readunlock = rcu_bh_torture_read_unlock,
+	.started = rcu_batches_started_bh,
 	.completed = rcu_batches_completed_bh,
 	.deferred_free = rcu_bh_torture_deferred_free,
 	.sync = synchronize_rcu_bh,
@@ -456,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
 	.readlock = rcu_torture_read_lock,
 	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
 	.readunlock = rcu_torture_read_unlock,
+	.started = rcu_no_completed,
 	.completed = rcu_no_completed,
 	.deferred_free = rcu_busted_torture_deferred_free,
 	.sync = synchronize_rcu_busted,
@@ -554,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = {
 	.readlock = srcu_torture_read_lock,
 	.read_delay = srcu_read_delay,
 	.readunlock = srcu_torture_read_unlock,
+	.started = NULL,
 	.completed = srcu_torture_completed,
 	.deferred_free = srcu_torture_deferred_free,
 	.sync = srcu_torture_synchronize,
@@ -590,6 +595,7 @@ static struct rcu_torture_ops sched_ops = {
 	.readlock = sched_torture_read_lock,
 	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
 	.readunlock = sched_torture_read_unlock,
+	.started = rcu_batches_started_sched,
 	.completed = rcu_batches_completed_sched,
 	.deferred_free = rcu_sched_torture_deferred_free,
 	.sync = synchronize_sched,
@@ -628,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = {
 	.readlock = tasks_torture_read_lock,
 	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
 	.readunlock = tasks_torture_read_unlock,
+	.started = rcu_no_completed,
 	.completed = rcu_no_completed,
 	.deferred_free = rcu_tasks_torture_deferred_free,
 	.sync = synchronize_rcu_tasks,
@@ -1005,8 +1012,8 @@ static void rcutorture_trace_dump(void)
 static void rcu_torture_timer(unsigned long unused)
 {
 	int idx;
+	unsigned long started;
 	unsigned long completed;
-	unsigned long completed_end;
 	static DEFINE_TORTURE_RANDOM(rand);
 	static DEFINE_SPINLOCK(rand_lock);
 	struct rcu_torture *p;
@@ -1014,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused)
 	unsigned long long ts;
 
 	idx = cur_ops->readlock();
-	completed = cur_ops->completed();
+	if (cur_ops->started)
+		started = cur_ops->started();
+	else
+		started = cur_ops->completed();
 	ts = rcu_trace_clock_local();
 	p = rcu_dereference_check(rcu_torture_current,
 				  rcu_read_lock_bh_held() ||
@@ -1037,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	completed_end = cur_ops->completed();
+	completed = cur_ops->completed();
 	if (pipe_count > 1) {
 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
-					  completed, completed_end);
+					  started, completed);
 		rcutorture_trace_dump();
 	}
 	__this_cpu_inc(rcu_torture_count[pipe_count]);
-	completed = completed_end - completed;
+	completed = completed - started;
+	if (cur_ops->started)
+		completed++;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
@@ -1063,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused)
 static int
 rcu_torture_reader(void *arg)
 {
+	unsigned long started;
 	unsigned long completed;
-	unsigned long completed_end;
 	int idx;
 	DEFINE_TORTURE_RANDOM(rand);
 	struct rcu_torture *p;
@@ -1083,7 +1095,10 @@ rcu_torture_reader(void *arg)
 			mod_timer(&t, jiffies + 1);
 		}
 		idx = cur_ops->readlock();
-		completed = cur_ops->completed();
+		if (cur_ops->started)
+			started = cur_ops->started();
+		else
+			started = cur_ops->completed();
 		ts = rcu_trace_clock_local();
 		p = rcu_dereference_check(rcu_torture_current,
 					  rcu_read_lock_bh_held() ||
@@ -1104,14 +1119,16 @@ rcu_torture_reader(void *arg)
 			/* Should not happen, but... */
 			pipe_count = RCU_TORTURE_PIPE_LEN;
 		}
-		completed_end = cur_ops->completed();
+		completed = cur_ops->completed();
 		if (pipe_count > 1) {
 			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
-						  ts, completed, completed_end);
+						  ts, started, completed);
 			rcutorture_trace_dump();
 		}
 		__this_cpu_inc(rcu_torture_count[pipe_count]);
-		completed = completed_end - completed;
+		completed = completed - started;
+		if (cur_ops->started)
+			completed++;
 		if (completed > RCU_TORTURE_PIPE_LEN) {
 			/* Should not happen, but... */
 			completed = RCU_TORTURE_PIPE_LEN;
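
To make the new arithmetic concrete (illustrative numbers only): if ->started() returns 41 at readlock time and ->completed() returns 42 at readunlock time, the reader is charged with 42 - 41 + 1 = 2 grace periods, counting the grace period already under way when the read began as well as the one that completed afterward. When ->started is NULL, as for SRCU above, started falls back to ->completed(), the +1 is skipped, and the computation reduces to the previous end-to-end measurement.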
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e26d78712e16..c0faad51ae87 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -315,7 +315,43 @@ static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
 
 /*
- * Return the number of RCU-sched batches processed thus far for debug & stats.
+ * Return the number of RCU batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started(void)
+{
+	return rcu_state_p->gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started);
+
+/*
+ * Return the number of RCU-sched batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started_sched(void)
+{
+	return rcu_sched_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+
+/*
+ * Return the number of RCU BH batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started_bh(void)
+{
+	return rcu_bh_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
+
+/*
+ * Return the number of RCU batches completed thus far for debug & stats.
+ */
+unsigned long rcu_batches_completed(void)
+{
+	return rcu_state_p->completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU-sched batches completed thus far for debug & stats.
  */
 unsigned long rcu_batches_completed_sched(void)
 {
@@ -324,7 +360,7 @@ unsigned long rcu_batches_completed_sched(void)
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
 /*
- * Return the number of RCU BH batches processed thus far for debug & stats.
+ * Return the number of RCU BH batches completed thus far for debug & stats.
  */
 unsigned long rcu_batches_completed_bh(void)
 {
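
The started/completed split relies on each rcu_state carrying two counters: ->gpnum advances when a grace period starts and ->completed when it ends, so the two are equal when RCU is idle and differ by one while a grace period is in flight. A minimal sketch of that relationship, using a hypothetical helper name (tree.c's own static rcu_gp_in_progress() makes essentially the same comparison):

	/* Illustration only; not part of this patch. */
	static bool example_gp_in_progress(void)
	{
		/* ->gpnum leads ->completed by one exactly while a GP is running. */
		return rcu_batches_started() != rcu_batches_completed();
	}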
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index f69300d4a51f..07e61a04de1d 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -114,25 +114,6 @@ static void __init rcu_bootup_announce(void)
 }
 
 /*
- * Return the number of RCU-preempt batches processed thus far
- * for debug and statistics.
- */
-static unsigned long rcu_batches_completed_preempt(void)
-{
-	return rcu_preempt_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
-
-/*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed(void)
-{
-	return rcu_batches_completed_preempt();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
  * Record a preemptible-RCU quiescent state for the specified CPU. Note
  * that this just means that the task currently running on the CPU is
  * not in a quiescent state. There might be any number of tasks blocked
@@ -933,15 +914,6 @@ static void __init rcu_bootup_announce(void)
 }
 
 /*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed(void)
-{
-	return rcu_batches_completed_sched();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
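
Both removed copies of rcu_batches_completed() become unnecessary because the new tree.c definition reads rcu_state_p, which already points at whichever rcu_state backs plain RCU for the current configuration. A sketch of that arrangement, recalled from tree_plugin.h of this era rather than taken from this patch:

	#ifdef CONFIG_PREEMPT_RCU
	static struct rcu_state *rcu_state_p = &rcu_preempt_state;
	#else /* !CONFIG_PREEMPT_RCU */
	static struct rcu_state *rcu_state_p = &rcu_sched_state;
	#endif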