author		Paul E. McKenney <paul.mckenney@linaro.org>	2012-11-14 19:26:40 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2013-01-08 17:14:55 -0500
commit		52494535103986dbbf689b44d8c2c7efe2132b16 (patch)
tree		88dd5a9d064608e9711c36f05b3b4aab8e255a4f /kernel/rcutorture.c
parent		dc975e94f322e60fa8fcc44dec1820fde4de174c (diff)
rcu: Reduce rcutorture tracing
Currently, rcutorture traces every read-side access. This can be
problematic because even a two-minute rcutorture run on a two-CPU system
can generate 28,853,363 reads. Normally, only a failing read is of
interest, so this commit adjusts rcutorture's tracing to only
trace failing reads. The resulting event tracing records the time
and the ->completed value captured at the beginning of the RCU read-side
critical section, allowing correlation with other event-tracing messages.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
[ paulmck: Add fix for build problem located by Randy Dunlap based on
diagnosis by Steven Rostedt. ]
Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r--	kernel/rcutorture.c | 31
1 file changed, 24 insertions(+), 7 deletions(-)
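
As an aid to reading the diff below, here is a condensed, self-contained sketch of the reader-path pattern this patch introduces: capture the ->completed value and a timestamp when the RCU read-side critical section begins, capture ->completed again when it ends, and emit the trace event (followed by a trace dump) only when pipe_count marks the read as failing. All of the fake_* names, main(), and the simulated values are illustrative stand-ins, not the rcutorture interfaces themselves.

	#include <stdio.h>

	#define NSEC_PER_USEC		1000ULL
	#define RCU_TORTURE_PIPE_LEN	10

	/* Hypothetical stand-ins for the rcutorture callbacks and trace hooks;
	 * none of these names exist in the kernel. */
	static int gp_counter;
	static int fake_completed(void) { return gp_counter; }
	static unsigned long long fake_clock_ns(void) { return 123456789ULL; }
	static int fake_read_side(void) { gp_counter += 3; return 2; /* > 1: failing read */ }
	static void fake_trace_read(unsigned long long us, int c_old, int c_new)
	{
		printf("failing read: ts=%llu us, completed %d -> %d\n", us, c_old, c_new);
	}

	int main(void)
	{
		int completed, completed_end, pipe_count;
		unsigned long long ts;

		completed = fake_completed();		/* ->completed at the start of the read */
		ts = fake_clock_ns();			/* trace_clock_local() in the patch */

		pipe_count = fake_read_side();		/* > 1 marks this read as failing */

		completed_end = fake_completed();	/* ->completed at the end of the read */
		if (pipe_count > 1) {
			ts /= NSEC_PER_USEC;		/* do_div(ts, NSEC_PER_USEC) in the patch */
			fake_trace_read(ts, completed, completed_end);
		}

		completed = completed_end - completed;	/* grace periods elapsed during the read */
		if (completed > RCU_TORTURE_PIPE_LEN)	/* should not happen, but clamp anyway */
			completed = RCU_TORTURE_PIPE_LEN;
		printf("grace periods during read: %d\n", completed);
		return 0;
	}

Capturing completed_end once and reusing it for both the trace event and the grace-period delta also removes the second cur_ops->completed() call that the pre-patch code made when computing the delta.
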
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 31dea01c85fd..a583f1ce713d 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -46,6 +46,7 @@
 #include <linux/stat.h>
 #include <linux/srcu.h>
 #include <linux/slab.h>
+#include <linux/trace_clock.h>
 #include <asm/byteorder.h>
 
 MODULE_LICENSE("GPL");
@@ -1028,7 +1029,6 @@ void rcutorture_trace_dump(void)
 		return;
 	if (atomic_xchg(&beenhere, 1) != 0)
 		return;
-	do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
 	ftrace_dump(DUMP_ALL);
 }
 
@@ -1042,13 +1042,16 @@ static void rcu_torture_timer(unsigned long unused)
 {
 	int idx;
 	int completed;
+	int completed_end;
 	static DEFINE_RCU_RANDOM(rand);
 	static DEFINE_SPINLOCK(rand_lock);
 	struct rcu_torture *p;
 	int pipe_count;
+	unsigned long long ts;
 
 	idx = cur_ops->readlock();
 	completed = cur_ops->completed();
+	ts = trace_clock_local();
 	p = rcu_dereference_check(rcu_torture_current,
 				  rcu_read_lock_bh_held() ||
 				  rcu_read_lock_sched_held() ||
@@ -1058,7 +1061,6 @@ static void rcu_torture_timer(unsigned long unused)
 		cur_ops->readunlock(idx);
 		return;
 	}
-	do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
 	if (p->rtort_mbtest == 0)
 		atomic_inc(&n_rcu_torture_mberror);
 	spin_lock(&rand_lock);
@@ -1071,10 +1073,16 @@ static void rcu_torture_timer(unsigned long unused)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	if (pipe_count > 1)
+	completed_end = cur_ops->completed();
+	if (pipe_count > 1) {
+		unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
+
+		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
+					  completed, completed_end);
 		rcutorture_trace_dump();
+	}
 	__this_cpu_inc(rcu_torture_count[pipe_count]);
-	completed = cur_ops->completed() - completed;
+	completed = completed_end - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
@@ -1094,11 +1102,13 @@ static int
 rcu_torture_reader(void *arg)
 {
 	int completed;
+	int completed_end;
 	int idx;
 	DEFINE_RCU_RANDOM(rand);
 	struct rcu_torture *p;
 	int pipe_count;
 	struct timer_list t;
+	unsigned long long ts;
 
 	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
 	set_user_nice(current, 19);
@@ -1112,6 +1122,7 @@ rcu_torture_reader(void *arg)
 		}
 		idx = cur_ops->readlock();
 		completed = cur_ops->completed();
+		ts = trace_clock_local();
 		p = rcu_dereference_check(rcu_torture_current,
 					  rcu_read_lock_bh_held() ||
 					  rcu_read_lock_sched_held() ||
@@ -1122,7 +1133,6 @@ rcu_torture_reader(void *arg)
 			schedule_timeout_interruptible(HZ);
 			continue;
 		}
-		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
 		if (p->rtort_mbtest == 0)
 			atomic_inc(&n_rcu_torture_mberror);
 		cur_ops->read_delay(&rand);
@@ -1132,10 +1142,17 @@ rcu_torture_reader(void *arg)
 			/* Should not happen, but... */
 			pipe_count = RCU_TORTURE_PIPE_LEN;
 		}
-		if (pipe_count > 1)
+		completed_end = cur_ops->completed();
+		if (pipe_count > 1) {
+			unsigned long __maybe_unused ts_rem =
+					do_div(ts, NSEC_PER_USEC);
+
+			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
+						  ts, completed, completed_end);
 			rcutorture_trace_dump();
+		}
 		__this_cpu_inc(rcu_torture_count[pipe_count]);
-		completed = cur_ops->completed() - completed;
+		completed = completed_end - completed;
 		if (completed > RCU_TORTURE_PIPE_LEN) {
 			/* Should not happen, but... */
 			completed = RCU_TORTURE_PIPE_LEN;
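
A note on the timestamp handling above: trace_clock_local() yields a nanosecond count in an unsigned long long, and the kernel's do_div() macro divides that 64-bit value in place while returning the 32-bit remainder; the patch keeps the remainder in a __maybe_unused variable because only the quotient (microseconds) is used here. A minimal userspace approximation of the same conversion, assuming an illustrative read_ns_clock() stand-in for trace_clock_local(), looks like this:

	#include <stdio.h>
	#include <time.h>

	#define NSEC_PER_USEC 1000ULL

	/* Illustrative stand-in for the kernel's trace_clock_local(). */
	static unsigned long long read_ns_clock(void)
	{
		struct timespec now;

		clock_gettime(CLOCK_MONOTONIC, &now);
		return (unsigned long long)now.tv_sec * 1000000000ULL + now.tv_nsec;
	}

	int main(void)
	{
		unsigned long long ts = read_ns_clock();	/* nanoseconds */
		unsigned long long ts_rem = ts % NSEC_PER_USEC;	/* the remainder do_div() would return */

		ts /= NSEC_PER_USEC;	/* in the kernel: do_div(ts, NSEC_PER_USEC) */
		printf("timestamp: %llu us (+%llu ns)\n", ts, ts_rem);
		return 0;
	}
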