aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_events.c
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2009-04-16 12:15:44 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-17 11:10:35 -0400
commit9ea21c1ecdb35ecdcac5fd9d95f62a1f6a7ffec0 (patch)
treec38a7f2f30d145e7d53b24159b99bc4734d291b5 /kernel/trace/trace_events.c
parent69abe6a5d18a9394baa325bab8f57748b037c517 (diff)
tracing/events: perform function tracing in event selftests
We can find some bugs in the trace events if we stress the writes as well. The function tracer is a good way to stress the events. [ Impact: extend scope of event tracer self-tests ] Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Frederic Weisbecker <fweisbec@gmail.com> LKML-Reference: <20090416161746.604786131@goodmis.org> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--kernel/trace/trace_events.c78
1 files changed, 72 insertions, 6 deletions
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7163a2bb021a..1137f951be42 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1017,7 +1017,7 @@ static __init void event_test_stuff(void)
1017 * For every trace event defined, we will test each trace point separately, 1017 * For every trace event defined, we will test each trace point separately,
1018 * and then by groups, and finally all trace points. 1018 * and then by groups, and finally all trace points.
1019 */ 1019 */
1020static __init int event_trace_self_tests(void) 1020static __init void event_trace_self_tests(void)
1021{ 1021{
1022 struct ftrace_event_call *call; 1022 struct ftrace_event_call *call;
1023 struct event_subsystem *system; 1023 struct event_subsystem *system;
@@ -1071,7 +1071,7 @@ static __init int event_trace_self_tests(void)
1071 sysname = kstrdup(system->name, GFP_KERNEL); 1071 sysname = kstrdup(system->name, GFP_KERNEL);
1072 if (WARN_ON(!sysname)) { 1072 if (WARN_ON(!sysname)) {
1073 pr_warning("Can't allocate memory, giving up!\n"); 1073 pr_warning("Can't allocate memory, giving up!\n");
1074 return 0; 1074 return;
1075 } 1075 }
1076 ret = ftrace_set_clr_event(sysname, 1); 1076 ret = ftrace_set_clr_event(sysname, 1);
1077 kfree(sysname); 1077 kfree(sysname);
@@ -1086,7 +1086,7 @@ static __init int event_trace_self_tests(void)
1086 sysname = kstrdup(system->name, GFP_KERNEL); 1086 sysname = kstrdup(system->name, GFP_KERNEL);
1087 if (WARN_ON(!sysname)) { 1087 if (WARN_ON(!sysname)) {
1088 pr_warning("Can't allocate memory, giving up!\n"); 1088 pr_warning("Can't allocate memory, giving up!\n");
1089 return 0; 1089 return;
1090 } 1090 }
1091 ret = ftrace_set_clr_event(sysname, 0); 1091 ret = ftrace_set_clr_event(sysname, 0);
1092 kfree(sysname); 1092 kfree(sysname);
@@ -1106,14 +1106,14 @@ static __init int event_trace_self_tests(void)
1106 sysname = kmalloc(4, GFP_KERNEL); 1106 sysname = kmalloc(4, GFP_KERNEL);
1107 if (WARN_ON(!sysname)) { 1107 if (WARN_ON(!sysname)) {
1108 pr_warning("Can't allocate memory, giving up!\n"); 1108 pr_warning("Can't allocate memory, giving up!\n");
1109 return 0; 1109 return;
1110 } 1110 }
1111 memcpy(sysname, "*:*", 4); 1111 memcpy(sysname, "*:*", 4);
1112 ret = ftrace_set_clr_event(sysname, 1); 1112 ret = ftrace_set_clr_event(sysname, 1);
1113 if (WARN_ON_ONCE(ret)) { 1113 if (WARN_ON_ONCE(ret)) {
1114 kfree(sysname); 1114 kfree(sysname);
1115 pr_warning("error enabling all events\n"); 1115 pr_warning("error enabling all events\n");
1116 return 0; 1116 return;
1117 } 1117 }
1118 1118
1119 event_test_stuff(); 1119 event_test_stuff();
@@ -1125,10 +1125,76 @@ static __init int event_trace_self_tests(void)
1125 1125
1126 if (WARN_ON_ONCE(ret)) { 1126 if (WARN_ON_ONCE(ret)) {
1127 pr_warning("error disabling all events\n"); 1127 pr_warning("error disabling all events\n");
1128 return 0; 1128 return;
1129 } 1129 }
1130 1130
1131 pr_cont("OK\n"); 1131 pr_cont("OK\n");
1132}
1133
1134#ifdef CONFIG_FUNCTION_TRACER
1135
1136static DEFINE_PER_CPU(atomic_t, test_event_disable);
1137
/*
 * Function-tracer callback registered while the event self-tests run:
 * for every traced function it records a TRACE_FN entry (ip/parent_ip)
 * into the current trace buffer, stressing the events with extra writes.
 */
1138static void
1139function_test_events_call(unsigned long ip, unsigned long parent_ip)
1140{
1141 struct ring_buffer_event *event;
1142 struct ftrace_entry *entry;
1143 unsigned long flags;
1144 long disabled;
1145 int resched;
1146 int cpu;
1147 int pc;
1148
 /* Snapshot preempt count for the entry, then disable preemption. */
1149 pc = preempt_count();
1150 resched = ftrace_preempt_disable();
1151 cpu = raw_smp_processor_id();
 /* Per-CPU recursion guard: only proceed on the first (non-nested) entry. */
1152 disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
1153
1154 if (disabled != 1)
1155 goto out;
1156
1157 local_save_flags(flags);
1158
 /* Reserve space for a function-trace entry in the ring buffer. */
1159 event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
1160 flags, pc);
 /* NULL reserve means the buffer is unavailable -- silently drop. */
1161 if (!event)
1162 goto out;
1163 entry = ring_buffer_event_data(event);
1164 entry->ip = ip;
1165 entry->parent_ip = parent_ip;
1166
1167 trace_current_buffer_unlock_commit(event, flags, pc);
1168
 /* Always undo the recursion guard and restore preemption state. */
1169 out:
1170 atomic_dec(&per_cpu(test_event_disable, cpu));
1171 ftrace_preempt_enable(resched);
1172}
1173
/*
 * __initdata ftrace_ops hooking the self-test callback into the function
 * tracer; registered only for the duration of the with-function test run.
 */
1174static struct ftrace_ops trace_ops __initdata =
1175{
1176 .func = function_test_events_call,
1177};
1178
/*
 * Re-run the event self-tests with the function tracer registered, so the
 * extra per-function writes stress the trace events.
 */
1179static __init void event_trace_self_test_with_function(void)
1180{
1181 register_ftrace_function(&trace_ops);
1182 pr_info("Running tests again, along with the function tracer\n");
1183 event_trace_self_tests();
1184 unregister_ftrace_function(&trace_ops);
1185}
1186#else
/* CONFIG_FUNCTION_TRACER disabled: no function-tracer pass to run. */
1187static __init void event_trace_self_test_with_function(void)
1188{
1189}
1190#endif
1191
/*
 * Initcall entry point: run the plain event self-tests first, then run
 * them again with the function tracer active. Always returns 0.
 */
1192static __init int event_trace_self_tests_init(void)
1193{
1194
1195 event_trace_self_tests();
1196
1197 event_trace_self_test_with_function();
1132 1198
1133 return 0; 1199 return 0;
1134} 1200}