author     Tom Zanussi <tzanussi@gmail.com>    2009-03-31 01:48:49 -0400
committer  Ingo Molnar <mingo@elte.hu>         2009-04-13 18:00:50 -0400
commit     e1112b4d96859367a93468027c9635e2ac04eb3f (patch)
tree       5170980ea71ee4bb5d0196880b58dbc997211b65 /kernel/trace/trace.c
parent     66de7792c02693b49671afe58c771fde3b092fc7 (diff)
tracing/filters: add run-time field descriptions to TRACE_EVENT_FORMAT events
This patch adds run-time field descriptions to all the event formats exported using TRACE_EVENT_FORMAT. It also hooks up all the tracers that use them (i.e. the tracers in the 'ftrace subsystem') so their output can also be filtered by the event-filtering mechanism.

When I was testing this, there were a couple of things that fooled me into thinking the filters weren't working, when actually they were - I'll mention them here so others don't make the same mistakes (and file bug reports ;-).

One is that some of the tracers trace multiple events, e.g. the sched_switch tracer uses the context_switch and wakeup events. If you don't set filters on all of the traced events, the unfiltered output from the events without filters can make it look like the filtering as a whole isn't working properly, when actually it is doing exactly what it was asked to do - it just wasn't asked to do the right thing.

The other is that for the really high-volume tracers, e.g. the function tracer, the volume of filtered events can be so high that it pushes the unfiltered events out of the ring buffer before they can be read. So cat'ing the trace file repeatedly shows either no output, or once in a while some output that is gone again the next time you read the trace, which isn't what you normally expect when reading the trace file. If you read from the trace_pipe file instead, you can catch the events before they disappear.

Changes from v1, as suggested by Frederic Weisbecker:
- get rid of externs in functions
- added unlikely() to filter_check_discard()

Signed-off-by: Tom Zanussi <tzanussi@gmail.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
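The diff below repeats one small pattern in each tracer: declare a pointer to the run-time event description for the event being recorded, fill in the trace entry as before, and call filter_check_discard() on it just before the ring-buffer commit. The following is a condensed sketch of trace_function() after the patch, not an authoritative copy: the reservation step (trace_buffer_lock_reserve() with the TRACE_FN type) is assumed from the surrounding trace.c code, since the hunks below only show the declaration and the filter call.

/*
 * Minimal sketch of the pattern this patch adds to each tracer,
 * condensed from the trace_function() hunks below.  The reservation
 * helper and TRACE_FN type are assumptions about the untouched
 * surrounding code, not part of this diff.
 */
static void trace_function_sketch(struct trace_array *tr, unsigned long ip,
				  unsigned long parent_ip, unsigned long flags,
				  int pc)
{
	/* Run-time event description generated from TRACE_EVENT_FORMAT. */
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	/*
	 * Let the event-filtering code discard the just-written event
	 * if it does not match the filter set on this event.
	 */
	filter_check_discard(call, entry, event);

	ring_buffer_unlock_commit(tr->buffer, event);
}

The same shape recurs in the graph, stack, sched_switch, wakeup and printk paths touched below; only the event object (event_funcgraph_entry, event_kernel_stack, event_context_switch, ...) and the commit helper that closes the event differ.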
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  25
1 file changed, 25 insertions, 0 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4865459f609f..962e6179994a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -898,6 +898,7 @@ trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
 	       int pc)
 {
+	struct ftrace_event_call *call = &event_function;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
@@ -912,6 +913,9 @@ trace_function(struct trace_array *tr,
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
+
+	filter_check_discard(call, entry, event);
+
 	ring_buffer_unlock_commit(tr->buffer, event);
 }
 
@@ -921,6 +925,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 				unsigned long flags,
 				int pc)
 {
+	struct ftrace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ent_entry *entry;
 
@@ -933,6 +938,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 		return 0;
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
+	filter_check_discard(call, entry, event);
 	ring_buffer_unlock_commit(global_trace.buffer, event);
 
 	return 1;
@@ -943,6 +949,7 @@ static void __trace_graph_return(struct trace_array *tr,
 				unsigned long flags,
 				int pc)
 {
+	struct ftrace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *entry;
 
@@ -955,6 +962,7 @@ static void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
+	filter_check_discard(call, entry, event);
 	ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
@@ -973,6 +981,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 				 int skip, int pc)
 {
 #ifdef CONFIG_STACKTRACE
+	struct ftrace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
@@ -990,6 +999,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
+	filter_check_discard(call, entry, event);
 	ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
@@ -1015,6 +1025,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 				   unsigned long flags, int pc)
 {
 #ifdef CONFIG_STACKTRACE
+	struct ftrace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 	struct stack_trace trace;
@@ -1036,6 +1047,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
+	filter_check_discard(call, entry, event);
 	ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
@@ -1052,6 +1064,7 @@ ftrace_trace_special(void *__tr,
 		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
 		     int pc)
 {
+	struct ftrace_event_call *call = &event_special;
 	struct ring_buffer_event *event;
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;
@@ -1064,6 +1077,7 @@ ftrace_trace_special(void *__tr,
 	entry->arg1 = arg1;
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
+	filter_check_discard(call, entry, event);
 	trace_buffer_unlock_commit(tr, event, 0, pc);
 }
 
@@ -1080,6 +1094,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 			   struct task_struct *next,
 			   unsigned long flags, int pc)
 {
+	struct ftrace_event_call *call = &event_context_switch;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
@@ -1095,6 +1110,9 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_prio = next->prio;
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
+
+	filter_check_discard(call, entry, event);
+
 	trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
@@ -1104,6 +1122,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 			   struct task_struct *curr,
 			   unsigned long flags, int pc)
 {
+	struct ftrace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 
@@ -1120,6 +1139,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 
+	filter_check_discard(call, entry, event);
+
 	ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
@@ -1221,6 +1242,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
+	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1260,6 +1282,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+	filter_check_discard(call, entry, event);
 	ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
@@ -1279,6 +1302,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
+	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
@@ -1314,6 +1338,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
+	filter_check_discard(call, entry, event);
 	ring_buffer_unlock_commit(tr->buffer, event);
 
  out_unlock: