author		Tom Zanussi <tzanussi@gmail.com>	2009-04-08 04:15:54 -0400
committer	Ingo Molnar <mingo@elte.hu>		2009-04-13 18:00:56 -0400
commit		eb02ce017dd83985041a7e54c6449f92d53b026f
tree		7f52a3e92bf3dae1f3c7754a58ab76fb2eceb2e1	/kernel/trace/trace.c
parent		5f77a88b3f8268b11940b51d2e03d26a663ceb90
tracing/filters: use ring_buffer_discard_commit() in filter_check_discard()
This patch changes filter_check_discard() to make use of the new
ring_buffer_discard_commit() function and modifies the current users to
call the old commit function in the non-discard case.

It also introduces a version of filter_check_discard() that uses the
global trace buffer (filter_current_check_discard()) for those cases.

v2 changes:

- fix compile error noticed by Ingo Molnar

Signed-off-by: Tom Zanussi <tzanussi@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: fweisbec@gmail.com
LKML-Reference: <1239178554.10295.36.camel@tropicana>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
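For context, the companion helper this change depends on is not visible in this file's diff (it lives in kernel/trace/trace.h in the same series). A minimal sketch of its intended shape, assuming the event's predicate handle is reachable as call->preds and matching is delegated to filter_match_preds(), looks roughly like this:

/*
 * Sketch only -- the real helper lives in kernel/trace/trace.h and may
 * differ in detail (call->preds is an assumed field name).  If the record
 * fails the event's filter, the already-reserved ring buffer event is
 * dropped in place with ring_buffer_discard_commit() and 1 is returned so
 * the caller skips its own commit; otherwise 0 is returned and the caller
 * commits as before.
 */
static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->preds) && !filter_match_preds(call, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

The return convention (non-zero means "already discarded") is what lets every call site below collapse "filter, then unconditionally commit" into "commit only if not discarded".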
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	45
1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d880ab2772ce..c0047fcf7076 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -171,6 +171,12 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+				 struct ring_buffer_event *event)
+{
+	return filter_check_discard(call, rec, global_trace.buffer, event);
+}
+
 cycle_t ftrace_now(int cpu)
 {
 	u64 ts;
@@ -919,9 +925,8 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -943,8 +948,8 @@ static int __trace_graph_entry(struct trace_array *tr,
 		return 0;
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(global_trace.buffer, event);
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(global_trace.buffer, event);
 
 	return 1;
 }
@@ -967,8 +972,8 @@ static void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(global_trace.buffer, event);
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
 
@@ -1004,8 +1009,8 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -1052,8 +1057,8 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -1114,9 +1119,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
 
-	filter_check_discard(call, entry, event);
-
-	trace_buffer_unlock_commit(tr, event, flags, pc);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
@@ -1142,9 +1146,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
 }
@@ -1285,8 +1288,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1341,8 +1344,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
  out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
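Taken together, the converted call sites all follow the same reserve / fill / discard-or-commit shape. A self-contained sketch of that pattern, using invented names (foo_entry, TRACE_FOO and event_foo are hypothetical and not part of this commit), could look like:

/*
 * Illustrative only: foo_entry, TRACE_FOO and event_foo are made-up names.
 * The point is the tail of the function: the commit runs only when
 * filter_check_discard() has not already discarded the reserved event.
 */
static void trace_foo(struct trace_array *tr, unsigned long val,
		      unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_foo;
	struct ring_buffer_event *event;
	struct foo_entry *entry;

	event = trace_buffer_lock_reserve(tr, TRACE_FOO, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->val = val;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);
}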