author     Tom Zanussi <tzanussi@gmail.com>   2009-04-08 04:15:54 -0400
committer  Ingo Molnar <mingo@elte.hu>        2009-04-13 18:00:56 -0400
commit     eb02ce017dd83985041a7e54c6449f92d53b026f (patch)
tree       7f52a3e92bf3dae1f3c7754a58ab76fb2eceb2e1 /kernel
parent     5f77a88b3f8268b11940b51d2e03d26a663ceb90 (diff)
tracing/filters: use ring_buffer_discard_commit() in filter_check_discard()
This patch changes filter_check_discard() to make use of the new
ring_buffer_discard_commit() function and modifies the current users to
call the old commit function in the non-discard case.
It also introduces filter_current_check_discard(), a variant of
filter_check_discard() for callers that log to the global trace buffer.
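
For reference, the resulting caller pattern is sketched below (condensed
from the hunks in this patch; call, entry, tr and event are the usual
per-callsite locals, not new names):

	/* commit the event only if the filter did not discard it */
	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

	/* callers that log to the global trace buffer use the wrapper */
	if (!filter_current_check_discard(call, entry, event))
		ring_buffer_unlock_commit(global_trace.buffer, event);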
v2 changes:
- fix compile error noticed by Ingo Molnar
Signed-off-by: Tom Zanussi <tzanussi@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: fweisbec@gmail.com
LKML-Reference: <1239178554.10295.36.camel@tropicana>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/kmemtrace.c            | 10
-rw-r--r--  kernel/trace/trace.c                | 45
-rw-r--r--  kernel/trace/trace.h                | 14
-rw-r--r--  kernel/trace/trace_branch.c         |  5
-rw-r--r--  kernel/trace/trace_events_stage_3.h |  5
-rw-r--r--  kernel/trace/trace_hw_branches.c    |  4
-rw-r--r--  kernel/trace/trace_power.c          |  8
7 files changed, 48 insertions, 43 deletions
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 9419ad10541b..86cdf671d7e2 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -63,9 +63,8 @@ static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
 	entry->gfp_flags = gfp_flags;
 	entry->node = node;
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
@@ -90,9 +89,8 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
 	entry->call_site = call_site;
 	entry->ptr = ptr;
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 	trace_wake_up();
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d880ab2772ce..c0047fcf7076 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -171,6 +171,12 @@ static struct trace_array global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+				 struct ring_buffer_event *event)
+{
+	return filter_check_discard(call, rec, global_trace.buffer, event);
+}
+
 cycle_t ftrace_now(int cpu)
 {
 	u64 ts;
@@ -919,9 +925,8 @@ trace_function(struct trace_array *tr,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -943,8 +948,8 @@ static int __trace_graph_entry(struct trace_array *tr,
 		return 0;
 	entry = ring_buffer_event_data(event);
 	entry->graph_ent = *trace;
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(global_trace.buffer, event);
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(global_trace.buffer, event);
 
 	return 1;
 }
@@ -967,8 +972,8 @@ static void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(global_trace.buffer, event);
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(global_trace.buffer, event);
 }
 #endif
 
@@ -1004,8 +1009,8 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace(&trace);
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -1052,8 +1057,8 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 	trace.entries = entry->caller;
 
 	save_stack_trace_user(&trace);
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 #endif
 }
 
@@ -1114,9 +1119,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
 
-	filter_check_discard(call, entry, event);
-
-	trace_buffer_unlock_commit(tr, event, flags, pc);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, flags, pc);
 }
 
 void
@@ -1142,9 +1146,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
 }
@@ -1285,8 +1288,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
@@ -1341,8 +1344,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
-	filter_check_discard(call, entry, event);
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
 out_unlock:
 	__raw_spin_unlock(&trace_buf_lock);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dfefffd7ae39..9729d14767d8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -866,13 +866,21 @@ extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
 extern void filter_free_subsystem_preds(struct event_subsystem *system);
 extern int filter_add_subsystem_pred(struct event_subsystem *system,
 				     struct filter_pred *pred);
+extern int filter_current_check_discard(struct ftrace_event_call *call,
+					void *rec,
+					struct ring_buffer_event *event);
 
-static inline void
+static inline int
 filter_check_discard(struct ftrace_event_call *call, void *rec,
+		     struct ring_buffer *buffer,
 		     struct ring_buffer_event *event)
 {
-	if (unlikely(call->preds) && !filter_match_preds(call, rec))
-		ring_buffer_event_discard(event);
+	if (unlikely(call->preds) && !filter_match_preds(call, rec)) {
+		ring_buffer_discard_commit(buffer, event);
+		return 1;
+	}
+
+	return 0;
 }
 
 #define __common_field(type, item) \
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index c95c25d838ef..8e64e604f5a7 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -74,9 +74,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->line = f->line;
 	entry->correct = val == expect;
 
-	filter_check_discard(call, entry, event);
-
-	ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
index d2f34bf30e59..b2b298269eb0 100644
--- a/kernel/trace/trace_events_stage_3.h
+++ b/kernel/trace/trace_events_stage_3.h
@@ -222,11 +222,8 @@ static void ftrace_raw_event_##call(proto)			\
 									\
 	assign;								\
 									\
-	if (call->preds && !filter_match_preds(call, entry))		\
-		trace_current_buffer_discard_commit(event);		\
-	else								\
+	if (!filter_current_check_discard(call, entry, event))		\
 		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
-									\
 }									\
 									\
 static int ftrace_raw_reg_event_##call(void)				\
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index e6b275b22ac0..8683d50a753a 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -195,8 +195,8 @@ void trace_hw_branch(u64 from, u64 to)
 	entry->ent.type = TRACE_HW_BRANCHES;
 	entry->from = from;
 	entry->to = to;
-	filter_check_discard(call, entry, event);
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, 0, 0);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 8ce7d7d62c07..810a5b7cf1c5 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -55,8 +55,8 @@ static void probe_power_end(struct power_trace *it)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	filter_check_discard(call, entry, event);
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
 	preempt_enable();
 }
@@ -87,8 +87,8 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	filter_check_discard(call, entry, event);
-	trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, 0, 0);
  out:
 	preempt_enable();
 }