author		Steven Rostedt <srostedt@redhat.com>	2010-04-23 11:12:36 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2010-05-14 14:33:22 -0400
commit		553552ce1796c32cf4e3d4f45cd5b537de91dd1d (patch)
tree		a65defc1055bcc3e9f34327d2cc59704e536948b
parent		32c0edaeaad74a7883e736ae0f3798784cfc2a80 (diff)
tracing: Combine event filter_active and enable into single flags field
The filter_active and enable fields each use an int (4 bytes) to store
a single flag. We can save 4 bytes per event by combining the two into
a single integer.

   text	   data	    bss	    dec	    hex	filename
4913961	1088356	 861512	6863829	 68bbd5	vmlinux.orig
4894944	1018052	 861512	6774508	 675eec	vmlinux.id
4894871	1012292	 861512	6768675	 674823	vmlinux.flags

This gives us another 5K in savings.

Modifications of both the enable and filter fields are done under the
event_mutex, so it is still safe to combine the two.

Note: Although Mathieu gave his Acked-by, he would like it documented
that the reads of flags are not protected by the mutex. The way the
code works, these reads will not break anything, but will have a
residual effect. Since this behavior is the same even before this
patch, describing this situation is left to another patch, as this
patch does not change the behavior, but just brought it to Mathieu's
attention.

v2: Updated the event trace self test for this change.

Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--	include/linux/ftrace_event.h	21
-rw-r--r--	kernel/trace/trace.h	2
-rw-r--r--	kernel/trace/trace_events.c	16
-rw-r--r--	kernel/trace/trace_events_filter.c	10
-rw-r--r--	kernel/trace/trace_kprobe.c	2
5 files changed, 34 insertions, 17 deletions
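As a rough illustration of the saving described above (a sketch, not part of the patch: the struct names and FL_* constants below are invented for the demo), this is what folding two int fields into one bit-packed word buys per instance:

#include <stdio.h>

enum {
	FL_ENABLED_BIT,
	FL_FILTERED_BIT,
};

enum {
	FL_ENABLED  = (1 << FL_ENABLED_BIT),
	FL_FILTERED = (1 << FL_FILTERED_BIT),
};

struct event_before {		/* old layout: one int per flag */
	int enabled;
	int filter_active;
};

struct event_after {		/* new layout: both flags in one word */
	unsigned int flags;
};

int main(void)
{
	struct event_after ev = { .flags = 0 };

	ev.flags |= FL_ENABLED;		/* was: ev.enabled = 1 */
	ev.flags &= ~FL_FILTERED;	/* was: ev.filter_active = 0 */

	/* prints "8 vs 4" on common ABIs: 4 bytes saved per event */
	printf("%zu vs %zu\n",
	       sizeof(struct event_before), sizeof(struct event_after));
	return !(ev.flags & FL_ENABLED);
}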
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 0be028527633..5ac97a42950d 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -143,6 +143,16 @@ struct ftrace_event_class {
 	int		(*raw_init)(struct ftrace_event_call *);
 };
 
+enum {
+	TRACE_EVENT_FL_ENABLED_BIT,
+	TRACE_EVENT_FL_FILTERED_BIT,
+};
+
+enum {
+	TRACE_EVENT_FL_ENABLED		= (1 << TRACE_EVENT_FL_ENABLED_BIT),
+	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
+};
+
 struct ftrace_event_call {
 	struct list_head	list;
 	struct ftrace_event_class *class;
@@ -154,8 +164,15 @@ struct ftrace_event_call {
 	void			*mod;
 	void			*data;
 
-	int			enabled;
-	int			filter_active;
+	/*
+	 * 32 bit flags:
+	 *   bit 1:		enabled
+	 *   bit 2:		filter_active
+	 *
+	 * Must hold event_mutex to change.
+	 */
+	unsigned int		flags;
+
 	int			perf_refcount;
 };
 
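A side note on the pair of enums introduced above: the *_BIT constants name bit positions and the second enum derives the masks from them, so the numbering lives in one place. One plausible reading is that bit numbers suit helpers that take bit indices, while the masks suit the open-coded flags & MASK tests used throughout the rest of this patch. A minimal standalone check (userspace sketch, assuming only the definitions from the hunk):

#include <assert.h>

enum {
	TRACE_EVENT_FL_ENABLED_BIT,
	TRACE_EVENT_FL_FILTERED_BIT,
};

enum {
	TRACE_EVENT_FL_ENABLED  = (1 << TRACE_EVENT_FL_ENABLED_BIT),
	TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
};

int main(void)
{
	/* each mask follows mechanically from its bit number */
	assert(TRACE_EVENT_FL_ENABLED  == 0x1);		/* 1 << 0 */
	assert(TRACE_EVENT_FL_FILTERED == 0x2);		/* 1 << 1 */

	/* the two flags never overlap, so both fit in one word */
	assert((TRACE_EVENT_FL_ENABLED & TRACE_EVENT_FL_FILTERED) == 0);
	return 0;
}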
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c88c563a59a5..63562595f2b2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -802,7 +802,7 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
 		     struct ring_buffer *buffer,
 		     struct ring_buffer_event *event)
 {
-	if (unlikely(call->filter_active) &&
+	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
 	    !filter_match_preds(call->filter, rec)) {
 		ring_buffer_discard_commit(buffer, event);
 		return 1;
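For readers unfamiliar with the idiom, unlikely() expands to __builtin_expect(!!(x), 0), telling the compiler the filtered case is rare so the common path is laid out first. A userspace sketch of the rewritten test (the stub predicate below is invented for the demo):

#include <stdio.h>

/* same expansion the kernel uses for unlikely() */
#define unlikely(x)	__builtin_expect(!!(x), 0)

#define TRACE_EVENT_FL_FILTERED	(1 << 1)

/* invented stand-in for the real predicate walk */
static int filter_match_preds_stub(int rec)
{
	return rec > 0;
}

static int check_discard(unsigned int flags, int rec)
{
	/* mirrors the rewritten test in filter_check_discard() */
	if (unlikely(flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds_stub(rec))
		return 1;	/* filter active and record rejected */
	return 0;		/* record kept */
}

int main(void)
{
	printf("%d\n", check_discard(0, -1));			     /* 0: no filter */
	printf("%d\n", check_discard(TRACE_EVENT_FL_FILTERED, -1)); /* 1: discarded */
	return 0;
}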
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 8daaca5475b5..53cffc0b0801 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -137,8 +137,8 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 
 	switch (enable) {
 	case 0:
-		if (call->enabled) {
-			call->enabled = 0;
+		if (call->flags & TRACE_EVENT_FL_ENABLED) {
+			call->flags &= ~TRACE_EVENT_FL_ENABLED;
 			tracing_stop_cmdline_record();
 			if (call->class->reg)
 				call->class->reg(call, TRACE_REG_UNREGISTER);
@@ -149,7 +149,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 		}
 		break;
 	case 1:
-		if (!call->enabled) {
+		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
 			tracing_start_cmdline_record();
 			if (call->class->reg)
 				ret = call->class->reg(call, TRACE_REG_REGISTER);
@@ -163,7 +163,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 					  "%s\n", call->name);
 				break;
 			}
-			call->enabled = 1;
+			call->flags |= TRACE_EVENT_FL_ENABLED;
 		}
 		break;
 	}
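The |= and &= ~ updates above are read-modify-write sequences on a shared word; they are only safe because this function runs under event_mutex, as the new comment in ftrace_event.h notes. A pthread analogue of that locking discipline (a sketch with invented names, not kernel code):

#include <pthread.h>

#define TRACE_EVENT_FL_ENABLED	(1 << 0)

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flags;

static void event_enable_disable(int enable)
{
	/*
	 * |= and &= ~ each read, modify, and write flags back; the
	 * lock keeps concurrent enable/disable calls from losing
	 * updates, just as event_mutex does in the kernel code.
	 */
	pthread_mutex_lock(&event_mutex);
	if (enable)
		flags |= TRACE_EVENT_FL_ENABLED;
	else
		flags &= ~TRACE_EVENT_FL_ENABLED;
	pthread_mutex_unlock(&event_mutex);
}

int main(void)
{
	event_enable_disable(1);
	event_enable_disable(0);
	return flags & TRACE_EVENT_FL_ENABLED;	/* 0: flag cleared */
}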
@@ -352,7 +352,7 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
 	(*pos)++;
 
 	list_for_each_entry_continue(call, &ftrace_events, list) {
-		if (call->enabled)
+		if (call->flags & TRACE_EVENT_FL_ENABLED)
 			return call;
 	}
 
@@ -411,7 +411,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 	struct ftrace_event_call *call = filp->private_data;
 	char *buf;
 
-	if (call->enabled)
+	if (call->flags & TRACE_EVENT_FL_ENABLED)
 		buf = "1\n";
 	else
 		buf = "0\n";
@@ -486,7 +486,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 		 * or if all events or cleared, or if we have
 		 * a mixture.
 		 */
-		set |= (1 << !!call->enabled);
+		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
 
 		/*
 		 * If we have a mixture, no need to look further.
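The set |= (1 << !!(...)) line above is worth unpacking: !! collapses the masked flag to 0 or 1, so each disabled event contributes bit 0 and each enabled event contributes bit 1; after the loop, set == 1 means all off, 2 means all on, and 3 means a mixture. A standalone demo (sketch; the sample array is invented):

#include <stdio.h>

#define TRACE_EVENT_FL_ENABLED	(1 << 0)

int main(void)
{
	/* invented sample: one enabled event, one disabled */
	unsigned int events[] = { TRACE_EVENT_FL_ENABLED, 0 };
	int set = 0;
	size_t i;

	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		/* disabled contributes bit 0, enabled contributes bit 1 */
		set |= (1 << !!(events[i] & TRACE_EVENT_FL_ENABLED));
		if (set == 3)	/* mixture found, no need to look further */
			break;
	}
	printf("set = %d (1 = all off, 2 = all on, 3 = mixed)\n", set);
	return 0;
}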
@@ -1447,7 +1447,7 @@ static __init void event_trace_self_tests(void)
 		 * If an event is already enabled, someone is using
 		 * it and the self test should not be on.
 		 */
-		if (call->enabled) {
+		if (call->flags & TRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
 			WARN_ON_ONCE(1);
 			continue;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 2702d6bbf1ab..239ea5d77d68 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -547,7 +547,7 @@ static void filter_disable_preds(struct ftrace_event_call *call)
 	struct event_filter *filter = call->filter;
 	int i;
 
-	call->filter_active = 0;
+	call->flags &= ~TRACE_EVENT_FL_FILTERED;
 	filter->n_preds = 0;
 
 	for (i = 0; i < MAX_FILTER_PRED; i++)
@@ -574,7 +574,7 @@ void destroy_preds(struct ftrace_event_call *call)
 {
 	__free_preds(call->filter);
 	call->filter = NULL;
-	call->filter_active = 0;
+	call->flags &= ~TRACE_EVENT_FL_FILTERED;
 }
 
 static struct event_filter *__alloc_preds(void)
@@ -613,7 +613,7 @@ static int init_preds(struct ftrace_event_call *call)
 	if (call->filter)
 		return 0;
 
-	call->filter_active = 0;
+	call->flags &= ~TRACE_EVENT_FL_FILTERED;
 	call->filter = __alloc_preds();
 	if (IS_ERR(call->filter))
 		return PTR_ERR(call->filter);
@@ -1268,7 +1268,7 @@ static int replace_system_preds(struct event_subsystem *system,
 		if (err)
 			filter_disable_preds(call);
 		else {
-			call->filter_active = 1;
+			call->flags |= TRACE_EVENT_FL_FILTERED;
 			replace_filter_string(filter, filter_string);
 		}
 		fail = false;
@@ -1317,7 +1317,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 	if (err)
 		append_filter_err(ps, call->filter);
 	else
-		call->filter_active = 1;
+		call->flags |= TRACE_EVENT_FL_FILTERED;
 out:
 	filter_opstack_clear(ps);
 	postfix_clear(ps);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 934078bca3f9..0e3ded64cdb7 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1382,7 +1382,7 @@ static int register_probe_event(struct trace_probe *tp)
 		kfree(call->print_fmt);
 		return -ENODEV;
 	}
-	call->enabled = 0;
+	call->flags = 0;
 	call->class->reg = kprobe_register;
 	call->data = tp;
 	ret = trace_add_event_call(call);
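One detail of the kprobe hunk: with the combined field, the single store call->flags = 0 now initializes both former fields at once, where the old code only zeroed enabled. A trivial check (userspace sketch, macros invented to mirror the patch):

#include <assert.h>

#define TRACE_EVENT_FL_ENABLED	(1 << 0)
#define TRACE_EVENT_FL_FILTERED	(1 << 1)

int main(void)
{
	unsigned int flags = TRACE_EVENT_FL_ENABLED | TRACE_EVENT_FL_FILTERED;

	flags = 0;	/* one store clears both former int fields */
	assert(!(flags & (TRACE_EVENT_FL_ENABLED | TRACE_EVENT_FL_FILTERED)));
	return 0;
}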