author    Steven Rostedt <srostedt@redhat.com>    2010-04-23 10:38:03 -0400
committer Steven Rostedt <rostedt@goodmis.org>    2010-05-14 14:33:15 -0400
commit    32c0edaeaad74a7883e736ae0f3798784cfc2a80 (patch)
tree      e70784ed690172cb0f1b4365b93aa077d40219c9 /kernel/trace
parent    80decc70afc57c87eee9d6b836aec2ecacba3457 (diff)
tracing: Remove duplicate id information in event structure
Now that the trace_event structure is embedded in the ftrace_event_call
structure, there is no need for the ftrace_event_call id field. The id
field is the same as the trace_event type field.

Removing the id and re-arranging the structure brings down the
tracepoint footprint by another 5K.

   text    data     bss     dec     hex filename
4913961 1088356  861512 6863829  68bbd5 vmlinux.orig
4895024 1023812  861512 6780348  6775bc vmlinux.print
4894944 1018052  861512 6774508  675eec vmlinux.id

Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
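For context, a minimal sketch of the layout this change relies on (illustrative only, not the complete kernel definitions): struct ftrace_event_call embeds a struct trace_event, and the type value assigned by register_ftrace_event() doubles as the event ID, so every call->id user in the diff below becomes call->event.type.

	/*
	 * Illustrative sketch only -- fields other than the embedded
	 * "event" and its "type" are placeholders, not the full structures.
	 */
	struct trace_event {
		int	type;		/* ID assigned by register_ftrace_event() */
		/* ... output callbacks ... */
	};

	struct ftrace_event_call {
		char				*name;
		struct ftrace_event_class	*class;
		struct trace_event		event;	/* embedded; event.type replaces the old id */
		/* ... */
	};

	/* Call sites therefore switch from call->id to the embedded type: */
	if (call->event.type == event_id)
		/* matched */;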
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/trace_event_perf.c      4
-rw-r--r--   kernel/trace/trace_events.c          7
-rw-r--r--   kernel/trace/trace_events_filter.c   2
-rw-r--r--   kernel/trace/trace_export.c          4
-rw-r--r--   kernel/trace/trace_kprobe.c         18
-rw-r--r--   kernel/trace/trace_syscalls.c       14
6 files changed, 26 insertions(+), 23 deletions(-)
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 196fe9d26773..0a47e8d6b491 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -80,7 +80,7 @@ int perf_trace_enable(int event_id)
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id &&
+		if (event->event.type == event_id &&
 		    event->class && event->class->perf_probe &&
 		    try_module_get(event->mod)) {
 			ret = perf_trace_event_enable(event);
@@ -128,7 +128,7 @@ void perf_trace_disable(int event_id)
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id) {
+		if (event->event.type == event_id) {
 			perf_trace_event_disable(event);
 			module_put(event->mod);
 			break;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aafe5bff8f59..8daaca5475b5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -125,7 +125,6 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 	id = register_ftrace_event(&call->event);
 	if (!id)
 		return -ENODEV;
-	call->id = id;
 
 	return 0;
 }
@@ -567,7 +566,7 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
 	trace_seq_init(s);
 
 	trace_seq_printf(s, "name: %s\n", call->name);
-	trace_seq_printf(s, "ID: %d\n", call->id);
+	trace_seq_printf(s, "ID: %d\n", call->event.type);
 	trace_seq_printf(s, "format:\n");
 
 	head = trace_get_fields(call);
@@ -641,7 +640,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 		return -ENOMEM;
 
 	trace_seq_init(s);
-	trace_seq_printf(s, "%d\n", call->id);
+	trace_seq_printf(s, "%d\n", call->event.type);
 
 	r = simple_read_from_buffer(ubuf, cnt, ppos,
 				    s->buffer, s->len);
@@ -969,7 +968,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 				  enable);
 
 #ifdef CONFIG_PERF_EVENTS
-	if (call->id && (call->class->perf_probe || call->class->reg))
+	if (call->event.type && (call->class->perf_probe || call->class->reg))
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
 #endif
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 961f99b74bdd..2702d6bbf1ab 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1395,7 +1395,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 	mutex_lock(&event_mutex);
 
 	list_for_each_entry(call, &ftrace_events, list) {
-		if (call->id == event_id)
+		if (call->event.type == event_id)
 			break;
 	}
 
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index e878d06c0ac0..8536e2a65969 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -153,7 +153,7 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
 #define F_printk(fmt, args...) #fmt ", " __stringify(args)
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, type, tstruct, print)	\
+#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print)	\
 								\
 struct ftrace_event_class event_class_ftrace_##call = {	\
 	.system		= __stringify(TRACE_SYSTEM),		\
@@ -165,7 +165,7 @@ struct ftrace_event_call __used \
 __attribute__((__aligned__(4)))				\
 __attribute__((section("_ftrace_events"))) event_##call = {	\
 	.name		= #call,				\
-	.id		= type,					\
+	.event.type	= etype,				\
 	.class		= &event_class_ftrace_##call,		\
 	.print_fmt	= print,				\
 };								\
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d8061c3e02c9..934078bca3f9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -960,8 +960,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 
 	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
-						  irq_flags, pc);
+	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+						  size, irq_flags, pc);
 	if (!event)
 		return;
 
@@ -992,8 +992,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 
 	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
-						  irq_flags, pc);
+	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+						  size, irq_flags, pc);
 	if (!event)
 		return;
 
@@ -1228,7 +1228,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
1228 "profile buffer not large enough")) 1228 "profile buffer not large enough"))
1229 return; 1229 return;
1230 1230
1231 entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); 1231 entry = perf_trace_buf_prepare(size, call->event.type,
1232 &rctx, &irq_flags);
1232 if (!entry) 1233 if (!entry)
1233 return; 1234 return;
1234 1235
@@ -1258,7 +1259,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1258 "profile buffer not large enough")) 1259 "profile buffer not large enough"))
1259 return; 1260 return;
1260 1261
1261 entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); 1262 entry = perf_trace_buf_prepare(size, call->event.type,
1263 &rctx, &irq_flags);
1262 if (!entry) 1264 if (!entry)
1263 return; 1265 return;
1264 1266
@@ -1375,8 +1377,8 @@ static int register_probe_event(struct trace_probe *tp)
 	}
 	if (set_print_fmt(tp) < 0)
 		return -ENOMEM;
-	call->id = register_ftrace_event(&call->event);
-	if (!call->id) {
+	ret = register_ftrace_event(&call->event);
+	if (!ret) {
 		kfree(call->print_fmt);
 		return -ENODEV;
 	}
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7c7cfe95a853..9d358301ae3e 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -117,7 +117,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
 	if (!entry)
 		goto end;
 
-	if (entry->enter_event->id != ent->type) {
+	if (entry->enter_event->event.type != ent->type) {
 		WARN_ON_ONCE(1);
 		goto end;
 	}
@@ -173,7 +173,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
 		return TRACE_TYPE_HANDLED;
 	}
 
-	if (entry->exit_event->id != ent->type) {
+	if (entry->exit_event->event.type != ent->type) {
 		WARN_ON_ONCE(1);
 		return TRACE_TYPE_UNHANDLED;
 	}
@@ -315,7 +315,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
 
 	event = trace_current_buffer_lock_reserve(&buffer,
-			sys_data->enter_event->id, size, 0, 0);
+			sys_data->enter_event->event.type, size, 0, 0);
 	if (!event)
 		return;
 
@@ -347,7 +347,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 		return;
 
 	event = trace_current_buffer_lock_reserve(&buffer,
-			sys_data->exit_event->id, sizeof(*entry), 0, 0);
+			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
 	if (!event)
 		return;
 
@@ -511,7 +511,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 		return;
 
 	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
-				sys_data->enter_event->id, &rctx, &flags);
+				sys_data->enter_event->event.type,
+				&rctx, &flags);
 	if (!rec)
 		return;
 
@@ -586,7 +587,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 		return;
 
 	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
-				sys_data->exit_event->id, &rctx, &flags);
+				sys_data->exit_event->event.type,
+				&rctx, &flags);
 	if (!rec)
 		return;
 