author      Li Zefan <lizf@cn.fujitsu.com>              2009-12-07 22:14:52 -0500
committer   Frederic Weisbecker <fweisbec@gmail.com>    2009-12-13 12:37:25 -0500
commit      3b8e4273814a7f9e9a74ece517d9206fea919aaa (patch)
tree        aa7960d90fe8cd4b04537bf9ea84ac73cb3b69ad
parent      614a71a26ba3d97e9fa85649db69a682b78e407d (diff)
tracing: Move a printk out of ftrace_raw_reg_event_foo()
Move the printk from each ftrace_raw_reg_event_foo() to
its caller, ftrace_event_enable_disable(). This avoids having
every regfunc trace event callback carry a copy of the same
error report; the caller can print it once instead.
See how much space this saves:
   text    data     bss      dec    hex filename
5345151 1961864 7103260 14410275 dbe223 vmlinux.o.old
5331487 1961864 7103260 14396611 dbacc3 vmlinux.o
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <4B1DC4AC.802@cn.fujitsu.com>
[start cmdline record before calling regfunc to avoid lost
window of pid to comm resolution]
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
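
For illustration only (not part of this commit): a minimal, self-contained
user-space C sketch of the pattern being applied. struct event_call,
register_trace_example(), raw_reg_event_example() and event_enable() are
hypothetical stand-ins for the generated ftrace registration callbacks and
for ftrace_event_enable_disable(); the point is that the error report is
printed once in the caller instead of being duplicated into every generated
callback.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's event structure and the
 * generated per-event registration callback. */
struct event_call {
	const char *name;
	int enabled;
	int (*regfunc)(struct event_call *call);
};

static int register_trace_example(struct event_call *call)
{
	(void)call;
	return -1;	/* simulate a failed tracepoint registration */
}

/* After the patch, the generated callback just returns the result;
 * it no longer prints its own error message. */
static int raw_reg_event_example(struct event_call *call)
{
	return register_trace_example(call);
}

/* Single caller: one copy of the error report serves every event, and
 * the enabled flag is only set once registration has succeeded. */
static int event_enable(struct event_call *call)
{
	int ret = 0;

	if (!call->enabled) {
		ret = call->regfunc(call);
		if (ret) {
			printf("event trace: Could not enable event %s\n",
			       call->name);
			return ret;
		}
		call->enabled = 1;
	}
	return ret;
}

int main(void)
{
	struct event_call call = {
		.name = "example",
		.enabled = 0,
		.regfunc = raw_reg_event_example,
	};

	return event_enable(&call) ? 1 : 0;
}

Because each generated per-event registration callback shrinks to a single
return statement, the duplicated pr_info() strings and branches disappear
from every event, which is where the text-size reduction shown above comes
from.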
-rw-r--r--   include/trace/ftrace.h        | 16
-rw-r--r--   kernel/trace/trace_events.c   | 20
-rw-r--r--   kernel/trace/trace_syscalls.c | 10
3 files changed, 19 insertions, 27 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 2af2f7a2c1bd..0c21af85211c 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -555,13 +555,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
  *
  * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
  * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
+ *	return register_trace_<call>(ftrace_event_<call>);
  * }
  *
  * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
@@ -710,13 +704,7 @@ static void ftrace_raw_event_##call(proto)			\
 									\
 static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
 {									\
-	int ret;							\
-									\
-	ret = register_trace_##call(ftrace_raw_event_##call);		\
-	if (ret)							\
-		pr_info("event trace: Could not activate trace point "	\
-			"probe to %s\n", #call);			\
-	return ret;							\
+	return register_trace_##call(ftrace_raw_event_##call);		\
 }									\
 									\
 static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 97b0b3aa166d..189b09baf4fb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -118,9 +118,11 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 }
 EXPORT_SYMBOL_GPL(trace_event_raw_init);
 
-static void ftrace_event_enable_disable(struct ftrace_event_call *call,
+static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 					int enable)
 {
+	int ret = 0;
+
 	switch (enable) {
 	case 0:
 		if (call->enabled) {
@@ -131,12 +133,20 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call,
 		break;
 	case 1:
 		if (!call->enabled) {
-			call->enabled = 1;
 			tracing_start_cmdline_record();
-			call->regfunc(call);
+			ret = call->regfunc(call);
+			if (ret) {
+				tracing_stop_cmdline_record();
+				pr_info("event trace: Could not enable event "
+					"%s\n", call->name);
+				break;
+			}
+			call->enabled = 1;
 		}
 		break;
 	}
+
+	return ret;
 }
 
 static void ftrace_clear_events(void)
@@ -415,7 +425,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	case 0:
 	case 1:
 		mutex_lock(&event_mutex);
-		ftrace_event_enable_disable(call, val);
+		ret = ftrace_event_enable_disable(call, val);
 		mutex_unlock(&event_mutex);
 		break;
 
@@ -425,7 +435,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	*ppos += cnt;
 
-	return cnt;
+	return ret ? ret : cnt;
 }
 
 static ssize_t
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index b957edd0ca3b..75289f372dd2 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -325,10 +325,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
 	mutex_lock(&syscall_trace_lock);
 	if (!sys_refcount_enter)
 		ret = register_trace_sys_enter(ftrace_syscall_enter);
-	if (ret) {
-		pr_info("event trace: Could not activate"
-				"syscall entry trace point");
-	} else {
+	if (!ret) {
 		set_bit(num, enabled_enter_syscalls);
 		sys_refcount_enter++;
 	}
@@ -362,10 +359,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
 	mutex_lock(&syscall_trace_lock);
 	if (!sys_refcount_exit)
 		ret = register_trace_sys_exit(ftrace_syscall_exit);
-	if (ret) {
-		pr_info("event trace: Could not activate"
-				"syscall exit trace point");
-	} else {
+	if (!ret) {
 		set_bit(num, enabled_exit_syscalls);
 		sys_refcount_exit++;
 	}