author     Ingo Molnar <mingo@kernel.org>    2013-02-20 05:26:21 -0500
committer  Ingo Molnar <mingo@kernel.org>    2013-02-20 05:26:21 -0500
commit     ff1fb5f6b4925a536ffb8171e5f2dbd01ccfeb97 (patch)
tree       a88ff0074d2ff11fa71e653c2e74d6a47440975e
parent     69943182bb9e19e4b60ea5033f683ec1af1703a9 (diff)
parent     8c189ea64eea01ca20d102ddb74d6936dd16c579 (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/urgent
Pull two fixes from Steven Rostedt.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   arch/x86/include/asm/ftrace.h        |  24
-rw-r--r--   arch/x86/include/asm/thread_info.h   |   1
-rw-r--r--   kernel/trace/ftrace.c                |  46
-rw-r--r--   kernel/trace/trace_syscalls.c        |  43
4 files changed, 94 insertions, 20 deletions
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 86cb51e1ca96..0525a8bdf65d 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -72,4 +72,28 @@ int ftrace_int3_handler(struct pt_regs *regs);
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+
+#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
+
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
+#include <asm/compat.h>
+
+/*
+ * Because ia32 syscalls do not map to x86_64 syscall numbers
+ * this screws up the trace output when tracing a ia32 task.
+ * Instead of reporting bogus syscalls, just do not trace them.
+ *
+ * If the user realy wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+	if (is_compat_task())
+		return true;
+	return false;
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
+#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
+
 #endif /* _ASM_X86_FTRACE_H */
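
[Illustration, not part of the commit] The comment above refers to the fact that the ia32 and x86_64 syscall tables assign different meanings to the same number, so decoding a compat task's syscall nr against the 64-bit metadata mislabels the event. A minimal userspace sketch of the mismatch, with a few table entries hard-coded purely for demonstration:

/* Hypothetical demo program, not kernel code: a few entries taken from
 * the i386 and x86_64 syscall tables to show why a compat task's number
 * cannot be decoded against the 64-bit metadata. */
#include <stdio.h>

static const struct {
	int nr;
	const char *ia32_name;
	const char *x86_64_name;
} examples[] = {
	{ 1, "exit", "write" },
	{ 2, "fork", "open"  },
	{ 5, "open", "fstat" },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(examples) / sizeof(examples[0]); i++)
		printf("nr %d: ia32=%-5s x86_64=%s\n",
		       examples[i].nr, examples[i].ia32_name,
		       examples[i].x86_64_name);
	return 0;
}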
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2d946e63ee82..2cd056e3ada3 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -20,7 +20,6 @@
 struct task_struct;
 struct exec_domain;
 #include <asm/processor.h>
-#include <asm/ftrace.h>
 #include <linux/atomic.h>
 
 struct thread_info {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ce8c3d68292f..98ca94a41819 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3996,37 +3996,51 @@ static void ftrace_init_module(struct module *mod,
 	ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify(struct notifier_block *self,
+static int ftrace_module_notify_enter(struct notifier_block *self,
 				unsigned long val, void *data)
 {
 	struct module *mod = data;
 
-	switch (val) {
-	case MODULE_STATE_COMING:
+	if (val == MODULE_STATE_COMING)
 		ftrace_init_module(mod, mod->ftrace_callsites,
 				   mod->ftrace_callsites +
 				   mod->num_ftrace_callsites);
-		break;
-	case MODULE_STATE_GOING:
+	return 0;
+}
+
+static int ftrace_module_notify_exit(struct notifier_block *self,
+				unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	if (val == MODULE_STATE_GOING)
 		ftrace_release_mod(mod);
-		break;
-	}
 
 	return 0;
 }
 #else
-static int ftrace_module_notify(struct notifier_block *self,
+static int ftrace_module_notify_enter(struct notifier_block *self,
 				unsigned long val, void *data)
+{
+	return 0;
+}
+static int ftrace_module_notify_exit(struct notifier_block *self,
+				unsigned long val, void *data)
 {
 	return 0;
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_nb = {
-	.notifier_call = ftrace_module_notify,
+struct notifier_block ftrace_module_enter_nb = {
+	.notifier_call = ftrace_module_notify_enter,
 	.priority = INT_MAX,	/* Run before anything that can use kprobes */
 };
 
+struct notifier_block ftrace_module_exit_nb = {
+	.notifier_call = ftrace_module_notify_exit,
+	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
+};
+
 extern unsigned long __start_mcount_loc[];
 extern unsigned long __stop_mcount_loc[];
 
@@ -4058,9 +4072,13 @@ void __init ftrace_init(void)
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
-	ret = register_module_notifier(&ftrace_module_nb);
+	ret = register_module_notifier(&ftrace_module_enter_nb);
+	if (ret)
+		pr_warning("Failed to register trace ftrace module enter notifier\n");
+
+	ret = register_module_notifier(&ftrace_module_exit_nb);
 	if (ret)
-		pr_warning("Failed to register trace ftrace module notifier\n");
+		pr_warning("Failed to register trace ftrace module exit notifier\n");
 
 	set_ftrace_early_filters();
 
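
[Illustration, not part of the commit] The split above exists because a single notifier_block has a single priority, while ftrace must run before any COMING-state user of kprobes and after any GOING-state user, hence two blocks at INT_MAX and INT_MIN. A minimal sketch of the general module-notifier registration pattern, with hypothetical names:

/* Hypothetical notifier, not from this commit: logs each module as it
 * enters the COMING state.  Priority orders notifiers for the same
 * state, which is why ftrace registers one block at INT_MAX (first on
 * COMING) and another at INT_MIN (last on GOING). */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_module_coming(struct notifier_block *self,
				 unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_COMING)
		pr_info("module coming: %s\n", mod->name);
	return 0;
}

static struct notifier_block example_module_nb = {
	.notifier_call	= example_module_coming,
	.priority	= INT_MAX,	/* run before lower-priority COMING notifiers */
};

static int __init example_notifier_init(void)
{
	return register_module_notifier(&example_module_nb);
}
late_initcall(example_notifier_init);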
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 5329e13e74a1..7a809e321058 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -1,5 +1,6 @@
 #include <trace/syscall.h>
 #include <trace/events/syscalls.h>
+#include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
@@ -47,6 +48,38 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
 }
 #endif
 
+#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+/*
+ * Some architectures that allow for 32bit applications
+ * to run on a 64bit kernel, do not map the syscalls for
+ * the 32bit tasks the same as they do for 64bit tasks.
+ *
+ *     *cough*x86*cough*
+ *
+ * In such a case, instead of reporting the wrong syscalls,
+ * simply ignore them.
+ *
+ * For an arch to ignore the compat syscalls it needs to
+ * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
+ * define the function arch_trace_is_compat_syscall() to let
+ * the tracing system know that it should ignore it.
+ */
+static int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	if (unlikely(arch_trace_is_compat_syscall(regs)))
+		return -1;
+
+	return syscall_get_nr(task, regs);
+}
+#else
+static inline int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	return syscall_get_nr(task, regs);
+}
+#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
+
 static __init struct syscall_metadata *
 find_syscall_meta(unsigned long syscall)
 {
@@ -276,10 +309,10 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
-	int size;
 	int syscall_nr;
+	int size;
 
-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_enter_syscalls))
@@ -313,7 +346,7 @@ static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	struct ring_buffer *buffer;
 	int syscall_nr;
 
-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_exit_syscalls))
@@ -502,7 +535,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	int rctx;
 	int size;
 
-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
@@ -578,7 +611,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	int rctx;
 	int size;
 
-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
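
[Illustration, not part of the commit] As the ftrace.h comment suggests, compat syscalls can still be observed through the raw syscall tracepoints with filtering once the named syscall events stop reporting them. A rough userspace sketch; the tracefs mount point and the chosen filter value are assumptions, not taken from this commit:

/* Hypothetical userspace helper: enables raw_syscalls:sys_enter with a
 * filter on the raw syscall id by writing the usual tracefs control files. */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *dir = "/sys/kernel/debug/tracing/events/raw_syscalls/sys_enter";
	char path[256];

	/* keep only events whose raw syscall id is 5 */
	snprintf(path, sizeof(path), "%s/filter", dir);
	if (write_str(path, "id == 5"))
		return 1;

	snprintf(path, sizeof(path), "%s/enable", dir);
	return write_str(path, "1") ? 1 : 0;
}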