| author | Ingo Molnar <mingo@elte.hu> | 2010-02-26 03:20:17 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-02-26 03:20:17 -0500 |
| commit | 281b3714e91162b66add1cfac404cf7b81e3e2f2 (patch) | |
| tree | 9f80453153db272c207129d971e17d31a6bb214a | |
| parent | 64b9fb5704a479d98a59f2a1d45d3331a8f847f8 (diff) | |
| parent | 7b60997f73865b019e595720185c85285ca3df9a (diff) | |
Merge branch 'tip/tracing/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/core
| -rw-r--r-- | arch/x86/kernel/ftrace.c | 26 |
| -rw-r--r-- | include/linux/syscalls.h | 6 |
| -rw-r--r-- | include/trace/ftrace.h | 3 |
| -rw-r--r-- | kernel/trace/Kconfig | 9 |
| -rw-r--r-- | kernel/trace/trace.h | 3 |
| -rw-r--r-- | kernel/trace/trace_events.c | 4 |
| -rw-r--r-- | kernel/trace/trace_functions_graph.c | 1 |
| -rw-r--r-- | kernel/trace/trace_kprobe.c | 4 |
| -rw-r--r-- | kernel/trace/trace_syscalls.c | 2 |
9 files changed, 38 insertions, 20 deletions
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 309689245431..605ef196fdd6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
| @@ -30,14 +30,32 @@ | |||
| 30 | 30 | ||
| 31 | #ifdef CONFIG_DYNAMIC_FTRACE | 31 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 32 | 32 | ||
| 33 | /* | ||
| 34 | * modifying_code is set to notify NMIs that they need to use | ||
| 35 | * memory barriers when entering or exiting. But we don't want | ||
| 36 | * to burden NMIs with unnecessary memory barriers when code | ||
| 37 | * modification is not being done (which is most of the time). | ||
| 38 | * | ||
| 39 | * A mutex is already held when ftrace_arch_code_modify_prepare | ||
| 40 | * and post_process are called. No locks need to be taken here. | ||
| 41 | * | ||
| 42 | * Stop machine will make sure currently running NMIs are done | ||
| 43 | * and new NMIs will see the updated variable before we need | ||
| 44 | * to worry about NMIs doing memory barriers. | ||
| 45 | */ | ||
| 46 | static int modifying_code __read_mostly; | ||
| 47 | static DEFINE_PER_CPU(int, save_modifying_code); | ||
| 48 | |||
| 33 | int ftrace_arch_code_modify_prepare(void) | 49 | int ftrace_arch_code_modify_prepare(void) |
| 34 | { | 50 | { |
| 35 | set_kernel_text_rw(); | 51 | set_kernel_text_rw(); |
| 52 | modifying_code = 1; | ||
| 36 | return 0; | 53 | return 0; |
| 37 | } | 54 | } |
| 38 | 55 | ||
| 39 | int ftrace_arch_code_modify_post_process(void) | 56 | int ftrace_arch_code_modify_post_process(void) |
| 40 | { | 57 | { |
| 58 | modifying_code = 0; | ||
| 41 | set_kernel_text_ro(); | 59 | set_kernel_text_ro(); |
| 42 | return 0; | 60 | return 0; |
| 43 | } | 61 | } |
| @@ -149,6 +167,11 @@ static void ftrace_mod_code(void) | |||
| 149 | 167 | ||
| 150 | void ftrace_nmi_enter(void) | 168 | void ftrace_nmi_enter(void) |
| 151 | { | 169 | { |
| 170 | __get_cpu_var(save_modifying_code) = modifying_code; | ||
| 171 | |||
| 172 | if (!__get_cpu_var(save_modifying_code)) | ||
| 173 | return; | ||
| 174 | |||
| 152 | if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { | 175 | if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { |
| 153 | smp_rmb(); | 176 | smp_rmb(); |
| 154 | ftrace_mod_code(); | 177 | ftrace_mod_code(); |
| @@ -160,6 +183,9 @@ void ftrace_nmi_enter(void) | |||
| 160 | 183 | ||
| 161 | void ftrace_nmi_exit(void) | 184 | void ftrace_nmi_exit(void) |
| 162 | { | 185 | { |
| 186 | if (!__get_cpu_var(save_modifying_code)) | ||
| 187 | return; | ||
| 188 | |||
| 163 | /* Finish all executions before clearing nmi_running */ | 189 | /* Finish all executions before clearing nmi_running */ |
| 164 | smp_mb(); | 190 | smp_mb(); |
| 165 | atomic_dec(&nmi_running); | 191 | atomic_dec(&nmi_running); |
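The comment block and the new `save_modifying_code` checks above add a fast path: NMIs only pay for the memory-barrier dance while code is actually being patched, and the flag is latched once on entry so the exit path makes the same decision even if `modifying_code` changes mid-NMI. Below is a minimal userspace sketch of that latch-on-entry idea (C11, with thread-local storage standing in for the per-CPU variable; names are illustrative, and this is not the kernel code):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int modifying_code;          /* set only around rare updates      */
static _Thread_local int saved_modifying;  /* per-handler latch, like the       */
                                           /* per-CPU save_modifying_code above */

static void handler_enter(void)
{
	/* Latch the flag once; every later check in this handler uses the
	 * copy, so enter and exit agree even if the global flips meanwhile. */
	saved_modifying = atomic_load(&modifying_code);
	if (!saved_modifying)
		return;                    /* fast path: skip the barrier work */
	/* slow path: synchronize with the code modifier here */
}

static void handler_exit(void)
{
	if (!saved_modifying)
		return;                    /* must mirror the entry decision */
	/* matching cleanup for the slow path */
}

int main(void)
{
	handler_enter();
	handler_exit();
	printf("fast path taken: %s\n", saved_modifying ? "no" : "yes");
	return 0;
}
```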
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 7b219696ad24..91bd7d78a07d 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
| @@ -132,7 +132,8 @@ struct perf_event_attr; | |||
| 132 | 132 | ||
| 133 | #define SYSCALL_TRACE_ENTER_EVENT(sname) \ | 133 | #define SYSCALL_TRACE_ENTER_EVENT(sname) \ |
| 134 | static const struct syscall_metadata __syscall_meta_##sname; \ | 134 | static const struct syscall_metadata __syscall_meta_##sname; \ |
| 135 | static struct ftrace_event_call event_enter_##sname; \ | 135 | static struct ftrace_event_call \ |
| 136 | __attribute__((__aligned__(4))) event_enter_##sname; \ | ||
| 136 | static struct trace_event enter_syscall_print_##sname = { \ | 137 | static struct trace_event enter_syscall_print_##sname = { \ |
| 137 | .trace = print_syscall_enter, \ | 138 | .trace = print_syscall_enter, \ |
| 138 | }; \ | 139 | }; \ |
| @@ -153,7 +154,8 @@ struct perf_event_attr; | |||
| 153 | 154 | ||
| 154 | #define SYSCALL_TRACE_EXIT_EVENT(sname) \ | 155 | #define SYSCALL_TRACE_EXIT_EVENT(sname) \ |
| 155 | static const struct syscall_metadata __syscall_meta_##sname; \ | 156 | static const struct syscall_metadata __syscall_meta_##sname; \ |
| 156 | static struct ftrace_event_call event_exit_##sname; \ | 157 | static struct ftrace_event_call \ |
| 158 | __attribute__((__aligned__(4))) event_exit_##sname; \ | ||
| 157 | static struct trace_event exit_syscall_print_##sname = { \ | 159 | static struct trace_event exit_syscall_print_##sname = { \ |
| 158 | .trace = print_syscall_exit, \ | 160 | .trace = print_syscall_exit, \ |
| 159 | }; \ | 161 | }; \ |
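The `__aligned__(4)` forced onto these `ftrace_event_call` definitions (and repeated in the `ftrace.h` and `trace.h` hunks below) matters because the structures are emitted into a dedicated linker section and later walked as an array, so every definition has to agree on alignment. A standalone sketch of that section-as-array pattern, with hypothetical section and struct names, relying on GNU ld generating `__start_`/`__stop_` symbols for the section:

```c
#include <stdio.h>

struct event_desc {
	const char *name;
};

/* GNU ld creates __start_/__stop_ symbols for C-identifier section names. */
extern struct event_desc __start_my_events[];
extern struct event_desc __stop_my_events[];

/* aligned(4) mirrors the annotation above: it declares a common minimum
 * alignment for every entry dropped into the section. */
#define DEFINE_DESC(n)							\
	static struct event_desc desc_##n				\
	__attribute__((section("my_events"), aligned(4), used)) =	\
	{ .name = #n }

DEFINE_DESC(open);
DEFINE_DESC(close);

int main(void)
{
	struct event_desc *d;

	/* Walk everything the linker collected into the section; this only
	 * works if each entry uses the same size and alignment. */
	for (d = __start_my_events; d < __stop_my_events; d++)
		printf("event: %s\n", d->name);
	return 0;
}
```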
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 09fd9afc0859..f23a0ca6910a 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
| @@ -65,7 +65,8 @@ | |||
| 65 | }; | 65 | }; |
| 66 | #undef DEFINE_EVENT | 66 | #undef DEFINE_EVENT |
| 67 | #define DEFINE_EVENT(template, name, proto, args) \ | 67 | #define DEFINE_EVENT(template, name, proto, args) \ |
| 68 | static struct ftrace_event_call event_##name | 68 | static struct ftrace_event_call \ |
| 69 | __attribute__((__aligned__(4))) event_##name | ||
| 69 | 70 | ||
| 70 | #undef DEFINE_EVENT_PRINT | 71 | #undef DEFINE_EVENT_PRINT |
| 71 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | 72 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 60e2ce0181ee..e6b99b8c3d35 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
| @@ -328,15 +328,6 @@ config BRANCH_TRACER | |||
| 328 | 328 | ||
| 329 | Say N if unsure. | 329 | Say N if unsure. |
| 330 | 330 | ||
| 331 | config POWER_TRACER | ||
| 332 | bool "Trace power consumption behavior" | ||
| 333 | depends on X86 | ||
| 334 | select GENERIC_TRACER | ||
| 335 | help | ||
| 336 | This tracer helps developers to analyze and optimize the kernel's | ||
| 337 | power management decisions, specifically the C-state and P-state | ||
| 338 | behavior. | ||
| 339 | |||
| 340 | config KSYM_TRACER | 331 | config KSYM_TRACER |
| 341 | bool "Trace read and write access on kernel memory locations" | 332 | bool "Trace read and write access on kernel memory locations" |
| 342 | depends on HAVE_HW_BREAKPOINT | 333 | depends on HAVE_HW_BREAKPOINT |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index b477fce41edf..fd05bcaf91b0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
| @@ -792,7 +792,8 @@ extern const char *__stop___trace_bprintk_fmt[]; | |||
| 792 | 792 | ||
| 793 | #undef FTRACE_ENTRY | 793 | #undef FTRACE_ENTRY |
| 794 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ | 794 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ |
| 795 | extern struct ftrace_event_call event_##call; | 795 | extern struct ftrace_event_call \ |
| 796 | __attribute__((__aligned__(4))) event_##call; | ||
| 796 | #undef FTRACE_ENTRY_DUP | 797 | #undef FTRACE_ENTRY_DUP |
| 797 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ | 798 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ |
| 798 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) | 799 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c2a3077b7353..3f972ad98d04 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
| @@ -60,10 +60,8 @@ int trace_define_field(struct ftrace_event_call *call, const char *type, | |||
| 60 | return 0; | 60 | return 0; |
| 61 | 61 | ||
| 62 | err: | 62 | err: |
| 63 | if (field) { | 63 | if (field) |
| 64 | kfree(field->name); | 64 | kfree(field->name); |
| 65 | kfree(field->type); | ||
| 66 | } | ||
| 67 | kfree(field); | 65 | kfree(field); |
| 68 | 66 | ||
| 69 | return -ENOMEM; | 67 | return -ENOMEM; |
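The simplified error path above works because `kfree(NULL)` is a no-op, so a single `goto err` label can tear down a partially built object without tracking which members were actually allocated. A userspace analogue using `free()`, which ignores NULL the same way (hypothetical struct and field names, not the tracing code):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct field {
	char *name;
	char *type;
};

static struct field *field_create(const char *name, const char *type)
{
	struct field *f = calloc(1, sizeof(*f));   /* members start out NULL */

	if (!f)
		return NULL;
	f->name = strdup(name);
	if (!f->name)
		goto err;
	f->type = strdup(type);
	if (!f->type)
		goto err;
	return f;

err:
	/* Whichever strdup() failed left that member NULL; free() ignores
	 * NULL, so one unconditional teardown covers every failure point. */
	free(f->name);
	free(f->type);
	free(f);
	return NULL;
}

int main(void)
{
	struct field *f = field_create("common_pid", "int");

	if (!f)
		return 1;
	printf("defined field %s of type %s\n", f->name, f->type);
	free(f->name);
	free(f->type);
	free(f);
	return 0;
}
```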
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 616b135c9eb9..112561df2a0a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
| @@ -855,7 +855,6 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 855 | int i; | 855 | int i; |
| 856 | 856 | ||
| 857 | if (data) { | 857 | if (data) { |
| 858 | int cpu = iter->cpu; | ||
| 859 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); | 858 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
| 860 | 859 | ||
| 861 | /* | 860 | /* |
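The deleted line was a redundant inner declaration that shadowed an identically initialized `cpu` in the enclosing scope. A small illustration of why shadowing is worth removing even when it happens to be harmless (hypothetical values; `gcc -Wshadow` flags this pattern):

```c
#include <stdio.h>

int main(void)
{
	int cpu = 3;                 /* outer definition */

	{
		int cpu = 3;         /* shadows the outer cpu; -Wshadow warns */
		cpu = 7;             /* silently updates only the inner copy */
	}

	printf("cpu = %d\n", cpu);   /* prints 3: the outer value never changed */
	return 0;
}
```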
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 53f748b64ef3..465b36bef4ca 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
| @@ -651,12 +651,12 @@ static int create_trace_probe(int argc, char **argv) | |||
| 651 | event = strchr(group, '/') + 1; | 651 | event = strchr(group, '/') + 1; |
| 652 | event[-1] = '\0'; | 652 | event[-1] = '\0'; |
| 653 | if (strlen(group) == 0) { | 653 | if (strlen(group) == 0) { |
| 654 | pr_info("Group name is not specifiled\n"); | 654 | pr_info("Group name is not specified\n"); |
| 655 | return -EINVAL; | 655 | return -EINVAL; |
| 656 | } | 656 | } |
| 657 | } | 657 | } |
| 658 | if (strlen(event) == 0) { | 658 | if (strlen(event) == 0) { |
| 659 | pr_info("Event name is not specifiled\n"); | 659 | pr_info("Event name is not specified\n"); |
| 660 | return -EINVAL; | 660 | return -EINVAL; |
| 661 | } | 661 | } |
| 662 | } | 662 | } |
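For context on the two corrected messages: the surrounding code splits a `group/event` spec in place with `strchr()` and rejects empty halves, which is what produces these diagnostics. A standalone sketch of that split, with a hypothetical function name and error handling:

```c
#include <stdio.h>
#include <string.h>

static int split_group_event(char *spec, char **group, char **event)
{
	char *slash = strchr(spec, '/');

	if (slash) {
		*slash = '\0';            /* terminate the group in place */
		*group = spec;
		*event = slash + 1;
		if (**group == '\0') {
			fprintf(stderr, "Group name is not specified\n");
			return -1;
		}
	} else {
		*group = NULL;            /* caller falls back to a default */
		*event = spec;
	}
	if (**event == '\0') {
		fprintf(stderr, "Event name is not specified\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	char spec[] = "mygroup/myevent";  /* writable: split happens in place */
	char *group, *event;

	if (!split_group_event(spec, &group, &event))
		printf("group=%s event=%s\n", group ? group : "(default)", event);
	return 0;
}
```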
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 49cea70fbf6d..8cdda95da81a 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
| @@ -603,7 +603,7 @@ int prof_sysexit_enable(struct ftrace_event_call *call) | |||
| 603 | ret = register_trace_sys_exit(prof_syscall_exit); | 603 | ret = register_trace_sys_exit(prof_syscall_exit); |
| 604 | if (ret) { | 604 | if (ret) { |
| 605 | pr_info("event trace: Could not activate" | 605 | pr_info("event trace: Could not activate" |
| 606 | "syscall entry trace point"); | 606 | "syscall exit trace point"); |
| 607 | } else { | 607 | } else { |
| 608 | set_bit(num, enabled_prof_exit_syscalls); | 608 | set_bit(num, enabled_prof_exit_syscalls); |
| 609 | sys_prof_refcount_exit++; | 609 | sys_prof_refcount_exit++; |
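The wrapped message here is two adjacent string literals; the compiler concatenates them with nothing in between, so any separating space has to be written inside one of the literals. A short demonstration with `printf` standing in for `pr_info` (illustrative only):

```c
#include <stdio.h>

int main(void)
{
	/* Adjacent literals are merged at compile time with nothing added
	 * between them, so spacing must be written into the literals. */
	printf("event trace: Could not activate"
	       "syscall exit trace point\n");   /* "...activatesyscall..." */
	printf("event trace: Could not activate "
	       "syscall exit trace point\n");   /* separated as intended */
	return 0;
}
```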
