-rw-r--r--  include/linux/ftrace_event.h                                                        16
-rw-r--r--  include/linux/syscalls.h                                                            24
-rw-r--r--  include/trace/ftrace.h                                                              38
-rw-r--r--  include/trace/syscall.h                                                              8
-rw-r--r--  kernel/perf_event.c                                                                  4
-rw-r--r--  kernel/trace/Makefile                                                                2
-rw-r--r--  kernel/trace/trace_event_perf.c (renamed from kernel/trace/trace_event_profile.c)   44
-rw-r--r--  kernel/trace/trace_events.c                                                          2
-rw-r--r--  kernel/trace/trace_kprobe.c                                                         28
-rw-r--r--  kernel/trace/trace_syscalls.c                                                       72
10 files changed, 119 insertions, 119 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index ac424f18ce63..c0f4b364c711 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -131,12 +131,12 @@ struct ftrace_event_call {
 	void			*mod;
 	void			*data;
 
-	int			profile_count;
-	int			(*profile_enable)(struct ftrace_event_call *);
-	void			(*profile_disable)(struct ftrace_event_call *);
+	int			perf_refcount;
+	int			(*perf_event_enable)(struct ftrace_event_call *);
+	void			(*perf_event_disable)(struct ftrace_event_call *);
 };
 
-#define FTRACE_MAX_PROFILE_SIZE	2048
+#define PERF_MAX_TRACE_SIZE	2048
 
 #define MAX_FILTER_PRED		32
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
@@ -190,17 +190,17 @@ struct perf_event;
 
 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
 
-extern int ftrace_profile_enable(int event_id);
-extern void ftrace_profile_disable(int event_id);
+extern int perf_trace_enable(int event_id);
+extern void perf_trace_disable(int event_id);
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 				     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
 extern void *
-ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
+perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
 			unsigned long *irq_flags);
 
 static inline void
-ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
 		       u64 count, unsigned long irq_flags, struct pt_regs *regs)
 {
 	struct trace_entry *entry = raw_data;
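Taken together, the renamed declarations above form the perf-side API that event handlers drive. For orientation, a minimal handler sketch of the expected calling sequence (the handler name and event id are illustrative, not part of this patch; the real callers appear in the trace_kprobe.c and trace_syscalls.c hunks below):

/* Illustrative only: calling convention for the renamed pair.
 * perf_trace_buf_prepare() reserves space in the shared per-cpu buffer
 * (saving interrupt state through irq_flags and taking a recursion
 * context in rctx); perf_trace_buf_submit() hands the filled record to
 * perf and undoes the prepare step. */
static void my_event_handler(int my_event_id, struct pt_regs *regs)
{
	struct trace_entry *rec;
	unsigned long irq_flags;
	int rctx;
	int size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)) - sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	rec = perf_trace_buf_prepare(size, my_event_id, &rctx, &irq_flags);
	if (!rec)	/* buffers not allocated, or recursion detected */
		return;

	/* ... fill the event-specific fields of the record here ... */

	perf_trace_buf_submit(rec, size, rctx, 0 /* addr */, 1 /* count */,
			      irq_flags, regs);
}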
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 8126f239edf0..51435bcc3460 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -101,18 +101,18 @@ struct perf_event_attr;
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)			       \
-	.profile_enable = prof_sysenter_enable,			       \
-	.profile_disable = prof_sysenter_disable,
+#define TRACE_SYS_ENTER_PERF_INIT(sname)			       \
+	.perf_event_enable = perf_sysenter_enable,		       \
+	.perf_event_disable = perf_sysenter_disable,
 
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)			       \
-	.profile_enable = prof_sysexit_enable,			       \
-	.profile_disable = prof_sysexit_disable,
+#define TRACE_SYS_EXIT_PERF_INIT(sname)				       \
+	.perf_event_enable = perf_sysexit_enable,		       \
+	.perf_event_disable = perf_sysexit_disable,
 #else
-#define TRACE_SYS_ENTER_PROFILE(sname)
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
-#define TRACE_SYS_EXIT_PROFILE(sname)
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
+#define TRACE_SYS_ENTER_PERF(sname)
+#define TRACE_SYS_ENTER_PERF_INIT(sname)
+#define TRACE_SYS_EXIT_PERF(sname)
+#define TRACE_SYS_EXIT_PERF_INIT(sname)
 #endif /* CONFIG_PERF_EVENTS */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
@@ -149,7 +149,7 @@ struct perf_event_attr;
 	.regfunc		= reg_event_syscall_enter,	\
 	.unregfunc		= unreg_event_syscall_enter,	\
 	.data			= (void *)&__syscall_meta_##sname,\
-	TRACE_SYS_ENTER_PROFILE_INIT(sname)			\
+	TRACE_SYS_ENTER_PERF_INIT(sname)			\
 }
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)				\
@@ -171,7 +171,7 @@ struct perf_event_attr;
 	.regfunc		= reg_event_syscall_exit,	\
 	.unregfunc		= unreg_event_syscall_exit,	\
 	.data			= (void *)&__syscall_meta_##sname,\
-	TRACE_SYS_EXIT_PROFILE_INIT(sname)			\
+	TRACE_SYS_EXIT_PERF_INIT(sname)				\
 }
 
 #define SYSCALL_METADATA(sname, nb)			\
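To make the macro plumbing concrete: with CONFIG_PERF_EVENTS=y, the renamed init macros simply drop the two perf callbacks into the event initializer. With an illustrative syscall name, TRACE_SYS_ENTER_PERF_INIT(chdir) expands to

	.perf_event_enable = perf_sysenter_enable,
	.perf_event_disable = perf_sysenter_disable,

Note that the sname argument is unused in this branch: one perf_sysenter_enable/perf_sysenter_disable pair (declared in include/trace/syscall.h below) serves every syscall and dispatches on the event's syscall number. In the #else branch the macros expand to nothing, leaving the callbacks NULL.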
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f31bb8b9777c..25ab56f75d65 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call(			\
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)			\
 									\
-static void ftrace_profile_##name(proto);				\
+static void perf_trace_##name(proto);					\
 									\
 static notrace int							\
-ftrace_profile_enable_##name(struct ftrace_event_call *unused)		\
+perf_trace_enable_##name(struct ftrace_event_call *unused)		\
 {									\
-	return register_trace_##name(ftrace_profile_##name);		\
+	return register_trace_##name(perf_trace_##name);		\
 }									\
 									\
 static notrace void							\
-ftrace_profile_disable_##name(struct ftrace_event_call *unused)		\
+perf_trace_disable_##name(struct ftrace_event_call *unused)		\
 {									\
-	unregister_trace_##name(ftrace_profile_##name);			\
+	unregister_trace_##name(perf_trace_##name);			\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)		\
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call)					\
-	.profile_enable = ftrace_profile_enable_##call,			\
-	.profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call)						\
+	.perf_event_enable = perf_trace_enable_##call,			\
+	.perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
+#define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
@@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.unregfunc		= ftrace_raw_unreg_event_##call,	\
 	.print_fmt		= print_fmt_##template,			\
 	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.unregfunc		= ftrace_raw_unreg_event_##call,	\
 	.print_fmt		= print_fmt_##call,			\
 	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
  *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
  * {
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ftrace_event_call *event_call = &event_<call>;
@@ -757,7 +757,7 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
+perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
 			    proto)					\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
@@ -774,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
-	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
+	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
 		__entry_size, event_call->id, &rctx, &irq_flags);	\
 	if (!entry)							\
 		return;							\
@@ -788,17 +788,17 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
 	__regs = &__get_cpu_var(perf_trace_regs);			\
 	perf_fetch_caller_regs(__regs, 2);				\
 									\
-	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,	\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
 			       __count, irq_flags, __regs);		\
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-static notrace void ftrace_profile_##call(proto)			\
+static notrace void perf_trace_##call(proto)				\
 {									\
 	struct ftrace_event_call *event_call = &event_##call;		\
 									\
-	ftrace_profile_templ_##template(event_call, args);		\
+	perf_trace_templ_##template(event_call, args);			\
 }
 
 #undef DEFINE_EVENT_PRINT
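A note on the generated symbols, since they are what _TRACE_PERF_INIT() wires up: DECLARE_EVENT_CLASS emits one shared perf body per event class, and each DEFINE_EVENT emits only a thin wrapper. For a hypothetical class "foo_class" with an event "foo" (names illustrative, proto/args schematic), the macros above expand along these lines:

	static notrace void
	perf_trace_templ_foo_class(struct ftrace_event_call *event_call, proto)
	{
		/* size the record, perf_trace_buf_prepare(), fill, submit */
	}

	static notrace void perf_trace_foo(proto)
	{
		struct ftrace_event_call *event_call = &event_foo;

		perf_trace_templ_foo_class(event_call, args);
	}

so each additional event in a class costs one small wrapper rather than a duplicated submission path.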
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 0387100752f0..e5e5f48dbfb3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
 #endif
 
 #endif /* _TRACE_SYSCALL_H */
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 45b4b6e55891..c502b18594cc 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4347,7 +4347,7 @@ static int perf_tp_event_match(struct perf_event *event,
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-	ftrace_profile_disable(event->attr.config);
+	perf_trace_disable(event->attr.config);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4361,7 +4361,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 	    !capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (ftrace_profile_enable(event->attr.config))
+	if (perf_trace_enable(event->attr.config))
 		return NULL;
 
 	event->destroy = tp_perf_event_destroy;
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d00c6fe23f54..78edc6490038 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
-obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_perf.c
index e66d21e15a0f..f315b12a41d8 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,5 +1,5 @@
 /*
- * trace event based perf counter profiling
+ * trace event based perf event profiling/tracing
  *
  * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
  * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
@@ -14,20 +14,20 @@ DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
+typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
 
 /* Count the events in use (per event id, not per instance) */
-static int	total_profile_count;
+static int	total_ref_count;
 
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event)
 {
 	char *buf;
 	int ret = -ENOMEM;
 
-	if (event->profile_count++ > 0)
+	if (event->perf_refcount++ > 0)
 		return 0;
 
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf;
@@ -41,35 +41,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 		rcu_assign_pointer(perf_trace_buf_nmi, buf);
 	}
 
-	ret = event->profile_enable(event);
+	ret = event->perf_event_enable(event);
 	if (!ret) {
-		total_profile_count++;
+		total_ref_count++;
 		return 0;
 	}
 
 fail_buf_nmi:
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		free_percpu(perf_trace_buf_nmi);
 		free_percpu(perf_trace_buf);
 		perf_trace_buf_nmi = NULL;
 		perf_trace_buf = NULL;
 	}
 fail_buf:
-	event->profile_count--;
+	event->perf_refcount--;
 
 	return ret;
 }
 
-int ftrace_profile_enable(int event_id)
+int perf_trace_enable(int event_id)
 {
 	struct ftrace_event_call *event;
 	int ret = -EINVAL;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id && event->profile_enable &&
+		if (event->id == event_id && event->perf_event_enable &&
 		    try_module_get(event->mod)) {
-			ret = ftrace_profile_enable_event(event);
+			ret = perf_trace_event_enable(event);
 			break;
 		}
 	}
@@ -78,16 +78,16 @@ int ftrace_profile_enable(int event_id)
 	return ret;
 }
 
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+static void perf_trace_event_disable(struct ftrace_event_call *event)
 {
 	char *buf, *nmi_buf;
 
-	if (--event->profile_count > 0)
+	if (--event->perf_refcount > 0)
 		return;
 
-	event->profile_disable(event);
+	event->perf_event_disable(event);
 
-	if (!--total_profile_count) {
+	if (!--total_ref_count) {
 		buf = perf_trace_buf;
 		rcu_assign_pointer(perf_trace_buf, NULL);
 
@@ -105,14 +105,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 	}
 }
 
-void ftrace_profile_disable(int event_id)
+void perf_trace_disable(int event_id)
 {
 	struct ftrace_event_call *event;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id) {
-			ftrace_profile_disable_event(event);
+			perf_trace_event_disable(event);
 			module_put(event->mod);
 			break;
 		}
@@ -120,8 +120,8 @@ void ftrace_profile_disable(int event_id)
 	mutex_unlock(&event_mutex);
 }
 
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
+__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 					int *rctxp, unsigned long *irq_flags)
 {
 	struct trace_entry *entry;
 	char *trace_buf, *raw_data;
@@ -162,4 +162,4 @@ err_recursion:
 	local_irq_restore(*irq_flags);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
+EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
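The enable/disable paths above implement a two-level reference count: each event keeps its own perf_refcount, while the global total_ref_count decides when the shared buffers are allocated and released. A self-contained userspace analog of the pattern, for illustration only (plain C; malloc stands in for alloc_percpu, and the NMI buffer, locking, and RCU publication are elided):

#include <stdio.h>
#include <stdlib.h>

#define PERF_MAX_TRACE_SIZE 2048

struct event {
	int refcount;			/* cf. perf_refcount */
};

static char *shared_buf;		/* cf. perf_trace_buf */
static int total_ref_count;		/* events enabled anywhere */

static int event_enable(struct event *ev)
{
	if (ev->refcount++ > 0)		/* this event is already live */
		return 0;

	if (!total_ref_count) {		/* first event anywhere: allocate */
		shared_buf = malloc(PERF_MAX_TRACE_SIZE);
		if (!shared_buf) {
			ev->refcount--;	/* cf. the fail_buf unwind path */
			return -1;
		}
	}
	total_ref_count++;
	return 0;
}

static void event_disable(struct event *ev)
{
	if (--ev->refcount > 0)		/* other users of this event remain */
		return;

	if (!--total_ref_count) {	/* last event anywhere: release */
		free(shared_buf);
		shared_buf = NULL;
	}
}

int main(void)
{
	struct event a = { 0 }, b = { 0 };

	event_enable(&a);		/* allocates the shared buffer */
	event_enable(&b);		/* reuses it */
	event_disable(&a);		/* buffer stays: b is still live */
	event_disable(&b);		/* last user gone: buffer freed */
	printf("buffer after all disabled: %p\n", (void *)shared_buf);
	return 0;
}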
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3f972ad98d04..beab8bf2f310 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	trace_create_file("enable", 0644, call->dir, call,
 			  enable);
 
-	if (call->id && call->profile_enable)
+	if (call->id && call->perf_event_enable)
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
 
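The id file gated on perf_event_enable here is how userspace names a tracepoint to perf: the number it contains becomes attr.config in perf_event_open(2), which is exactly the event_id that perf_trace_enable() looks up. A sketch of that handshake from userspace (the tracepoint path is illustrative and depends on where debugfs is mounted; per the CAP_SYS_ADMIN check visible in the kernel/perf_event.c hunk above, this typically needs root):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long id;
	FILE *f;
	int fd;

	/* Read the event id exposed by event_create_dir() above. */
	f = fopen("/sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id", "r");
	if (!f || fscanf(f, "%lld", &id) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = id;	/* the event_id handed to perf_trace_enable() */

	/* pid = 0 (this task), cpu = -1 (any), no group fd, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;
	printf("tracepoint counter fd = %d\n", fd);
	close(fd);
	return 0;
}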
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f7a20a8bfb31..1251e367bae9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_perf_func(struct kprobe *kp,
 					 struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 					    struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1271,11 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1,
+	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
 			       irq_flags, regs);
 }
 
-static int probe_profile_enable(struct ftrace_event_call *call)
+static int probe_perf_enable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1287,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
 	return enable_kprobe(&tp->rp.kp);
 }
 
-static void probe_profile_disable(struct ftrace_event_call *call)
+static void probe_perf_disable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1312,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 		kprobe_trace_func(kp, regs);
#ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kprobe_profile_func(kp, regs);
+		kprobe_perf_func(kp, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1326,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 		kretprobe_trace_func(ri, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kretprobe_profile_func(ri, regs);
+		kretprobe_perf_func(ri, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1359,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
 	call->unregfunc = probe_event_disable;
 
 #ifdef CONFIG_PERF_EVENTS
-	call->profile_enable = probe_profile_enable;
-	call->profile_disable = probe_profile_disable;
+	call->perf_event_enable = probe_perf_enable;
+	call->perf_event_disable = probe_perf_disable;
 #endif
 	call->data = tp;
 	ret = trace_add_event_call(call);
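For kprobe events the flow is the same; the event just comes from the dynamic kprobe_events interface instead of a static TRACE_EVENT. A minimal, illustrative way to create an event that probe_perf_enable() and kprobe_perf_func() above will then service (path depends on the debugfs mount; event syntax per Documentation/trace/kprobetrace.txt; needs root):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/kprobe_events", "w");

	if (!f)
		return 1;
	/* Probe at do_sys_open; appears as events/kprobes/myprobe,
	 * whose id file can then be fed to perf_event_open() as in
	 * the sketch after the trace_events.c hunk. */
	fprintf(f, "p:myprobe do_sys_open\n");
	return fclose(f) ? 1 : 0;
}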
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7e6e84fb7b6c..33c2a5b769dc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
 
 #ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	size = ALIGN(size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		      "profile buffer not large enough"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
 		return;
 
-	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
 				sys_data->enter_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_enter)
-		ret = register_trace_sys_enter(prof_syscall_enter);
+	if (!sys_perf_refcount_enter)
+		ret = register_trace_sys_enter(perf_syscall_enter);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall entry trace point");
 	} else {
-		set_bit(num, enabled_prof_enter_syscalls);
-		sys_prof_refcount_enter++;
+		set_bit(num, enabled_perf_enter_syscalls);
+		sys_perf_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_enter--;
-	clear_bit(num, enabled_prof_enter_syscalls);
-	if (!sys_prof_refcount_enter)
-		unregister_trace_sys_enter(prof_syscall_enter);
+	sys_perf_refcount_enter--;
+	clear_bit(num, enabled_perf_enter_syscalls);
+	if (!sys_perf_refcount_enter)
+		unregister_trace_sys_enter(perf_syscall_enter);
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	 * Impossible, but be paranoid with the future
 	 * How to put this check outside runtime?
 	 */
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		"exit event has grown above profile buffer size"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		"exit event has grown above perf buffer size"))
 		return;
 
-	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
 				sys_data->exit_event->id, &rctx, &flags);
 	if (!rec)
 		return;
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
 
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_exit)
-		ret = register_trace_sys_exit(prof_syscall_exit);
+	if (!sys_perf_refcount_exit)
+		ret = register_trace_sys_exit(perf_syscall_exit);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall exit trace point");
 	} else {
-		set_bit(num, enabled_prof_exit_syscalls);
-		sys_prof_refcount_exit++;
+		set_bit(num, enabled_perf_exit_syscalls);
+		sys_perf_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_exit--;
-	clear_bit(num, enabled_prof_exit_syscalls);
-	if (!sys_prof_refcount_exit)
-		unregister_trace_sys_exit(prof_syscall_exit);
+	sys_perf_refcount_exit--;
+	clear_bit(num, enabled_perf_exit_syscalls);
+	if (!sys_perf_refcount_exit)
+		unregister_trace_sys_exit(perf_syscall_exit);
 	mutex_unlock(&syscall_trace_lock);
 }
 
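One design point in this file, unchanged by the rename: the sys_enter/sys_exit tracepoints are registered only once and refcounted (sys_perf_refcount_enter/exit), while per-syscall selection is a bitmap consulted on every syscall, so an untraced syscall costs a single bit test. A self-contained sketch of that gate (userspace C for illustration; the kernel uses DECLARE_BITMAP/set_bit/test_bit for the same effect):

#include <limits.h>
#include <stdio.h>

#define NR_SYSCALLS 512			/* illustrative; the kernel uses NR_syscalls */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS ((NR_SYSCALLS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* cf. enabled_perf_enter_syscalls */
static unsigned long enabled_enter[BITMAP_LONGS];

static void set_bit_(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/* Stand-in for perf_syscall_enter(): runs for *every* syscall once the
 * tracepoint is registered; the bitmap filters out untraced syscalls. */
static void syscall_enter_hook(int syscall_nr)
{
	if (!test_bit_(syscall_nr, enabled_enter))
		return;			/* not traced: one bit test and out */
	printf("would build and submit a perf record for syscall %d\n",
	       syscall_nr);
}

int main(void)
{
	set_bit_(42, enabled_enter);	/* cf. perf_sysenter_enable() */
	syscall_enter_hook(41);		/* filtered out */
	syscall_enter_hook(42);		/* traced */
	return 0;
}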
