author     Linus Torvalds <torvalds@linux-foundation.org>  2010-03-18 19:52:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-03-18 19:52:46 -0400
commit     f82c37e7bb4c4d9b6a476c642d5c2d2efbd6f240
tree       09fc553c2fb6f527962048d139159dc139e04afc /include
parent     c6b9e73f2fee8bb86058f296de808b326473456b
parent     dcd5c1662db59a6b82942f47fb6ac9dd63f6d3dd
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
perf: Fix unexported generic perf_arch_fetch_caller_regs
perf record: Don't try to find buildids in a zero sized file
perf: export perf_trace_regs and perf_arch_fetch_caller_regs
perf, x86: Fix hw_perf_enable() event assignment
perf, ppc: Fix compile error due to new cpu notifiers
perf: Make the install relative to DESTDIR if specified
kprobes: Calculate the index correctly when freeing the out-of-line execution slot
perf tools: Fix sparse CPU numbering related bugs
perf_event: Fix oops triggered by cpu offline/online
perf: Drop the obsolete profile naming for trace events
perf: Take a hot regs snapshot for trace events
perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot
perf/x86-64: Use frame pointer to walk on irq and process stacks
lockdep: Move lock events under lockdep recursion protection
perf report: Print the map table just after samples for which no map was found
perf report: Add multiple event support
perf session: Change perf_session post processing functions to take histogram tree
perf session: Add storage for separating event types in report
perf session: Change add_hist_entry to take the tree root instead of session
perf record: Add ID and to recorded event data when recording multiple events
...
Diffstat (limited to 'include')
 include/linux/ftrace_event.h | 23
 include/linux/perf_event.h   | 59
 include/linux/syscalls.h     | 24
 include/trace/ftrace.h       | 44
 include/trace/syscall.h      |  8
 5 files changed, 111 insertions(+), 47 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 6b7c444ab8f6..c0f4b364c711 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -131,12 +131,12 @@ struct ftrace_event_call {
 	void			*mod;
 	void			*data;
 
-	int			profile_count;
-	int			(*profile_enable)(struct ftrace_event_call *);
-	void			(*profile_disable)(struct ftrace_event_call *);
+	int			perf_refcount;
+	int			(*perf_event_enable)(struct ftrace_event_call *);
+	void			(*perf_event_disable)(struct ftrace_event_call *);
 };
 
-#define FTRACE_MAX_PROFILE_SIZE	2048
+#define PERF_MAX_TRACE_SIZE	2048
 
 #define MAX_FILTER_PRED		32
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
@@ -187,22 +187,25 @@ do { \
 
 #ifdef CONFIG_PERF_EVENTS
 struct perf_event;
-extern int ftrace_profile_enable(int event_id);
-extern void ftrace_profile_disable(int event_id);
+
+DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+
+extern int perf_trace_enable(int event_id);
+extern void perf_trace_disable(int event_id);
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 				     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
 extern void *
-ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
+perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
 			unsigned long *irq_flags);
 
 static inline void
-ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
-		       u64 count, unsigned long irq_flags)
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+		      u64 count, unsigned long irq_flags, struct pt_regs *regs)
 {
 	struct trace_entry *entry = raw_data;
 
-	perf_tp_event(entry->type, addr, count, raw_data, size);
+	perf_tp_event(entry->type, addr, count, raw_data, size, regs);
 	perf_swevent_put_recursion_context(rctx);
 	local_irq_restore(irq_flags);
 }
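
The renamed helpers are still paired the same way; the behavioural change is that the submit side now carries a pt_regs snapshot through to perf_tp_event(). Below is a minimal sketch, not part of this patch, assuming a hypothetical caller that already owns an event id; the real callers are generated from include/trace/ftrace.h further down, and perf_fetch_caller_regs() is added in the perf_event.h hunks below.

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/ftrace_event.h>
#include <linux/perf_event.h>

/* Hedged sketch only: "my_event_id", "addr" and the skip depth are
 * placeholders, not symbols introduced by this patch. */
static void my_perf_trace_hit(int my_event_id, u64 addr)
{
	struct trace_entry *entry;
	struct pt_regs *regs;
	unsigned long irq_flags;
	int rctx;
	int size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64)) - sizeof(u32);

	/* reserve per-cpu buffer space, disable irqs, grab a recursion slot */
	entry = perf_trace_buf_prepare(size, my_event_id, &rctx, &irq_flags);
	if (!entry)
		return;

	/* ... event-specific fields would be filled in after the header ... */

	/* snapshot our caller's regs into the per-cpu storage declared above */
	regs = &__get_cpu_var(perf_trace_regs);
	perf_fetch_caller_regs(regs, 1);

	/* hand record plus regs to perf, drop recursion slot, restore irqs */
	perf_trace_buf_submit(entry, size, rctx, addr, 1, irq_flags, regs);
}

The generated template below uses a skip depth of 2 rather than 1 because it goes through one extra wrapper frame.
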
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6f8cd7da1a01..95477038a72a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -452,6 +452,8 @@ enum perf_callchain_context {
 #include <linux/fs.h>
 #include <linux/pid_namespace.h>
 #include <linux/workqueue.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
 #include <asm/atomic.h>
 
 #define PERF_MAX_STACK_DEPTH		255
@@ -847,6 +849,44 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 		__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
+extern void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+
+/*
+ * Take a snapshot of the regs. Skip ip and frame pointer to
+ * the nth caller. We only need a few of the regs:
+ * - ip for PERF_SAMPLE_IP
+ * - cs for user_mode() tests
+ * - bp for callchains
+ * - eflags, for future purposes, just in case
+ */
+static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+{
+	unsigned long ip;
+
+	memset(regs, 0, sizeof(*regs));
+
+	switch (skip) {
+	case 1 :
+		ip = CALLER_ADDR0;
+		break;
+	case 2 :
+		ip = CALLER_ADDR1;
+		break;
+	case 3 :
+		ip = CALLER_ADDR2;
+		break;
+	case 4:
+		ip = CALLER_ADDR3;
+		break;
+	/* No need to support further for now */
+	default:
+		ip = 0;
+	}
+
+	return perf_arch_fetch_caller_regs(regs, ip, skip);
+}
+
 extern void __perf_event_mmap(struct vm_area_struct *vma);
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)
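
perf_fetch_caller_regs() gives call sites that have no interrupt or exception frame a cheap way to fabricate the few register values perf actually samples (ip, cs, bp, eflags). A minimal sketch of such a call site follows; it is not part of this patch, and the wrapper name, event id and skip depth are illustrative.

#include <linux/perf_event.h>

/* Hedged sketch only: the wrapper and its event id are illustrative. */
static inline void my_sw_event_here(u32 event_id, u64 addr)
{
	struct pt_regs regs;

	/* skip == 1: report our immediate caller as the sample IP */
	perf_fetch_caller_regs(&regs, 1);

	/* nmi == 0: this call site is not in NMI context */
	perf_sw_event(event_id, 1, 0, &regs, addr);
}

The trace-event path introduced by this merge avoids the on-stack pt_regs used above and writes into the per-cpu perf_trace_regs buffer instead.
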
@@ -880,7 +920,8 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
+			  int entry_size, struct pt_regs *regs);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -936,5 +977,21 @@ static inline void perf_event_disable(struct perf_event *event) { }
 #define perf_output_put(handle, x) \
 	perf_output_copy((handle), &(x), sizeof(x))
 
+/*
+ * This has to have a higher priority than migration_notifier in sched.c.
+ */
+#define perf_cpu_notifier(fn)					\
+do {								\
+	static struct notifier_block fn##_nb __cpuinitdata =	\
+		{ .notifier_call = fn, .priority = 20 };	\
+	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
+		(void *)(unsigned long)smp_processor_id());	\
+	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
+		(void *)(unsigned long)smp_processor_id());	\
+	fn(&fn##_nb, (unsigned long)CPU_ONLINE,			\
+		(void *)(unsigned long)smp_processor_id());	\
+	register_cpu_notifier(&fn##_nb);			\
+} while (0)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_EVENT_H */
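
perf_cpu_notifier() is a statement macro: it immediately replays CPU_UP_PREPARE, CPU_STARTING and CPU_ONLINE for the CPU it runs on, then registers the block (priority 20, ahead of the scheduler's migration notifier) for future hotplug transitions, so it must be invoked from function context. A usage sketch, not part of this patch, with an illustrative callback:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/perf_event.h>

/* Hedged sketch only: my_perf_cpu_notify()/my_perf_init() are illustrative. */
static int my_perf_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		pr_debug("sketch: preparing cpu %u\n", cpu);
		break;
	case CPU_ONLINE:
		pr_debug("sketch: cpu %u online\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static int __init my_perf_init(void)
{
	/* replays UP_PREPARE/STARTING/ONLINE for this CPU, then registers
	 * the notifier block for later hotplug events */
	perf_cpu_notifier(my_perf_cpu_notify);
	return 0;
}
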
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 44f2ad0e8825..f994ae58a002 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -105,18 +105,18 @@ struct perf_event_attr;
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)			\
-	.profile_enable = prof_sysenter_enable,			\
-	.profile_disable = prof_sysenter_disable,
+#define TRACE_SYS_ENTER_PERF_INIT(sname)			\
+	.perf_event_enable = perf_sysenter_enable,		\
+	.perf_event_disable = perf_sysenter_disable,
 
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)			\
-	.profile_enable = prof_sysexit_enable,			\
-	.profile_disable = prof_sysexit_disable,
+#define TRACE_SYS_EXIT_PERF_INIT(sname)				\
+	.perf_event_enable = perf_sysexit_enable,		\
+	.perf_event_disable = perf_sysexit_disable,
 #else
-#define TRACE_SYS_ENTER_PROFILE(sname)
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
-#define TRACE_SYS_EXIT_PROFILE(sname)
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
+#define TRACE_SYS_ENTER_PERF(sname)
+#define TRACE_SYS_ENTER_PERF_INIT(sname)
+#define TRACE_SYS_EXIT_PERF(sname)
+#define TRACE_SYS_EXIT_PERF_INIT(sname)
 #endif /* CONFIG_PERF_EVENTS */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
@@ -153,7 +153,7 @@ struct perf_event_attr;
 		.regfunc		= reg_event_syscall_enter,	\
 		.unregfunc		= unreg_event_syscall_enter,	\
 		.data			= (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_ENTER_PROFILE_INIT(sname)			\
+		TRACE_SYS_ENTER_PERF_INIT(sname)			\
 	}
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
@@ -175,7 +175,7 @@ struct perf_event_attr;
 		.regfunc		= reg_event_syscall_exit,	\
 		.unregfunc		= unreg_event_syscall_exit,	\
 		.data			= (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_EXIT_PROFILE_INIT(sname)			\
+		TRACE_SYS_EXIT_PERF_INIT(sname)				\
 	}
 
 #define SYSCALL_METADATA(sname, nb)					\
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 601ad7744247..ea6f9d4a20e9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call( \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)			\
 									\
-static void ftrace_profile_##name(proto);				\
+static void perf_trace_##name(proto);					\
 									\
 static notrace int							\
-ftrace_profile_enable_##name(struct ftrace_event_call *unused)		\
+perf_trace_enable_##name(struct ftrace_event_call *unused)		\
 {									\
-	return register_trace_##name(ftrace_profile_##name);		\
+	return register_trace_##name(perf_trace_##name);		\
 }									\
 									\
 static notrace void							\
-ftrace_profile_disable_##name(struct ftrace_event_call *unused)	\
+perf_trace_disable_##name(struct ftrace_event_call *unused)		\
 {									\
-	unregister_trace_##name(ftrace_profile_##name);			\
+	unregister_trace_##name(perf_trace_##name);			\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call)					\
-	.profile_enable = ftrace_profile_enable_##call,			\
-	.profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call)						\
+	.perf_event_enable = perf_trace_enable_##call,			\
+	.perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
+#define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
@@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.unregfunc		= ftrace_raw_unreg_event_##call,	\
 	.print_fmt		= print_fmt_##template,			\
 	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.unregfunc		= ftrace_raw_unreg_event_##call,	\
 	.print_fmt		= print_fmt_##call,			\
 	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
  *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
  * {
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ftrace_event_call *event_call = &event_<call>;
@@ -757,13 +757,14 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
-			    proto)					\
+perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
+			proto)						\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
+	struct pt_regs *__regs;						\
 	int __entry_size;						\
 	int __data_size;						\
 	int rctx;							\
@@ -773,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
-	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
+	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
 		__entry_size, event_call->id, &rctx, &irq_flags);	\
 	if (!entry)							\
 		return;							\
@@ -784,17 +785,20 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 									\
 	{ assign; }							\
 									\
-	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,	\
-			       __count, irq_flags);			\
+	__regs = &__get_cpu_var(perf_trace_regs);			\
+	perf_fetch_caller_regs(__regs, 2);				\
+									\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
+			      __count, irq_flags, __regs);		\
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)		\
-static notrace void ftrace_profile_##call(proto)		\
+static notrace void perf_trace_##call(proto)			\
 {								\
 	struct ftrace_event_call *event_call = &event_##call;	\
 								\
-	ftrace_profile_templ_##template(event_call, args);	\
+	perf_trace_templ_##template(event_call, args);		\
 }
 
 #undef DEFINE_EVENT_PRINT
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 0387100752f0..e5e5f48dbfb3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
 #endif
 
 #endif /* _TRACE_SYSCALL_H */