Diffstat (limited to 'include/linux/perf_event.h')
 -rw-r--r--  include/linux/perf_event.h | 68
 1 file changed, 66 insertions, 2 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6f8cd7da1a01..c8e375440403 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -452,6 +452,8 @@ enum perf_callchain_context {
 #include <linux/fs.h>
 #include <linux/pid_namespace.h>
 #include <linux/workqueue.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
 #include <asm/atomic.h>
 
 #define PERF_MAX_STACK_DEPTH	255
@@ -840,11 +842,56 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
+extern void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+
+/*
+ * Take a snapshot of the regs. Skip ip and frame pointer to
+ * the nth caller. We only need a few of the regs:
+ * - ip for PERF_SAMPLE_IP
+ * - cs for user_mode() tests
+ * - bp for callchains
+ * - eflags, for future purposes, just in case
+ */
+static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+{
+	unsigned long ip;
+
+	memset(regs, 0, sizeof(*regs));
+
+	switch (skip) {
+	case 1 :
+		ip = CALLER_ADDR0;
+		break;
+	case 2 :
+		ip = CALLER_ADDR1;
+		break;
+	case 3 :
+		ip = CALLER_ADDR2;
+		break;
+	case 4:
+		ip = CALLER_ADDR3;
+		break;
+	/* No need to support further for now */
+	default:
+		ip = 0;
+	}
+
+	return perf_arch_fetch_caller_regs(regs, ip, skip);
+}
+
 static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swevent_enabled[event_id]))
+	if (atomic_read(&perf_swevent_enabled[event_id])) {
+		struct pt_regs hot_regs;
+
+		if (!regs) {
+			perf_fetch_caller_regs(&hot_regs, 1);
+			regs = &hot_regs;
+		}
 		__perf_sw_event(event_id, nr, nmi, regs, addr);
+	}
 }
 
 extern void __perf_event_mmap(struct vm_area_struct *vma);
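Note: perf_fetch_caller_regs() only resolves the caller ip through ftrace's CALLER_ADDRx helpers and then hands off to the architecture. The sketch below is a hypothetical, x86-flavoured illustration of what that arch hook could look like; the register field names (ip, bp, cs, flags), __KERNEL_CS, and the use of __builtin_frame_address() are assumptions for this sketch, and real code would also have to rewind the frame pointer by 'skip' frames. It is not the in-tree implementation.

#include <linux/ptrace.h>
#include <linux/irqflags.h>
#include <asm/segment.h>

/* Hypothetical arch-side sketch of the hook declared in the hunk above. */
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
	regs->ip = ip;						/* PERF_SAMPLE_IP */
	regs->bp = (unsigned long)__builtin_frame_address(0);	/* seed for callchains */
	regs->cs = __KERNEL_CS;					/* so user_mode() says "kernel" */
	local_save_flags(regs->flags);				/* eflags, just in case */
}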
@@ -880,7 +927,8 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
+				int entry_size, struct pt_regs *regs);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
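With the new struct pt_regs * argument, the tracepoint glue is expected to pass a regs snapshot taken at the probe site rather than letting perf fetch the regs later. A hypothetical call site is sketched below; every argument except regs is just a placeholder for values the caller would already have in hand.

	struct pt_regs regs;

	perf_fetch_caller_regs(&regs, 1);	/* snapshot at the probe site */
	perf_tp_event(event_id, addr, count, record, entry_size, &regs);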
@@ -936,5 +984,21 @@ static inline void perf_event_disable(struct perf_event *event) { }
 #define perf_output_put(handle, x) \
 	perf_output_copy((handle), &(x), sizeof(x))
 
+/*
+ * This has to have a higher priority than migration_notifier in sched.c.
+ */
+#define perf_cpu_notifier(fn)					\
+do {								\
+	static struct notifier_block fn##_nb __cpuinitdata =	\
+		{ .notifier_call = fn, .priority = 20 };	\
+	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
+		(void *)(unsigned long)smp_processor_id());	\
+	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
+		(void *)(unsigned long)smp_processor_id());	\
+	fn(&fn##_nb, (unsigned long)CPU_ONLINE,			\
+		(void *)(unsigned long)smp_processor_id());	\
+	register_cpu_notifier(&fn##_nb);			\
+} while (0)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_EVENT_H */
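For reference, a hypothetical user of the new perf_cpu_notifier() macro; the callback and function names are made up for illustration, only the perf_cpu_notifier() invocation itself comes from this patch. The macro first replays CPU_UP_PREPARE, CPU_STARTING and CPU_ONLINE for the current CPU and then registers the notifier, so the caller's CPU gets initialized even though it is already online.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/perf_event.h>

/* Hypothetical callback following the standard CPU notifier pattern. */
static int __cpuinit
my_pmu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu state for 'cpu' */
		break;
	case CPU_STARTING:
		/* cpu is coming up */
		break;
	case CPU_ONLINE:
		/* cpu is fully up and running */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init my_pmu_init(void)
{
	perf_cpu_notifier(my_pmu_cpu_notify);
	return 0;
}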