Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--	include/linux/perf_event.h	95
1 file changed, 90 insertions(+), 5 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 7b18b4fd5df7..c8e375440403 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -452,6 +452,8 @@ enum perf_callchain_context {
 #include <linux/fs.h>
 #include <linux/pid_namespace.h>
 #include <linux/workqueue.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
 #include <asm/atomic.h>
 
 #define PERF_MAX_STACK_DEPTH		255
@@ -487,9 +489,8 @@ struct hw_perf_event {
 			struct hrtimer	hrtimer;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-		union { /* breakpoint */
-			struct arch_hw_breakpoint	info;
-		};
+		/* breakpoint */
+		struct arch_hw_breakpoint	info;
 #endif
 	};
 	atomic64_t			prev_count;
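
Note: the dropped union was anonymous and had a single member, so this hunk changes declaration bookkeeping only; member access is identical before and after, and accessors in arch code (such as counter_arch_bp()) keep compiling unchanged. A minimal sketch (example_arch_bp() is a hypothetical name):

    static inline struct arch_hw_breakpoint *example_arch_bp(struct perf_event *bp)
    {
            /* Resolves the same way with or without the anonymous union. */
            return &bp->hw.info;
    }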
@@ -802,6 +803,13 @@ struct perf_sample_data {
 	struct perf_raw_record		*raw;
 };
 
+static inline
+void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+{
+	data->addr = addr;
+	data->raw = NULL;
+}
+
 extern void perf_output_sample(struct perf_output_handle *handle,
 			       struct perf_event_header *header,
 			       struct perf_sample_data *data,
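
Note: a minimal caller sketch for the new initializer, modeled on a PMU overflow path (example_pmu_overflow() is hypothetical; perf_event_overflow() is the existing overflow entry point declared elsewhere in this header):

    static void example_pmu_overflow(struct perf_event *event, struct pt_regs *regs)
    {
            struct perf_sample_data data;

            /* The helper's point: stack-allocated sample data must not carry
             * a stale ->raw pointer into the output path. */
            perf_sample_data_init(&data, 0);

            if (perf_event_overflow(event, 1, &data, regs))
                    ; /* e.g. throttle or disable the event */
    }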
@@ -834,11 +842,56 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
+extern void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+
+/*
+ * Take a snapshot of the regs. Skip ip and frame pointer to
+ * the nth caller. We only need a few of the regs:
+ * - ip for PERF_SAMPLE_IP
+ * - cs for user_mode() tests
+ * - bp for callchains
+ * - eflags, for future purposes, just in case
+ */
+static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+{
+	unsigned long ip;
+
+	memset(regs, 0, sizeof(*regs));
+
+	switch (skip) {
+	case 1 :
+		ip = CALLER_ADDR0;
+		break;
+	case 2 :
+		ip = CALLER_ADDR1;
+		break;
+	case 3 :
+		ip = CALLER_ADDR2;
+		break;
+	case 4:
+		ip = CALLER_ADDR3;
+		break;
+	/* No need to support further for now */
+	default:
+		ip = 0;
+	}
+
+	return perf_arch_fetch_caller_regs(regs, ip, skip);
+}
+
 static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swevent_enabled[event_id]))
+	if (atomic_read(&perf_swevent_enabled[event_id])) {
+		struct pt_regs hot_regs;
+
+		if (!regs) {
+			perf_fetch_caller_regs(&hot_regs, 1);
+			regs = &hot_regs;
+		}
 		__perf_sw_event(event_id, nr, nmi, regs, addr);
+	}
 }
 
 extern void __perf_event_mmap(struct vm_area_struct *vma);
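
Note: the visible effect is that software events may now fire with regs == NULL: perf_sw_event() takes a hot snapshot of the caller's registers itself instead of requiring one from the call site. A sketch under that assumption (example_emulate_insn() is a hypothetical call site; the event id is a real enum value):

    static void example_emulate_insn(void)
    {
            /* No pt_regs at hand in this path; NULL now means "snapshot my
             * caller's regs via perf_fetch_caller_regs()". */
            perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, NULL, 0);
    }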
@@ -858,8 +911,24 @@ extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+static inline bool perf_paranoid_tracepoint_raw(void)
+{
+	return sysctl_perf_event_paranoid > -1;
+}
+
+static inline bool perf_paranoid_cpu(void)
+{
+	return sysctl_perf_event_paranoid > 0;
+}
+
+static inline bool perf_paranoid_kernel(void)
+{
+	return sysctl_perf_event_paranoid > 1;
+}
+
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
+			  int entry_size, struct pt_regs *regs);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
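
Note: a sketch of how the paranoia helpers are meant to be consumed (example_check_raw_tracepoint() is hypothetical; capable() and CAP_SYS_ADMIN are the stock capability interface, and the real call sites live outside this header):

    static int example_check_raw_tracepoint(void)
    {
            /* Raw tracepoint data requires privilege unless the sysctl
             * perf_event_paranoid is set to -1. */
            if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                    return -EPERM;
            return 0;
    }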
@@ -915,5 +984,21 @@ static inline void perf_event_disable(struct perf_event *event) { }
 #define perf_output_put(handle, x) \
 	perf_output_copy((handle), &(x), sizeof(x))
 
+/*
+ * This has to have a higher priority than migration_notifier in sched.c.
+ */
+#define perf_cpu_notifier(fn)					\
+do {								\
+	static struct notifier_block fn##_nb __cpuinitdata =	\
+		{ .notifier_call = fn, .priority = 20 };	\
+	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
+		(void *)(unsigned long)smp_processor_id());	\
+	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
+		(void *)(unsigned long)smp_processor_id());	\
+	fn(&fn##_nb, (unsigned long)CPU_ONLINE,			\
+		(void *)(unsigned long)smp_processor_id());	\
+	register_cpu_notifier(&fn##_nb);			\
+} while (0)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_EVENT_H */
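
Note: a registration sketch for the new macro (example_pmu_notify() and example_pmu_init() are hypothetical). Besides registering the notifier block at priority 20, the macro replays CPU_UP_PREPARE, CPU_STARTING and CPU_ONLINE for the registering CPU, so per-CPU state gets initialized even though that CPU's hotplug callbacks have already run:

    static int __cpuinit
    example_pmu_notify(struct notifier_block *nb, unsigned long action, void *hcpu)
    {
            switch (action & ~CPU_TASKS_FROZEN) {
            case CPU_STARTING:
                    /* Program per-CPU PMU state for CPU (long)hcpu. */
                    break;
            }
            return NOTIFY_OK;
    }

    static int __init example_pmu_init(void)
    {
            perf_cpu_notifier(example_pmu_notify);
            return 0;
    }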