Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--	include/linux/perf_event.h	| 57 +++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 40150f345982..dda5b0a3ff60 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -215,8 +215,9 @@ struct perf_event_attr {
 				 */
 				precise_ip     :  2, /* skid constraint       */
 				mmap_data      :  1, /* non-exec mmap data    */
+				sample_id_all  :  1, /* sample_type all events */
 
-				__reserved_1   : 46;
+				__reserved_1   : 45;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
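The new sample_id_all bit is the user-visible ABI change in this hunk. As a rough user-space illustration (not part of the patch; the event choice, sample settings and raw syscall wrapper are assumptions for the sketch), an opener could set it like this:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>

/* Illustrative only: open a sampling event with sample_id_all set so that
 * MMAP/COMM/FORK/EXIT records also carry the TID/TIME/CPU identity fields. */
static int open_cycles_event(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU;
	attr.sample_id_all = 1;		/* the bit added by this hunk */
	attr.mmap = 1;			/* generate non-sample records too */
	attr.comm = 1;

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}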
@@ -327,6 +328,15 @@ struct perf_event_header {
 enum perf_event_type {
 
 	/*
+	 * If perf_event_attr.sample_id_all is set then all event types will
+	 * have the sample_type selected fields related to where/when
+	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
+	 * described in PERF_RECORD_SAMPLE below, it will be stashed just after
+	 * the perf_event_header and the fields already present for the existing
+	 * fields, i.e. at the end of the payload. That way a newer perf.data
+	 * file will be supported by older perf tools, with these new optional
+	 * fields being ignored.
+	 *
 	 * The MMAP events record the PROT_EXEC mappings so that we can
 	 * correlate userspace IPs to code. They have the following structure:
 	 *
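The comment above describes the layout contract. The following user-space sketch is not from the patch; the struct and helper names are illustrative, and it assumes sample_type selected TID, TIME, ID, STREAM_ID and CPU, with the fields appended in the same order they appear in PERF_RECORD_SAMPLE:

#include <stdint.h>
#include <string.h>
#include <linux/perf_event.h>

/* Identity fields stashed at the end of non-sample records when
 * sample_id_all is set (assumed layout under the sample_type bits above). */
struct sample_id_tail {
	uint32_t pid, tid;	/* PERF_SAMPLE_TID */
	uint64_t time;		/* PERF_SAMPLE_TIME */
	uint64_t id;		/* PERF_SAMPLE_ID */
	uint64_t stream_id;	/* PERF_SAMPLE_STREAM_ID */
	uint32_t cpu, res;	/* PERF_SAMPLE_CPU */
};

/* 'hdr' points at a complete record in the mmap ring buffer.  The identity
 * block sits at the very end of the payload, which is why older tools that
 * only walk the fields they know about never trip over it. */
static struct sample_id_tail read_sample_id_tail(const struct perf_event_header *hdr)
{
	const char *end = (const char *)hdr + hdr->size;
	struct sample_id_tail sid;

	memcpy(&sid, end - sizeof(sid), sizeof(sid));
	return sid;
}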
@@ -578,6 +588,10 @@ struct perf_event;
 struct pmu {
 	struct list_head		entry;
 
+	struct device			*dev;
+	char				*name;
+	int				type;
+
 	int * __percpu			pmu_disable_count;
 	struct perf_cpu_context * __percpu pmu_cpu_context;
 	int				task_ctx_nr;
@@ -758,6 +772,9 @@ struct perf_event {
 	u64				shadow_ctx_time;
 
 	struct perf_event_attr		attr;
+	u16				header_size;
+	u16				id_header_size;
+	u16				read_size;
 	struct hw_perf_event		hw;
 
 	struct perf_event_context	*ctx;
@@ -850,6 +867,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
 
@@ -886,6 +904,7 @@ struct perf_cpu_context {
 	int				exclusive;
 	struct list_head		rotation_list;
 	int				jiffies_interval;
+	struct pmu			*active_pmu;
 };
 
 struct perf_output_handle {
@@ -901,27 +920,13 @@ struct perf_output_handle {
 
 #ifdef CONFIG_PERF_EVENTS
 
-extern int perf_pmu_register(struct pmu *pmu);
+extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
 extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
 extern void __perf_event_task_sched_in(struct task_struct *task);
 extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-
-extern atomic_t perf_task_events;
-
-static inline void perf_event_task_sched_in(struct task_struct *task)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
-}
-
-static inline
-void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
-}
-
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
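The other API change visible here is the extended perf_pmu_register() signature, which now carries the PMU's name and type and pairs with the new dev/name/type members added to struct pmu above. A hedged in-kernel sketch of a caller adapting to it follows; the PMU, its callbacks and the module boilerplate are hypothetical, and the meaning of a negative type (dynamic type-id allocation) is our reading of the matching core change rather than something this header diff shows:

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical PMU; only the registration call is the point here. */
static struct pmu my_pmu = {
	/* .event_init, .add, .del, .start, .stop, .read, ... elided */
};

static int __init my_pmu_init(void)
{
	/*
	 * Before this change: perf_pmu_register(&my_pmu);
	 * Now the name and type are passed explicitly; a negative type is
	 * taken to request a dynamically allocated id, published afterwards
	 * in my_pmu.type, while in-tree PMUs can keep a fixed PERF_TYPE_*.
	 */
	return perf_pmu_register(&my_pmu, "my_pmu", -1);
}
module_init(my_pmu_init);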
@@ -982,6 +987,11 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
 				 struct perf_sample_data *data,
 				 struct pt_regs *regs);
 
+static inline bool is_sampling_event(struct perf_event *event)
+{
+	return event->attr.sample_period != 0;
+}
+
 /*
  * Return 1 for a software event, 0 for a hardware event
  */
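is_sampling_event() is a small readability helper: an event opened with attr.sample_period == 0 is a pure counting event. As a hypothetical example (not from this patch), a PMU whose hardware cannot sample could use it in its ->event_init() callback to reject sampling requests early:

#include <linux/perf_event.h>
#include <linux/errno.h>

/* Hypothetical event_init for a counting-only PMU: refuse any event that
 * was opened with a non-zero sample_period. */
static int my_pmu_event_init(struct perf_event *event)
{
	if (is_sampling_event(event))
		return -EOPNOTSUPP;

	/* ... remaining hardware-specific setup ... */
	return 0;
}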
@@ -1030,6 +1040,21 @@ have_event:
 	__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
+
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);