about summary refs log tree commit diff stats
path: root/include/linux/perf_event.h
diff options
context:
space:
mode:
author: Ingo Molnar <mingo@elte.hu> 2011-01-07 08:14:15 -0500
committer: Ingo Molnar <mingo@elte.hu> 2011-01-07 08:14:15 -0500
commit1c2a48cf65580a276552151eb8f78d78c55b828e (patch)
tree68ed0628a276b33cb5aa0ad4899c1afe0a33a69d /include/linux/perf_event.h
parent0aa002fe602939370e9476e5ec32b562000a0425 (diff)
parentcb600d2f83c854ec3d6660063e4466431999489b (diff)
Merge branch 'linus' into x86/apic-cleanups
Conflicts: arch/x86/include/asm/io_apic.h Merge reason: Resolve the conflict, update to a more recent -rc base Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r-- include/linux/perf_event.h | 57
1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 40150f345982..dda5b0a3ff60 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -215,8 +215,9 @@ struct perf_event_attr {
215 */ 215 */
216 precise_ip : 2, /* skid constraint */ 216 precise_ip : 2, /* skid constraint */
217 mmap_data : 1, /* non-exec mmap data */ 217 mmap_data : 1, /* non-exec mmap data */
218 sample_id_all : 1, /* sample_type all events */
218 219
219 __reserved_1 : 46; 220 __reserved_1 : 45;
220 221
221 union { 222 union {
222 __u32 wakeup_events; /* wakeup every n events */ 223 __u32 wakeup_events; /* wakeup every n events */
@@ -327,6 +328,15 @@ struct perf_event_header {
327enum perf_event_type { 328enum perf_event_type {
328 329
329 /* 330 /*
331 * If perf_event_attr.sample_id_all is set then all event types will
332 * have the sample_type selected fields related to where/when
333 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
334 * described in PERF_RECORD_SAMPLE below, it will be stashed just after
335 * the perf_event_header and the fields already present for the existing
336 * fields, i.e. at the end of the payload. That way a newer perf.data
337 * file will be supported by older perf tools, with these new optional
338 * fields being ignored.
339 *
330 * The MMAP events record the PROT_EXEC mappings so that we can 340 * The MMAP events record the PROT_EXEC mappings so that we can
331 * correlate userspace IPs to code. They have the following structure: 341 * correlate userspace IPs to code. They have the following structure:
332 * 342 *
@@ -578,6 +588,10 @@ struct perf_event;
578struct pmu { 588struct pmu {
579 struct list_head entry; 589 struct list_head entry;
580 590
591 struct device *dev;
592 char *name;
593 int type;
594
581 int * __percpu pmu_disable_count; 595 int * __percpu pmu_disable_count;
582 struct perf_cpu_context * __percpu pmu_cpu_context; 596 struct perf_cpu_context * __percpu pmu_cpu_context;
583 int task_ctx_nr; 597 int task_ctx_nr;
@@ -758,6 +772,9 @@ struct perf_event {
758 u64 shadow_ctx_time; 772 u64 shadow_ctx_time;
759 773
760 struct perf_event_attr attr; 774 struct perf_event_attr attr;
775 u16 header_size;
776 u16 id_header_size;
777 u16 read_size;
761 struct hw_perf_event hw; 778 struct hw_perf_event hw;
762 779
763 struct perf_event_context *ctx; 780 struct perf_event_context *ctx;
@@ -850,6 +867,7 @@ struct perf_event_context {
850 int nr_active; 867 int nr_active;
851 int is_active; 868 int is_active;
852 int nr_stat; 869 int nr_stat;
870 int rotate_disable;
853 atomic_t refcount; 871 atomic_t refcount;
854 struct task_struct *task; 872 struct task_struct *task;
855 873
@@ -886,6 +904,7 @@ struct perf_cpu_context {
886 int exclusive; 904 int exclusive;
887 struct list_head rotation_list; 905 struct list_head rotation_list;
888 int jiffies_interval; 906 int jiffies_interval;
907 struct pmu *active_pmu;
889}; 908};
890 909
891struct perf_output_handle { 910struct perf_output_handle {
@@ -901,27 +920,13 @@ struct perf_output_handle {
901 920
902#ifdef CONFIG_PERF_EVENTS 921#ifdef CONFIG_PERF_EVENTS
903 922
904extern int perf_pmu_register(struct pmu *pmu); 923extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
905extern void perf_pmu_unregister(struct pmu *pmu); 924extern void perf_pmu_unregister(struct pmu *pmu);
906 925
907extern int perf_num_counters(void); 926extern int perf_num_counters(void);
908extern const char *perf_pmu_name(void); 927extern const char *perf_pmu_name(void);
909extern void __perf_event_task_sched_in(struct task_struct *task); 928extern void __perf_event_task_sched_in(struct task_struct *task);
910extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); 929extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
911
912extern atomic_t perf_task_events;
913
914static inline void perf_event_task_sched_in(struct task_struct *task)
915{
916 COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
917}
918
919static inline
920void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
921{
922 COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
923}
924
925extern int perf_event_init_task(struct task_struct *child); 930extern int perf_event_init_task(struct task_struct *child);
926extern void perf_event_exit_task(struct task_struct *child); 931extern void perf_event_exit_task(struct task_struct *child);
927extern void perf_event_free_task(struct task_struct *task); 932extern void perf_event_free_task(struct task_struct *task);
@@ -982,6 +987,11 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
982 struct perf_sample_data *data, 987 struct perf_sample_data *data,
983 struct pt_regs *regs); 988 struct pt_regs *regs);
984 989
990static inline bool is_sampling_event(struct perf_event *event)
991{
992 return event->attr.sample_period != 0;
993}
994
985/* 995/*
986 * Return 1 for a software event, 0 for a hardware event 996 * Return 1 for a software event, 0 for a hardware event
987 */ 997 */
@@ -1030,6 +1040,21 @@ have_event:
1030 __perf_sw_event(event_id, nr, nmi, regs, addr); 1040 __perf_sw_event(event_id, nr, nmi, regs, addr);
1031} 1041}
1032 1042
1043extern atomic_t perf_task_events;
1044
1045static inline void perf_event_task_sched_in(struct task_struct *task)
1046{
1047 COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
1048}
1049
1050static inline
1051void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
1052{
1053 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1054
1055 COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
1056}
1057
1033extern void perf_event_mmap(struct vm_area_struct *vma); 1058extern void perf_event_mmap(struct vm_area_struct *vma);
1034extern struct perf_guest_info_callbacks *perf_guest_cbs; 1059extern struct perf_guest_info_callbacks *perf_guest_cbs;
1035extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); 1060extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);