Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--	include/linux/perf_event.h	99
1 file changed, 65 insertions, 34 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c66b34f75ee..6f8cd7da1a0 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -211,11 +211,9 @@ struct perf_event_attr {
 		__u32		wakeup_watermark; /* bytes before wakeup */
 	};
 
-	__u32			__reserved_2;
-
-	__u64			bp_addr;
 	__u32			bp_type;
-	__u32			bp_len;
+	__u64			bp_addr;
+	__u64			bp_len;
 };
 
 /*
@@ -290,7 +288,7 @@ struct perf_event_mmap_page {
 };
 
 #define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
@@ -356,8 +354,8 @@ enum perf_event_type {
 	 *	u64			stream_id;
 	 * };
 	 */
 	PERF_RECORD_THROTTLE		= 5,
 	PERF_RECORD_UNTHROTTLE		= 6,
 
 	/*
 	 * struct {
@@ -371,10 +369,10 @@ enum perf_event_type {
 
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u32				pid, tid;
 	 *
 	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_RECORD_READ		= 8,
@@ -412,7 +410,7 @@ enum perf_event_type {
 	 *	  char			data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
 	PERF_RECORD_SAMPLE		= 9,
 
 	PERF_RECORD_MAX,		/* non-ABI */
 };
@@ -478,18 +476,19 @@ struct hw_perf_event {
 	union {
 		struct { /* hardware */
 			u64		config;
+			u64		last_tag;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		idx;
+			int		last_cpu;
 		};
 		struct { /* software */
 			s64		remaining;
 			struct hrtimer	hrtimer;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-		union { /* breakpoint */
+		/* breakpoint */
 		struct arch_hw_breakpoint	info;
-		};
 #endif
 	};
 	atomic64_t			prev_count;
@@ -498,9 +497,8 @@ struct hw_perf_event {
 	atomic64_t			period_left;
 	u64				interrupts;
 
-	u64				freq_count;
-	u64				freq_interrupts;
-	u64				freq_stamp;
+	u64				freq_time_stamp;
+	u64				freq_count_stamp;
 #endif
 };
 
@@ -512,6 +510,8 @@ struct perf_event;
 struct pmu {
 	int (*enable)			(struct perf_event *event);
 	void (*disable)			(struct perf_event *event);
+	int (*start)			(struct perf_event *event);
+	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
 };
@@ -565,6 +565,10 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
 					 struct perf_sample_data *,
 					 struct pt_regs *regs);
 
+enum perf_group_flag {
+	PERF_GROUP_SOFTWARE = 0x1,
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -574,6 +578,7 @@ struct perf_event {
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
 	int				nr_siblings;
+	int				group_flags;
 	struct perf_event		*group_leader;
 	struct perf_event		*output;
 	const struct pmu		*pmu;
@@ -658,7 +663,7 @@ struct perf_event {
 
 	perf_overflow_handler_t		overflow_handler;
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_EVENT_TRACING
 	struct event_filter		*filter;
 #endif
 
@@ -683,7 +688,8 @@ struct perf_event_context {
 	 */
 	struct mutex			mutex;
 
-	struct list_head		group_list;
+	struct list_head		pinned_groups;
+	struct list_head		flexible_groups;
 	struct list_head		event_list;
 	int				nr_events;
 	int				nr_active;
@@ -746,10 +752,9 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_event_task_sched_out(struct task_struct *task,
-					struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
+extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -764,7 +769,7 @@ extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu);
+	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -796,6 +801,13 @@ struct perf_sample_data {
 	struct perf_raw_record		*raw;
 };
 
+static inline
+void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+{
+	data->addr = addr;
+	data->raw  = NULL;
+}
+
 extern void perf_output_sample(struct perf_output_handle *handle,
 			       struct perf_event_header *header,
 			       struct perf_sample_data *data,
@@ -814,9 +826,14 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
  */
 static inline int is_software_event(struct perf_event *event)
 {
-	return (event->attr.type != PERF_TYPE_RAW) &&
-		(event->attr.type != PERF_TYPE_HARDWARE) &&
-		(event->attr.type != PERF_TYPE_HW_CACHE);
+	switch (event->attr.type) {
+	case PERF_TYPE_SOFTWARE:
+	case PERF_TYPE_TRACEPOINT:
+	/* for now the breakpoint stuff also works as software event */
+	case PERF_TYPE_BREAKPOINT:
+		return 1;
+	}
+	return 0;
 }
 
 extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -847,9 +864,23 @@ extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+static inline bool perf_paranoid_tracepoint_raw(void)
+{
+	return sysctl_perf_event_paranoid > -1;
+}
+
+static inline bool perf_paranoid_cpu(void)
+{
+	return sysctl_perf_event_paranoid > 0;
+}
+
+static inline bool perf_paranoid_kernel(void)
+{
+	return sysctl_perf_event_paranoid > 1;
+}
+
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count,
-				 void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -870,12 +901,12 @@ extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
+perf_event_task_sched_in(struct task_struct *task)			{ }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
-			    struct task_struct *next, int cpu)		{ }
+			    struct task_struct *next)			{ }
 static inline void
-perf_event_task_tick(struct task_struct *task, int cpu)		{ }
+perf_event_task_tick(struct task_struct *task)				{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
@@ -890,13 +921,13 @@ static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi,
 	      struct pt_regs *regs, u64 addr)				{ }
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
 static inline void perf_event_init(void)				{ }
 static inline int perf_swevent_get_recursion_context(void)		{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
 static inline void perf_event_enable(struct perf_event *event)		{ }
 static inline void perf_event_disable(struct perf_event *event)	{ }
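
Purely as an illustration, not part of the patch above: a minimal sketch of how a driver-side overflow path might use the perf_sample_data_init() helper introduced here before calling the existing perf_event_overflow(). The handler name and call site are hypothetical; only the helper, perf_event_overflow() and perf_event_disable() come from this header.

static void example_pmu_overflow(struct perf_event *event, int nmi,
				 struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;

	/* Sets data.addr and clears data.raw so stale stack contents are
	 * never interpreted as a PERF_SAMPLE_RAW payload. */
	perf_sample_data_init(&data, addr);

	if (perf_event_overflow(event, nmi, &data, regs))
		perf_event_disable(event);
}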