diff options
| author | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-12-06 12:00:33 -0500 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-12-06 12:00:33 -0500 |
| commit | 3d14b5beba35250c548d3851a2b84fce742d8311 (patch) | |
| tree | 065e3d93c3fcbc5ee4c44fa78662393cddbdf6de /include/linux/perf_event.h | |
| parent | 0719dc341389882cc834ed18fc9b7fc6006b2b85 (diff) | |
| parent | 1bf8e6219552d5dd27012d567ec8c4bb9c2d86b4 (diff) | |
Merge branch 'sa1100' into devel
Diffstat (limited to 'include/linux/perf_event.h')
| -rw-r--r-- | include/linux/perf_event.h | 59 |
1 file changed, 56 insertions, 3 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9e7012689a84..43adbd7f0010 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -18,6 +18,10 @@ | |||
| 18 | #include <linux/ioctl.h> | 18 | #include <linux/ioctl.h> |
| 19 | #include <asm/byteorder.h> | 19 | #include <asm/byteorder.h> |
| 20 | 20 | ||
| 21 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
| 22 | #include <asm/hw_breakpoint.h> | ||
| 23 | #endif | ||
| 24 | |||
| 21 | /* | 25 | /* |
| 22 | * User-space ABI bits: | 26 | * User-space ABI bits: |
| 23 | */ | 27 | */ |
| @@ -31,6 +35,7 @@ enum perf_type_id { | |||
| 31 | PERF_TYPE_TRACEPOINT = 2, | 35 | PERF_TYPE_TRACEPOINT = 2, |
| 32 | PERF_TYPE_HW_CACHE = 3, | 36 | PERF_TYPE_HW_CACHE = 3, |
| 33 | PERF_TYPE_RAW = 4, | 37 | PERF_TYPE_RAW = 4, |
| 38 | PERF_TYPE_BREAKPOINT = 5, | ||
| 34 | 39 | ||
| 35 | PERF_TYPE_MAX, /* non-ABI */ | 40 | PERF_TYPE_MAX, /* non-ABI */ |
| 36 | }; | 41 | }; |
| @@ -102,6 +107,8 @@ enum perf_sw_ids { | |||
| 102 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | 107 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, |
| 103 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | 108 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, |
| 104 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | 109 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, |
| 110 | PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, | ||
| 111 | PERF_COUNT_SW_EMULATION_FAULTS = 8, | ||
| 105 | 112 | ||
| 106 | PERF_COUNT_SW_MAX, /* non-ABI */ | 113 | PERF_COUNT_SW_MAX, /* non-ABI */ |
| 107 | }; | 114 | }; |
| @@ -207,6 +214,15 @@ struct perf_event_attr { | |||
| 207 | __u32 wakeup_events; /* wakeup every n events */ | 214 | __u32 wakeup_events; /* wakeup every n events */ |
| 208 | __u32 wakeup_watermark; /* bytes before wakeup */ | 215 | __u32 wakeup_watermark; /* bytes before wakeup */ |
| 209 | }; | 216 | }; |
| 217 | |||
| 218 | union { | ||
| 219 | struct { /* Hardware breakpoint info */ | ||
| 220 | __u64 bp_addr; | ||
| 221 | __u32 bp_type; | ||
| 222 | __u32 bp_len; | ||
| 223 | }; | ||
| 224 | }; | ||
| 225 | |||
| 210 | __u32 __reserved_2; | 226 | __u32 __reserved_2; |
| 211 | 227 | ||
| 212 | __u64 __reserved_3; | 228 | __u64 __reserved_3; |
| @@ -219,8 +235,9 @@ struct perf_event_attr { | |||
| 219 | #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) | 235 | #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) |
| 220 | #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) | 236 | #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) |
| 221 | #define PERF_EVENT_IOC_RESET _IO ('$', 3) | 237 | #define PERF_EVENT_IOC_RESET _IO ('$', 3) |
| 222 | #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) | 238 | #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) |
| 223 | #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) | 239 | #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) |
| 240 | #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) | ||
| 224 | 241 | ||
| 225 | enum perf_event_ioc_flags { | 242 | enum perf_event_ioc_flags { |
| 226 | PERF_IOC_FLAG_GROUP = 1U << 0, | 243 | PERF_IOC_FLAG_GROUP = 1U << 0, |
| @@ -475,6 +492,11 @@ struct hw_perf_event { | |||
| 475 | s64 remaining; | 492 | s64 remaining; |
| 476 | struct hrtimer hrtimer; | 493 | struct hrtimer hrtimer; |
| 477 | }; | 494 | }; |
| 495 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
| 496 | union { /* breakpoint */ | ||
| 497 | struct arch_hw_breakpoint info; | ||
| 498 | }; | ||
| 499 | #endif | ||
| 478 | }; | 500 | }; |
| 479 | atomic64_t prev_count; | 501 | atomic64_t prev_count; |
| 480 | u64 sample_period; | 502 | u64 sample_period; |
| @@ -543,6 +565,10 @@ struct perf_pending_entry { | |||
| 543 | void (*func)(struct perf_pending_entry *); | 565 | void (*func)(struct perf_pending_entry *); |
| 544 | }; | 566 | }; |
| 545 | 567 | ||
| 568 | typedef void (*perf_callback_t)(struct perf_event *, void *); | ||
| 569 | |||
| 570 | struct perf_sample_data; | ||
| 571 | |||
| 546 | /** | 572 | /** |
| 547 | * struct perf_event - performance event kernel representation: | 573 | * struct perf_event - performance event kernel representation: |
| 548 | */ | 574 | */ |
| @@ -585,7 +611,7 @@ struct perf_event { | |||
| 585 | u64 tstamp_running; | 611 | u64 tstamp_running; |
| 586 | u64 tstamp_stopped; | 612 | u64 tstamp_stopped; |
| 587 | 613 | ||
| 588 | struct perf_event_attr attr; | 614 | struct perf_event_attr attr; |
| 589 | struct hw_perf_event hw; | 615 | struct hw_perf_event hw; |
| 590 | 616 | ||
| 591 | struct perf_event_context *ctx; | 617 | struct perf_event_context *ctx; |
| @@ -633,7 +659,20 @@ struct perf_event { | |||
| 633 | 659 | ||
| 634 | struct pid_namespace *ns; | 660 | struct pid_namespace *ns; |
| 635 | u64 id; | 661 | u64 id; |
| 662 | |||
| 663 | void (*overflow_handler)(struct perf_event *event, | ||
| 664 | int nmi, struct perf_sample_data *data, | ||
| 665 | struct pt_regs *regs); | ||
| 666 | |||
| 667 | #ifdef CONFIG_EVENT_PROFILE | ||
| 668 | struct event_filter *filter; | ||
| 636 | #endif | 669 | #endif |
| 670 | |||
| 671 | perf_callback_t callback; | ||
| 672 | |||
| 673 | perf_callback_t event_callback; | ||
| 674 | |||
| 675 | #endif /* CONFIG_PERF_EVENTS */ | ||
| 637 | }; | 676 | }; |
| 638 | 677 | ||
| 639 | /** | 678 | /** |
| @@ -706,7 +745,6 @@ struct perf_output_handle { | |||
| 706 | int nmi; | 745 | int nmi; |
| 707 | int sample; | 746 | int sample; |
| 708 | int locked; | 747 | int locked; |
| 709 | unsigned long flags; | ||
| 710 | }; | 748 | }; |
| 711 | 749 | ||
| 712 | #ifdef CONFIG_PERF_EVENTS | 750 | #ifdef CONFIG_PERF_EVENTS |
| @@ -738,6 +776,14 @@ extern int hw_perf_group_sched_in(struct perf_event *group_leader, | |||
| 738 | struct perf_cpu_context *cpuctx, | 776 | struct perf_cpu_context *cpuctx, |
| 739 | struct perf_event_context *ctx, int cpu); | 777 | struct perf_event_context *ctx, int cpu); |
| 740 | extern void perf_event_update_userpage(struct perf_event *event); | 778 | extern void perf_event_update_userpage(struct perf_event *event); |
| 779 | extern int perf_event_release_kernel(struct perf_event *event); | ||
| 780 | extern struct perf_event * | ||
| 781 | perf_event_create_kernel_counter(struct perf_event_attr *attr, | ||
| 782 | int cpu, | ||
| 783 | pid_t pid, | ||
| 784 | perf_callback_t callback); | ||
| 785 | extern u64 perf_event_read_value(struct perf_event *event, | ||
| 786 | u64 *enabled, u64 *running); | ||
| 741 | 787 | ||
| 742 | struct perf_sample_data { | 788 | struct perf_sample_data { |
| 743 | u64 type; | 789 | u64 type; |
| @@ -814,6 +860,7 @@ extern int sysctl_perf_event_sample_rate; | |||
| 814 | extern void perf_event_init(void); | 860 | extern void perf_event_init(void); |
| 815 | extern void perf_tp_event(int event_id, u64 addr, u64 count, | 861 | extern void perf_tp_event(int event_id, u64 addr, u64 count, |
| 816 | void *record, int entry_size); | 862 | void *record, int entry_size); |
| 863 | extern void perf_bp_event(struct perf_event *event, void *data); | ||
| 817 | 864 | ||
| 818 | #ifndef perf_misc_flags | 865 | #ifndef perf_misc_flags |
| 819 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ | 866 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ |
| @@ -827,6 +874,8 @@ extern int perf_output_begin(struct perf_output_handle *handle, | |||
| 827 | extern void perf_output_end(struct perf_output_handle *handle); | 874 | extern void perf_output_end(struct perf_output_handle *handle); |
| 828 | extern void perf_output_copy(struct perf_output_handle *handle, | 875 | extern void perf_output_copy(struct perf_output_handle *handle, |
| 829 | const void *buf, unsigned int len); | 876 | const void *buf, unsigned int len); |
| 877 | extern int perf_swevent_get_recursion_context(void); | ||
| 878 | extern void perf_swevent_put_recursion_context(int rctx); | ||
| 830 | #else | 879 | #else |
| 831 | static inline void | 880 | static inline void |
| 832 | perf_event_task_sched_in(struct task_struct *task, int cpu) { } | 881 | perf_event_task_sched_in(struct task_struct *task, int cpu) { } |
| @@ -848,11 +897,15 @@ static inline int perf_event_task_enable(void) { return -EINVAL; } | |||
| 848 | static inline void | 897 | static inline void |
| 849 | perf_sw_event(u32 event_id, u64 nr, int nmi, | 898 | perf_sw_event(u32 event_id, u64 nr, int nmi, |
| 850 | struct pt_regs *regs, u64 addr) { } | 899 | struct pt_regs *regs, u64 addr) { } |
| 900 | static inline void | ||
| 901 | perf_bp_event(struct perf_event *event, void *data) { } | ||
| 851 | 902 | ||
| 852 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } | 903 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
| 853 | static inline void perf_event_comm(struct task_struct *tsk) { } | 904 | static inline void perf_event_comm(struct task_struct *tsk) { } |
| 854 | static inline void perf_event_fork(struct task_struct *tsk) { } | 905 | static inline void perf_event_fork(struct task_struct *tsk) { } |
| 855 | static inline void perf_event_init(void) { } | 906 | static inline void perf_event_init(void) { } |
| 907 | static inline int perf_swevent_get_recursion_context(void) { return -1; } | ||
| 908 | static inline void perf_swevent_put_recursion_context(int rctx) { } | ||
| 856 | 909 | ||
| 857 | #endif | 910 | #endif |
| 858 | 911 | ||
