Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--   include/linux/perf_event.h   54
1 file changed, 26 insertions, 28 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 43adbd7f0010..8fa71874113f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -18,10 +18,6 @@
 #include <linux/ioctl.h>
 #include <asm/byteorder.h>
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include <asm/hw_breakpoint.h>
-#endif
-
 /*
  * User-space ABI bits:
  */
@@ -215,17 +211,11 @@ struct perf_event_attr {
 		__u32		wakeup_watermark; /* bytes before wakeup */
 	};
 
-	union {
-		struct { /* Hardware breakpoint info */
-			__u64		bp_addr;
-			__u32		bp_type;
-			__u32		bp_len;
-		};
-	};
-
 	__u32			__reserved_2;
 
-	__u64			__reserved_3;
+	__u64			bp_addr;
+	__u32			bp_type;
+	__u32			bp_len;
 };
 
 /*
@@ -451,6 +441,10 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <asm/hw_breakpoint.h>
+#endif
+
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/rculist.h>
@@ -565,10 +559,12 @@ struct perf_pending_entry {
 	void (*func)(struct perf_pending_entry *);
 };
 
-typedef void (*perf_callback_t)(struct perf_event *, void *);
-
 struct perf_sample_data;
 
+typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
+					struct perf_sample_data *,
+					struct pt_regs *regs);
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -660,18 +656,12 @@ struct perf_event {
 	struct pid_namespace		*ns;
 	u64				id;
 
-	void (*overflow_handler)(struct perf_event *event,
-			int nmi, struct perf_sample_data *data,
-			struct pt_regs *regs);
+	perf_overflow_handler_t		overflow_handler;
 
 #ifdef CONFIG_EVENT_PROFILE
 	struct event_filter		*filter;
 #endif
 
-	perf_callback_t			callback;
-
-	perf_callback_t			event_callback;
-
 #endif /* CONFIG_PERF_EVENTS */
 };
 
@@ -685,7 +675,7 @@ struct perf_event_context {
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	/*
 	 * Protect the list of events. Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
@@ -781,7 +771,7 @@ extern struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				int cpu,
 				pid_t pid,
-				perf_callback_t callback);
+				perf_overflow_handler_t callback);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
@@ -824,9 +814,14 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
  */
 static inline int is_software_event(struct perf_event *event)
 {
-	return (event->attr.type != PERF_TYPE_RAW) &&
-	       (event->attr.type != PERF_TYPE_HARDWARE) &&
-	       (event->attr.type != PERF_TYPE_HW_CACHE);
+	switch (event->attr.type) {
+	case PERF_TYPE_SOFTWARE:
+	case PERF_TYPE_TRACEPOINT:
+	/* for now the breakpoint stuff also works as software event */
+	case PERF_TYPE_BREAKPOINT:
+		return 1;
+	}
+	return 0;
 }
 
 extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -876,6 +871,8 @@ extern void perf_output_copy(struct perf_output_handle *handle,
 			     const void *buf, unsigned int len);
 extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
+extern void perf_event_enable(struct perf_event *event);
+extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task, int cpu)	{ }
@@ -906,7 +903,8 @@ static inline void perf_event_fork(struct task_struct *tsk)	{ }
 static inline void perf_event_init(void)				{ }
 static inline int perf_swevent_get_recursion_context(void)	{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
-
+static inline void perf_event_enable(struct perf_event *event)		{ }
+static inline void perf_event_disable(struct perf_event *event)	{ }
 #endif
 
 #define perf_output_put(handle, x)			\
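For context only (not part of the diff above): a minimal sketch of how an in-kernel caller might use the renamed perf_overflow_handler_t with perf_event_create_kernel_counter() and the perf_event_enable() declaration added here. The attribute values, the helper names, and the use of perf_event_release_kernel()-style teardown (elided) are illustrative assumptions, not taken from this commit.

/*
 * Sketch: create a per-CPU software counter whose overflow callback
 * matches the perf_overflow_handler_t signature introduced above.
 */
#include <linux/perf_event.h>
#include <linux/err.h>

static void sample_overflow(struct perf_event *event, int nmi,
			    struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	/* Runs on counter overflow, possibly in NMI context; keep it short. */
}

static struct perf_event *create_cpu_clock_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_CPU_CLOCK,
		.sample_period	= 1000000,		/* illustrative value */
	};
	struct perf_event *event;

	/* pid == -1: count on @cpu regardless of which task runs */
	event = perf_event_create_kernel_counter(&attr, cpu, -1,
						 sample_overflow);
	if (IS_ERR(event))
		return event;

	perf_event_enable(event);	/* prototype added by this diff */
	return event;
}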