Diffstat (limited to 'include/linux/perf_event.h')

 include/linux/perf_event.h | 43 ++++++++++++++++++-------------------------
 1 file changed, 18 insertions(+), 25 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 43adbd7f0010..c66b34f75eea 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -18,10 +18,6 @@
 #include <linux/ioctl.h>
 #include <asm/byteorder.h>
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include <asm/hw_breakpoint.h>
-#endif
-
 /*
  * User-space ABI bits:
  */
@@ -215,17 +211,11 @@ struct perf_event_attr {
 		__u32		wakeup_watermark; /* bytes before wakeup   */
 	};
 
-	union {
-		struct { /* Hardware breakpoint info */
-			__u64		bp_addr;
-			__u32		bp_type;
-			__u32		bp_len;
-		};
-	};
-
 	__u32			__reserved_2;
 
-	__u64			__reserved_3;
+	__u64			bp_addr;
+	__u32			bp_type;
+	__u32			bp_len;
 };
 
 /*
@@ -451,6 +441,10 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <asm/hw_breakpoint.h>
+#endif
+
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/rculist.h>
@@ -565,10 +559,12 @@ struct perf_pending_entry {
 	void (*func)(struct perf_pending_entry *);
 };
 
-typedef void (*perf_callback_t)(struct perf_event *, void *);
-
 struct perf_sample_data;
 
+typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
+					struct perf_sample_data *,
+					struct pt_regs *regs);
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -660,18 +656,12 @@ struct perf_event {
 	struct pid_namespace		*ns;
 	u64				id;
 
-	void (*overflow_handler)(struct perf_event *event,
-			int nmi, struct perf_sample_data *data,
-			struct pt_regs *regs);
+	perf_overflow_handler_t		overflow_handler;
 
 #ifdef CONFIG_EVENT_PROFILE
 	struct event_filter		*filter;
 #endif
 
-	perf_callback_t			callback;
-
-	perf_callback_t			event_callback;
-
 #endif /* CONFIG_PERF_EVENTS */
 };
 
@@ -685,7 +675,7 @@ struct perf_event_context {
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	/*
 	 * Protect the list of events.  Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
@@ -781,7 +771,7 @@ extern struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				int cpu,
 				pid_t pid,
-				perf_callback_t callback);
+				perf_overflow_handler_t callback);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
@@ -876,6 +866,8 @@ extern void perf_output_copy(struct perf_output_handle *handle,
 			     const void *buf, unsigned int len);
 extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
+extern void perf_event_enable(struct perf_event *event);
+extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
@@ -906,7 +898,8 @@ static inline void perf_event_fork(struct task_struct *tsk) { }
 static inline void perf_event_init(void)				{ }
 static inline int  perf_swevent_get_recursion_context(void)  { return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
-
+static inline void perf_event_enable(struct perf_event *event)		{ }
+static inline void perf_event_disable(struct perf_event *event)	{ }
 #endif
 
 #define perf_output_put(handle, x)				\
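
For context, a minimal sketch of what an in-kernel user of this header looks like after the change: the breakpoint parameters (bp_addr, bp_type, bp_len) are now plain fields of struct perf_event_attr instead of living in an anonymous union, the overflow callback passed to perf_event_create_kernel_counter() uses the typed perf_overflow_handler_t signature rather than the old untyped perf_callback_t, and perf_event_enable()/perf_event_disable() are declared here. This is a hypothetical example, not part of the patch: names such as my_bp_handler and watched_var are invented, and PERF_TYPE_BREAKPOINT, the HW_BREAKPOINT_* encodings, <linux/hw_breakpoint.h>, and perf_event_release_kernel() are assumed from the companion hw-breakpoint work in this series rather than from this hunk.

/*
 * Hypothetical usage sketch, not part of this patch.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>	/* assumed: HW_BREAKPOINT_* encodings */

static int watched_var;			/* made-up variable to watch */
static struct perf_event *bp_event;

/* Matches the new perf_overflow_handler_t signature. */
static void my_bp_handler(struct perf_event *bp, int nmi,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	pr_info("watched_var written, ip=%lx\n", instruction_pointer(regs));
}

static int __init my_bp_init(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_BREAKPOINT,	/* assumed from this series */
		.size		= sizeof(attr),
		/* bp_* fields now sit directly in perf_event_attr */
		.bp_addr	= (__u64)(unsigned long)&watched_var,
		.bp_type	= HW_BREAKPOINT_W,
		.bp_len		= HW_BREAKPOINT_LEN_4,
	};

	/* The last argument is now a perf_overflow_handler_t. */
	bp_event = perf_event_create_kernel_counter(&attr, 0 /* cpu */,
						    -1 /* pid */, my_bp_handler);
	if (IS_ERR(bp_event))
		return PTR_ERR(bp_event);

	perf_event_enable(bp_event);	/* declared by this header after the patch */
	return 0;
}

static void __exit my_bp_exit(void)
{
	perf_event_disable(bp_event);
	perf_event_release_kernel(bp_event);	/* assumed kernel-counter teardown */
}

module_init(my_bp_init);
module_exit(my_bp_exit);
MODULE_LICENSE("GPL");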