Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--	include/linux/perf_event.h	| 91
1 file changed, 39 insertions(+), 52 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3412684ce5d5..245bafdafd5e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -61,7 +61,7 @@ enum perf_hw_id {
 /*
  * Generalized hardware cache events:
  *
- * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
+ * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
  * { read, write, prefetch } x
  * { accesses, misses }
  */
@@ -72,6 +72,7 @@ enum perf_hw_cache_id {
 	PERF_COUNT_HW_CACHE_DTLB	= 3,
 	PERF_COUNT_HW_CACHE_ITLB	= 4,
 	PERF_COUNT_HW_CACHE_BPU		= 5,
+	PERF_COUNT_HW_CACHE_NODE	= 6,
 
 	PERF_COUNT_HW_CACHE_MAX,	/* non-ABI */
 };
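The new NODE cache id extends the generic cache-event space to memory-node accesses. As an illustration that is not part of this patch, a userspace perf_event_attr selecting node read misses would combine the cache, op and result ids with the usual PERF_TYPE_HW_CACHE encoding (id | op << 8 | result << 16):

#include <linux/perf_event.h>

/* Illustrative only: count memory-node read misses via the new
 * PERF_COUNT_HW_CACHE_NODE generic cache id.  The config encoding
 * (cache id | op << 8 | result << 16) is the standard convention for
 * PERF_TYPE_HW_CACHE events. */
struct perf_event_attr node_read_miss_attr = {
	.type	= PERF_TYPE_HW_CACHE,
	.size	= sizeof(struct perf_event_attr),
	.config	= PERF_COUNT_HW_CACHE_NODE |
		  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
};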
@@ -137,14 +138,14 @@ enum perf_event_sample_format {
  *
  * struct read_format {
  *	{ u64		value;
- *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
- *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *	  { u64		id; } && PERF_FORMAT_ID
  *	} && !PERF_FORMAT_GROUP
  *
  *	{ u64		nr;
- *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
- *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *	  { u64		value;
  *	    { u64	id; } && PERF_FORMAT_ID
  *	  } cntr[nr];
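For the common non-group case, the layout described in this comment maps onto a plain C struct. A minimal sketch, assuming all three optional fields were requested in attr.read_format:

#include <linux/types.h>

/* What read() returns for a single event (!PERF_FORMAT_GROUP) when all
 * three optional fields are enabled in attr.read_format. */
struct single_read_format {
	__u64 value;		/* counter value */
	__u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	__u64 id;		/* PERF_FORMAT_ID */
};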
@@ -508,7 +509,7 @@ struct perf_guest_info_callbacks {
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
 #include <linux/jump_label.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/local.h>
 
 #define PERF_MAX_STACK_DEPTH		255
@@ -536,6 +537,16 @@ struct perf_branch_stack {
 
 struct task_struct;
 
+/*
+ * extra PMU register associated with an event
+ */
+struct hw_perf_event_extra {
+	u64		config;	/* register value */
+	unsigned int	reg;	/* register address or index */
+	int		alloc;	/* extra register already allocated */
+	int		idx;	/* index in shared_regs->regs[] */
+};
+
 /**
  * struct hw_perf_event - performance event hardware details:
  */
@@ -549,9 +560,7 @@ struct hw_perf_event {
 		unsigned long	event_base;
 		int		idx;
 		int		last_cpu;
-		unsigned int	extra_reg;
-		u64		extra_config;
-		int		extra_alloc;
+		struct hw_perf_event_extra extra_reg;
 	};
 	struct { /* software */
 		struct hrtimer	hrtimer;
@@ -680,36 +689,9 @@ enum perf_event_active_state {
 };
 
 struct file;
-
-#define PERF_BUFFER_WRITABLE		0x01
-
-struct perf_buffer {
-	atomic_t			refcount;
-	struct rcu_head			rcu_head;
-#ifdef CONFIG_PERF_USE_VMALLOC
-	struct work_struct		work;
-	int				page_order;	/* allocation order */
-#endif
-	int				nr_pages;	/* nr of data pages */
-	int				writable;	/* are we writable */
-
-	atomic_t			poll;		/* POLL_ for wakeups */
-
-	local_t				head;		/* write position */
-	local_t				nest;		/* nested writers */
-	local_t				events;		/* event limit */
-	local_t				wakeup;		/* wakeup stamp */
-	local_t				lost;		/* nr records lost */
-
-	long				watermark;	/* wakeup watermark */
-
-	struct perf_event_mmap_page	*user_page;
-	void				*data_pages[0];
-};
-
 struct perf_sample_data;
 
-typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
+typedef void (*perf_overflow_handler_t)(struct perf_event *,
 					struct perf_sample_data *,
 					struct pt_regs *regs);
 
@@ -745,6 +727,8 @@ struct perf_cgroup {
 };
 #endif
 
+struct ring_buffer;
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -834,7 +818,7 @@ struct perf_event {
 	atomic_t		mmap_count;
 	int			mmap_locked;
 	struct user_struct	*mmap_user;
-	struct perf_buffer	*buffer;
+	struct ring_buffer	*rb;
 
 	/* poll related */
 	wait_queue_head_t	waitq;
@@ -855,6 +839,7 @@ struct perf_event {
 	u64			id;
 
 	perf_overflow_handler_t	overflow_handler;
+	void			*overflow_handler_context;
 
 #ifdef CONFIG_EVENT_TRACING
 	struct ftrace_event_call	*tp_event;
@@ -919,8 +904,8 @@ struct perf_event_context {
 	u64			parent_gen;
 	u64			generation;
 	int			pin_count;
-	struct rcu_head		rcu_head;
 	int			nr_cgroups;	/* cgroup events present */
+	struct rcu_head		rcu_head;
 };
 
 /*
@@ -945,13 +930,11 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
 	struct perf_event	*event;
-	struct perf_buffer	*buffer;
+	struct ring_buffer	*rb;
 	unsigned long		wakeup;
 	unsigned long		size;
 	void			*addr;
 	int			page;
-	int			nmi;
-	int			sample;
 };
 
 #ifdef CONFIG_PERF_EVENTS
@@ -972,13 +955,15 @@ extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
+extern int perf_event_refresh(struct perf_event *event, int refresh);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				int cpu,
 				struct task_struct *task,
-				perf_overflow_handler_t callback);
+				perf_overflow_handler_t callback,
+				void *context);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
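With the extra 'context' parameter and the overflow handler losing its nmi argument (see the following hunks), an in-kernel user now looks roughly like the sketch below. The names my_overflow/my_setup, the atomic counter used as context, and the NULL task (cpu-wide counter) are illustrative, not part of this patch:

/* Sketch: create a cpu-wide kernel counter with the new signature.  The
 * opaque pointer passed as 'context' is stored by the core in
 * event->overflow_handler_context and is available to the callback. */
static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	atomic_t *hits = event->overflow_handler_context;

	atomic_inc(hits);	/* just count overflows for illustration */
}

static struct perf_event *my_setup(struct perf_event_attr *attr, int cpu,
				   atomic_t *hits)
{
	/* NULL task => cpu-wide counter; check the result with IS_ERR(). */
	return perf_event_create_kernel_counter(attr, cpu, NULL,
						my_overflow, hits);
}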
@@ -1018,7 +1003,7 @@ extern void perf_prepare_sample(struct perf_event_header *header,
 				struct perf_event *event,
 				struct pt_regs *regs);
 
-extern int perf_event_overflow(struct perf_event *event, int nmi,
+extern int perf_event_overflow(struct perf_event *event,
 				 struct perf_sample_data *data,
 				 struct pt_regs *regs);
 
@@ -1037,7 +1022,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
-extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
+extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
@@ -1059,7 +1044,7 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 }
 
 static __always_inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
 	struct pt_regs hot_regs;
 
@@ -1068,7 +1053,7 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
 		}
-		__perf_sw_event(event_id, nr, nmi, regs, addr);
+		__perf_sw_event(event_id, nr, regs, addr);
 	}
 }
 
@@ -1082,7 +1067,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task)
 
 static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
 {
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
 	__perf_event_task_sched_out(task, next);
 }
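Other software-event call sites shrink the same way. As a sketch under the new signature (the function name and its two parameters are illustrative stand-ins for an architecture fault path's own variables):

/* Sketch: how an arch fault path would count a software event after this
 * change -- the old 'nmi' argument is simply dropped. */
static void count_fault(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}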
@@ -1143,8 +1128,7 @@ extern void perf_bp_event(struct perf_event *event, void *data);
 #endif
 
 extern int perf_output_begin(struct perf_output_handle *handle,
-			     struct perf_event *event, unsigned int size,
-			     int nmi, int sample);
+			     struct perf_event *event, unsigned int size);
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
 			     const void *buf, unsigned int len);
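The output path keeps its begin/copy/end shape, just without the nmi and sample flags. A rough sketch of a caller under that assumption; struct my_record and emit_record are placeholders, not from this patch:

struct my_record {		/* placeholder payload */
	u64 val;
};

/* Sketch of the usual output sequence with the trimmed perf_output_begin(). */
static void emit_record(struct perf_event *event, struct my_record *rec)
{
	struct perf_output_handle handle;

	if (perf_output_begin(&handle, event, sizeof(*rec)))
		return;		/* no room, or no active buffer */

	perf_output_copy(&handle, rec, sizeof(*rec));
	perf_output_end(&handle);
}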
@@ -1166,10 +1150,13 @@ static inline void perf_event_delayed_put(struct task_struct *task)	{ }
 static inline void perf_event_print_debug(void)				{ }
 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
+static inline int perf_event_refresh(struct perf_event *event, int refresh)
+{
+	return -EINVAL;
+}
 
 static inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi,
-	      struct pt_regs *regs, u64 addr)				{ }
+perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 