Diffstat (limited to 'include/linux/perf_event.h')
 include/linux/perf_event.h | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 51 insertions(+), 16 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 057bf22a8323..dda5b0a3ff60 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -215,8 +215,9 @@ struct perf_event_attr {
 				 */
 				precise_ip     :  2, /* skid constraint       */
 				mmap_data      :  1, /* non-exec mmap data    */
+				sample_id_all  :  1, /* sample_type all events */
 
-				__reserved_1   : 46;
+				__reserved_1   : 45;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
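
The new sample_id_all bit is consumed from userspace through perf_event_attr. A minimal sketch of how a tool would request it; the helper name open_stamped_event and the period/sample_type choices are illustrative, not part of this patch:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hypothetical helper: sample CPU cycles and ask the kernel to stamp
     * every record type with the selected identity fields. */
    static int open_stamped_event(pid_t pid)
    {
    	struct perf_event_attr attr;

    	memset(&attr, 0, sizeof(attr));
    	attr.size          = sizeof(attr);
    	attr.type          = PERF_TYPE_HARDWARE;
    	attr.config        = PERF_COUNT_HW_CPU_CYCLES;
    	attr.sample_period = 100000;
    	attr.sample_type   = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
    			     PERF_SAMPLE_CPU;
    	attr.sample_id_all = 1;	/* the bit added above */

    	/* perf_event_open() has no libc wrapper; use the raw syscall. */
    	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
    }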
@@ -327,6 +328,15 @@ struct perf_event_header {
 enum perf_event_type {
 
 	/*
+	 * If perf_event_attr.sample_id_all is set then all event types will
+	 * have the sample_type selected fields related to where/when
+	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
+	 * described in PERF_RECORD_SAMPLE below, it will be stashed just after
+	 * the perf_event_header and the fields already present for the existing
+	 * fields, i.e. at the end of the payload. That way a newer perf.data
+	 * file will be supported by older perf tools, with these new optional
+	 * fields being ignored.
+	 *
 	 * The MMAP events record the PROT_EXEC mappings so that we can
 	 * correlate userspace IPs to code. They have the following structure:
 	 *
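
To make the comment concrete: with sample_id_all set and the full identity mask selected, each non-SAMPLE record is followed by a trailer after its existing payload. A sketch of that layout, in sample_type bit order (illustrative struct, not defined by this patch):

    struct sample_id_trailer {	/* each field present only if selected: */
    	__u32	pid, tid;	/* PERF_SAMPLE_TID       */
    	__u64	time;		/* PERF_SAMPLE_TIME      */
    	__u64	id;		/* PERF_SAMPLE_ID        */
    	__u64	stream_id;	/* PERF_SAMPLE_STREAM_ID */
    	__u32	cpu, res;	/* PERF_SAMPLE_CPU       */
    };

Because the trailer sits at the end of the payload, an older tool that only understands the original fields skips it implicitly via header.size, which is what makes the format forward compatible.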
@@ -578,6 +588,10 @@ struct perf_event;
 struct pmu {
 	struct list_head		entry;
 
+	struct device			*dev;
+	char				*name;
+	int				type;
+
 	int * __percpu			pmu_disable_count;
 	struct perf_cpu_context * __percpu pmu_cpu_context;
 	int				task_ctx_nr;
@@ -747,7 +761,20 @@ struct perf_event {
 	u64				tstamp_running;
 	u64				tstamp_stopped;
 
+	/*
+	 * timestamp shadows the actual context timing but it can
+	 * be safely used in NMI interrupt context. It reflects the
+	 * context time as it was when the event was last scheduled in.
+	 *
+	 * ctx_time already accounts for ctx->timestamp. Therefore to
+	 * compute ctx_time for a sample, simply add perf_clock().
+	 */
+	u64				shadow_ctx_time;
+
 	struct perf_event_attr		attr;
+	u16				header_size;
+	u16				id_header_size;
+	u16				read_size;
 	struct hw_perf_event		hw;
 
 	struct perf_event_context	*ctx;
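
The shadow_ctx_time comment implies the following reader-side computation; perf_clock() is internal to the perf core, so treat this as pseudocode for what the sampling path does, not a callable helper. The three u16 fields appear to cache the record-header, identity-trailer, and read-format sizes so the output path need not recompute them per event:

    /* NMI-safe: no ctx->lock, just two monotonically related values. */
    static u64 sample_ctx_time(struct perf_event *event)
    {
    	return event->shadow_ctx_time + perf_clock();
    }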
@@ -840,6 +867,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
 
@@ -876,6 +904,7 @@ struct perf_cpu_context {
 	int				exclusive;
 	struct list_head		rotation_list;
 	int				jiffies_interval;
+	struct pmu			*active_pmu;
 };
 
 struct perf_output_handle {
@@ -891,27 +920,13 @@ struct perf_output_handle {
 
 #ifdef CONFIG_PERF_EVENTS
 
-extern int perf_pmu_register(struct pmu *pmu);
+extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
 extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
 extern void __perf_event_task_sched_in(struct task_struct *task);
 extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-
-extern atomic_t perf_task_events;
-
-static inline void perf_event_task_sched_in(struct task_struct *task)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
-}
-
-static inline
-void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
-{
-	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
-}
-
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
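
With the widened signature, a PMU driver passes a stable name plus either a fixed type (e.g. PERF_TYPE_SOFTWARE) or a negative value to ask the core for a dynamically allocated type id. A sketch under those assumptions; my_pmu and its init function are hypothetical and the callbacks are elided:

    static struct pmu my_pmu = {
    	/* .event_init, .add, .del, .start, .stop, .read elided */
    };

    static int __init my_pmu_init(void)
    {
    	/* -1: let the core hand out a dynamic type id */
    	return perf_pmu_register(&my_pmu, "my_pmu", -1);
    }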
@@ -972,6 +987,11 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
 				 struct perf_sample_data *data,
 				 struct pt_regs *regs);
 
+static inline bool is_sampling_event(struct perf_event *event)
+{
+	return event->attr.sample_period != 0;
+}
+
 /*
  * Return 1 for a software event, 0 for a hardware event
  */
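
is_sampling_event() gives callers one spelling for the "does this event generate samples?" test instead of open-coded sample_period checks. A typical driver-side use, as a sketch (hypothetical event_init for a counting-only PMU; usual kernel includes assumed):

    static int my_event_init(struct perf_event *event)
    {
    	/* counting only: no overflow interrupt, so reject samplers */
    	if (is_sampling_event(event))
    		return -EOPNOTSUPP;
    	return 0;
    }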
@@ -1020,6 +1040,21 @@ have_event:
 	__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
+
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
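
A note on the relocation in this last hunk: perf_event_task_sched_out() now emits a PERF_COUNT_SW_CONTEXT_SWITCHES software event, so both scheduling inlines had to move below the definition of perf_sw_event() (the have_event: hunk above), since a static inline cannot call a function the header has not yet defined. The COND_STMT() gating on perf_task_events is otherwise unchanged from the removed copies earlier in the file.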