Diffstat (limited to 'include/linux/perf_event.h')
 -rw-r--r--	include/linux/perf_event.h	125
 1 file changed, 59 insertions(+), 66 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 311b4dc785a1..3412684ce5d5 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -2,8 +2,8 @@
  * Performance events:
  *
  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
  *
  * Data type definitions, declarations, prototypes.
  *
@@ -52,6 +52,8 @@ enum perf_hw_id {
 	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
 	PERF_COUNT_HW_BRANCH_MISSES		= 5,
 	PERF_COUNT_HW_BUS_CYCLES		= 6,
+	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
+	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
 
 	PERF_COUNT_HW_MAX,			/* non-ABI */
 };
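The two new enum values extend the generic hardware event ABI. As an illustration of how userspace could request one of them once a PMU maps it (a minimal sketch, not part of this patch; assumes a kernel with this change and hardware support):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_HARDWARE;
	attr.config   = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND;
	attr.disabled = 1;

	/* perf_event_open() has no glibc wrapper; invoke the syscall directly */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload being measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("frontend stall cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}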
@@ -468,9 +470,9 @@ enum perf_callchain_context {
 	PERF_CONTEXT_MAX		= (__u64)-4095,
 };
 
-#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
-#define PERF_FLAG_FD_OUTPUT	(1U << 1)
-#define PERF_FLAG_PID_CGROUP	(1U << 2) /* pid=cgroup id, per-cpu mode only */
+#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
+#define PERF_FLAG_FD_OUTPUT		(1U << 1)
+#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */
 
 #ifdef __KERNEL__
 /*
@@ -484,9 +486,9 @@ enum perf_callchain_context {
 #endif
 
 struct perf_guest_info_callbacks {
-	int (*is_in_guest) (void);
-	int (*is_user_mode) (void);
-	unsigned long (*get_guest_ip) (void);
+	int (*is_in_guest)(void);
+	int (*is_user_mode)(void);
+	unsigned long (*get_guest_ip)(void);
 };
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
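These hooks are filled in by a hypervisor so perf can attribute guest samples. A sketch of the registration pattern (modeled on KVM's use of this struct; the my_hv_* helper names are hypothetical):

/* hypothetical hypervisor-side helpers */
static int my_hv_is_in_guest(void);
static int my_hv_is_user_mode(void);
static unsigned long my_hv_get_guest_ip(void);

static struct perf_guest_info_callbacks my_hv_guest_cbs = {
	.is_in_guest	= my_hv_is_in_guest,
	.is_user_mode	= my_hv_is_user_mode,
	.get_guest_ip	= my_hv_get_guest_ip,
};

static int __init my_hv_init(void)
{
	return perf_register_guest_info_callbacks(&my_hv_guest_cbs);
}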
@@ -505,7 +507,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
-#include <linux/jump_label_ref.h>
+#include <linux/jump_label.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
 
@@ -652,19 +654,19 @@ struct pmu {
 	 * Start the transaction, after this ->add() doesn't need to
 	 * do schedulability tests.
 	 */
-	void (*start_txn)	(struct pmu *pmu); /* optional */
+	void (*start_txn)		(struct pmu *pmu); /* optional */
 	/*
 	 * If ->start_txn() disabled the ->add() schedulability test
 	 * then ->commit_txn() is required to perform one. On success
 	 * the transaction is closed. On error the transaction is kept
 	 * open until ->cancel_txn() is called.
 	 */
-	int  (*commit_txn)	(struct pmu *pmu); /* optional */
+	int  (*commit_txn)		(struct pmu *pmu); /* optional */
 	/*
 	 * Will cancel the transaction, assumes ->del() is called
-	 * for each successfull ->add() during the transaction.
+	 * for each successful ->add() during the transaction.
 	 */
-	void (*cancel_txn)	(struct pmu *pmu); /* optional */
+	void (*cancel_txn)		(struct pmu *pmu); /* optional */
 };
 
 /**
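For context, these transaction hooks let the core schedule a whole event group onto the PMU with a single combined schedulability test. A hedged sketch of the caller-side protocol (loosely modeled on group_sched_in() in kernel/events/core.c; event_sched_in()/event_sched_out() are hypothetical stand-ins for the real ->add()/->del() paths):

static int group_sched_in_sketch(struct pmu *pmu, struct perf_event *group)
{
	struct perf_event *event, *partial;

	pmu->start_txn(pmu);		/* ->add() may now skip its schedulability test */

	if (event_sched_in(group))	/* hypothetical helper wrapping ->add() */
		goto txn_cancel;

	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (event_sched_in(event)) {
			partial = event;
			goto unwind;
		}
	}

	if (!pmu->commit_txn(pmu))	/* one combined test for the whole group */
		return 0;

	partial = NULL;			/* commit failed: undo every sibling */
unwind:
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (event == partial)
			break;
		event_sched_out(event);	/* hypothetical helper wrapping ->del() */
	}
	event_sched_out(group);
txn_cancel:
	pmu->cancel_txn(pmu);		/* per the comment above: ->del() was done first */
	return -EAGAIN;
}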
@@ -712,15 +714,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
 					struct pt_regs *regs);
 
 enum perf_group_flag {
-	PERF_GROUP_SOFTWARE = 0x1,
+	PERF_GROUP_SOFTWARE		= 0x1,
 };
 
-#define SWEVENT_HLIST_BITS	8
-#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)
+#define SWEVENT_HLIST_BITS		8
+#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
 
 struct swevent_hlist {
-	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
-	struct rcu_head		rcu_head;
+	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head			rcu_head;
 };
 
 #define PERF_ATTACH_CONTEXT	0x01
@@ -733,13 +735,13 @@ struct swevent_hlist {
  * This is a per-cpu dynamically allocated data structure.
  */
 struct perf_cgroup_info {
-	u64 time;
-	u64 timestamp;
+	u64				time;
+	u64				timestamp;
 };
 
 struct perf_cgroup {
-	struct cgroup_subsys_state css;
-	struct perf_cgroup_info *info;	/* timing info, one per cpu */
+	struct cgroup_subsys_state	css;
+	struct perf_cgroup_info		*info;	/* timing info, one per cpu */
 };
 #endif
 
@@ -923,7 +925,7 @@ struct perf_event_context {
 
 /*
  * Number of contexts where an event can trigger:
- * task, softirq, hardirq, nmi.
+ *	task, softirq, hardirq, nmi.
  */
 #define PERF_NR_CONTEXTS	4
 
@@ -1001,8 +1003,7 @@ struct perf_sample_data {
 	struct perf_raw_record		*raw;
 };
 
-static inline
-void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
 {
 	data->addr = addr;
 	data->raw  = NULL;
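Typical use, for reference: a PMU driver initializes the sample on its own stack before pushing it into the overflow path (a sketch; my_pmu_stop() is a hypothetical throttling hook, and perf_event_overflow()'s signature of this era still carries the nmi flag):

static void my_pmu_handle_overflow_sketch(struct perf_event *event,
					  struct pt_regs *regs)
{
	struct perf_sample_data data;

	/* reset the fields consumers check (addr, raw), then fill in period */
	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	if (perf_event_overflow(event, 1, &data, regs))
		my_pmu_stop(event);	/* hypothetical: event hit its throttle limit */
}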
@@ -1034,13 +1035,12 @@ static inline int is_software_event(struct perf_event *event)
 	return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
-extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
-static inline void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
 #endif
 
 /*
@@ -1063,30 +1063,28 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
 	struct pt_regs hot_regs;
 
-	JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
-	return;
-
-have_event:
-	if (!regs) {
-		perf_fetch_caller_regs(&hot_regs);
-		regs = &hot_regs;
+	if (static_branch(&perf_swevent_enabled[event_id])) {
+		if (!regs) {
+			perf_fetch_caller_regs(&hot_regs);
+			regs = &hot_regs;
+		}
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
 	}
-	__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
-extern atomic_t perf_sched_events;
+extern struct jump_label_key perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *task)
 {
-	COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
+	if (static_branch(&perf_sched_events))
+		__perf_event_task_sched_in(task);
 }
 
-static inline
-void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
-	COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next));
+	__perf_event_task_sched_out(task, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
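The atomic_t counters become jump label keys here: instead of a load-and-test on every call, static_branch() compiles to a patchable nop that the jump label machinery rewrites into a jump only while the key's count is non-zero. A minimal sketch of the pattern with the 2.6.39-era API (my_feature_key and do_slow_accounting() are illustrative names, not kernel symbols):

#include <linux/jump_label.h>

static struct jump_label_key my_feature_key;

static void do_slow_accounting(void) { }	/* illustrative slow-path hook */

static void my_feature_get(void)
{
	jump_label_inc(&my_feature_key);	/* 0 -> 1: patch the nop into a jump */
}

static void my_feature_put(void)
{
	jump_label_dec(&my_feature_key);	/* 1 -> 0: patch the jump back to a nop */
}

static void hot_path(void)
{
	/* nearly free while the key is zero: execution falls through a nop */
	if (static_branch(&my_feature_key))
		do_slow_accounting();
}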
@@ -1100,14 +1098,10 @@ extern void perf_event_fork(struct task_struct *tsk);
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry,
-				struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
-				  struct pt_regs *regs);
-
+extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
 
-static inline void
-perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 {
 	if (entry->nr < PERF_MAX_STACK_DEPTH)
 		entry->ip[entry->nr++] = ip;
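perf_callchain_store() is the helper each architecture's unwinder feeds, one frame at a time, and it silently stops at PERF_MAX_STACK_DEPTH. A hedged sketch of a kernel-side frame-pointer walker, loosely modeled on the x86 convention where fp[0] holds the saved frame pointer and fp[1] the return address (simplified; not the actual arch code, which also bounds-checks fp itself):

void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	unsigned long *fp = (unsigned long *)regs->bp;	/* x86 frame pointer */

	perf_callchain_store(entry, regs->ip);		/* the sampled instruction */

	while (fp && kernel_text_address(fp[1])) {
		perf_callchain_store(entry, fp[1]);	/* caller's return address */
		fp = (unsigned long *)fp[0];		/* walk to the next frame */
	}
}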
@@ -1143,9 +1137,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record,
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
-#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
-				 PERF_RECORD_MISC_KERNEL)
-#define perf_instruction_pointer(regs)	instruction_pointer(regs)
+# define perf_misc_flags(regs) \
+		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
+# define perf_instruction_pointer(regs)	instruction_pointer(regs)
 #endif
 
 extern int perf_output_begin(struct perf_output_handle *handle,
@@ -1180,9 +1174,9 @@ static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
 static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
 static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
@@ -1195,23 +1189,22 @@ static inline void perf_event_disable(struct perf_event *event)	{ }
 static inline void perf_event_task_tick(void)				{ }
 #endif
 
-#define perf_output_put(handle, x) \
-	perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
 /*
  * This has to have a higher priority than migration_notifier in sched.c.
  */
 #define perf_cpu_notifier(fn)						\
 do {									\
 	static struct notifier_block fn##_nb __cpuinitdata =		\
 		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
 		(void *)(unsigned long)smp_processor_id());		\
 	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
 		(void *)(unsigned long)smp_processor_id());		\
 	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
 		(void *)(unsigned long)smp_processor_id());		\
 	register_cpu_notifier(&fn##_nb);				\
 } while (0)
 
 #endif /* __KERNEL__ */
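A short usage sketch for perf_cpu_notifier(), for reference: the macro first replays CPU_UP_PREPARE, CPU_STARTING and CPU_ONLINE for the calling CPU so the callback also covers the already-running processor, then registers for future hotplug transitions at CPU_PRI_PERF priority. The callback and its my_pmu_* helpers below are hypothetical:

static int __cpuinit
my_pmu_cpu_notify(struct notifier_block *nb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		my_pmu_prepare_cpu(cpu);	/* hypothetical: allocate per-cpu state */
		break;
	case CPU_ONLINE:
		my_pmu_start_cpu(cpu);		/* hypothetical: enable counting */
		break;
	case CPU_DEAD:
		my_pmu_free_cpu(cpu);		/* hypothetical: release per-cpu state */
		break;
	}
	return NOTIFY_OK;
}

static int __init my_pmu_init(void)
{
	perf_cpu_notifier(my_pmu_cpu_notify);
	return 0;
}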