 include/linux/perf_event.h | 96 ++++++++++++++++++++-----------------
 kernel/events/core.c       | 40 ++++++++++----------
 2 files changed, 64 insertions(+), 72 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9eec53d97370..207c16976a17 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -2,8 +2,8 @@
  * Performance events:
  *
  * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
  *
  * Data type definitions, declarations, prototypes.
  *
@@ -468,9 +468,9 @@ enum perf_callchain_context {
 	PERF_CONTEXT_MAX		= (__u64)-4095,
 };
 
-#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
-#define PERF_FLAG_FD_OUTPUT	(1U << 1)
-#define PERF_FLAG_PID_CGROUP	(1U << 2) /* pid=cgroup id, per-cpu mode only */
+#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
+#define PERF_FLAG_FD_OUTPUT		(1U << 1)
+#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */
 
 #ifdef __KERNEL__
 /*
@@ -484,9 +484,9 @@ enum perf_callchain_context {
 #endif
 
 struct perf_guest_info_callbacks {
-	int (*is_in_guest) (void);
-	int (*is_user_mode) (void);
-	unsigned long (*get_guest_ip) (void);
+	int (*is_in_guest)(void);
+	int (*is_user_mode)(void);
+	unsigned long (*get_guest_ip)(void);
 };
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -652,19 +652,19 @@ struct pmu {
 	 * Start the transaction, after this ->add() doesn't need to
 	 * do schedulability tests.
 	 */
-	void (*start_txn)	(struct pmu *pmu); /* optional */
+	void (*start_txn)		(struct pmu *pmu); /* optional */
 	/*
 	 * If ->start_txn() disabled the ->add() schedulability test
 	 * then ->commit_txn() is required to perform one. On success
 	 * the transaction is closed. On error the transaction is kept
 	 * open until ->cancel_txn() is called.
 	 */
-	int  (*commit_txn)	(struct pmu *pmu); /* optional */
+	int  (*commit_txn)		(struct pmu *pmu); /* optional */
 	/*
 	 * Will cancel the transaction, assumes ->del() is called
 	 * for each successful ->add() during the transaction.
 	 */
-	void (*cancel_txn)	(struct pmu *pmu); /* optional */
+	void (*cancel_txn)		(struct pmu *pmu); /* optional */
 };
 
 /**
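Taken together, the three transaction hooks let a whole event group be added with a single schedulability test. A condensed, illustrative sketch of the caller side, loosely based on group_sched_in() in kernel/events/core.c (time accounting is omitted; event_sched_in()/event_sched_out() are the helpers that end up calling ->add()/->del()):

static int group_sched_in_sketch(struct perf_event *group_event,
				 struct perf_cpu_context *cpuctx,
				 struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;

	pmu->start_txn(pmu);			/* batch the ->add() calls */

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/* Schedule in the siblings; remember how far we got on failure. */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))		/* one schedulability test for all */
		return 0;			/* success: transaction is closed */

group_error:
	/* ->cancel_txn() assumes ->del() was called for each successful ->add(). */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		event_sched_out(event, cpuctx, ctx);
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);
	return -EAGAIN;
}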
@@ -712,15 +712,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
 					struct pt_regs *regs);
 
 enum perf_group_flag {
-	PERF_GROUP_SOFTWARE = 0x1,
+	PERF_GROUP_SOFTWARE		= 0x1,
 };
 
-#define SWEVENT_HLIST_BITS 8
-#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
+#define SWEVENT_HLIST_BITS		8
+#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
 
 struct swevent_hlist {
-	struct hlist_head heads[SWEVENT_HLIST_SIZE];
-	struct rcu_head rcu_head;
+	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head			rcu_head;
 };
 
 #define PERF_ATTACH_CONTEXT	0x01
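The 2^8 = 256 buckets are indexed by hashing the event type and config together; the bucket lookup in kernel/events/core.c of this era is essentially the following (sketch, names as in core.c):

static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}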
@@ -733,13 +733,13 @@ struct swevent_hlist {
  * This is a per-cpu dynamically allocated data structure.
  */
 struct perf_cgroup_info {
-	u64 time;
-	u64 timestamp;
+	u64				time;
+	u64				timestamp;
 };
 
 struct perf_cgroup {
-	struct cgroup_subsys_state css;
-	struct perf_cgroup_info *info; /* timing info, one per cpu */
+	struct cgroup_subsys_state	css;
+	struct perf_cgroup_info		*info;	/* timing info, one per cpu */
 };
 #endif
 
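Because ->info is a per-cpu allocation, each CPU accounts cgroup-enabled time in its own slot without cross-CPU contention. Advancing the clock then looks essentially like __update_cgrp_time() in kernel/events/core.c (sketch):

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();
	/* Only this CPU's slot is touched, so no locking is needed here. */
	info = this_cpu_ptr(cgrp->info);
	info->time += now - info->timestamp;
	info->timestamp = now;
}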
@@ -923,7 +923,7 @@ struct perf_event_context {
 
 /*
  * Number of contexts where an event can trigger:
- * task, softirq, hardirq, nmi.
+ *	task, softirq, hardirq, nmi.
  */
-#define PERF_NR_CONTEXTS 4
+#define PERF_NR_CONTEXTS	4
 
@@ -1001,8 +1001,7 @@ struct perf_sample_data {
 	struct perf_raw_record		*raw;
 };
 
-static inline
-void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
 {
 	data->addr = addr;
 	data->raw = NULL;
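The initializer exists so PMU interrupt paths can use a stack-allocated sample without leaking stale fields. A typical overflow-path fragment (pattern as in the x86 PMI handler, simplified; event and regs come from the surrounding handler):

	struct perf_sample_data data;

	perf_sample_data_init(&data, 0);	/* addr = 0, raw = NULL */
	data.period = event->hw.last_period;

	/* report the overflow; a nonzero return asks us to throttle */
	if (perf_event_overflow(event, 1, &data, regs))
		event->pmu->stop(event, 0);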
@@ -1039,8 +1038,7 @@ extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
-static inline void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
 #endif
 
 /*
1046 | /* | 1044 | /* |
@@ -1080,8 +1078,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task) | |||
1080 | __perf_event_task_sched_in(task); | 1078 | __perf_event_task_sched_in(task); |
1081 | } | 1079 | } |
1082 | 1080 | ||
1083 | static inline | 1081 | static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) |
1084 | void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) | ||
1085 | { | 1082 | { |
1086 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); | 1083 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); |
1087 | 1084 | ||
@@ -1099,14 +1096,10 @@ extern void perf_event_fork(struct task_struct *tsk);
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry,
-				struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
-				  struct pt_regs *regs);
-
+extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
 
-static inline void
-perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 {
 	if (entry->nr < PERF_MAX_STACK_DEPTH)
 		entry->ip[entry->nr++] = ip;
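perf_callchain_user()/perf_callchain_kernel() are the per-architecture halves: each walks its own stack format and feeds every recovered address through perf_callchain_store(), which caps the chain at PERF_MAX_STACK_DEPTH. A deliberately simplified, hypothetical frame-pointer walker; real architectures use their unwinder (e.g. dump_trace() on x86), and the stack_frame layout here is illustrative only:

/* Hypothetical frame layout; not from any particular architecture. */
struct stack_frame {
	struct stack_frame	*next_frame;	/* saved frame pointer */
	unsigned long		return_address;
};

void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	struct stack_frame *frame = (struct stack_frame *)frame_pointer(regs);

	perf_callchain_store(entry, instruction_pointer(regs));

	/* perf_callchain_store() stops silently at PERF_MAX_STACK_DEPTH. */
	while (frame && kernel_text_address(frame->return_address)) {
		perf_callchain_store(entry, frame->return_address);
		frame = frame->next_frame;
	}
}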
@@ -1142,9 +1135,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record,
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
-#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
-				 PERF_RECORD_MISC_KERNEL)
-#define perf_instruction_pointer(regs)	instruction_pointer(regs)
+# define perf_misc_flags(regs) \
+		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
+# define perf_instruction_pointer(regs)	instruction_pointer(regs)
 #endif
 
 extern int perf_output_begin(struct perf_output_handle *handle,
@@ -1179,9 +1172,9 @@ static inline void
 perf_bp_event(struct perf_event *event, void *data) { }
 
 static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
 static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma) { }
 static inline void perf_event_comm(struct task_struct *tsk) { }
@@ -1194,23 +1187,22 @@ static inline void perf_event_disable(struct perf_event *event) { }
 static inline void perf_event_task_tick(void) { }
 #endif
 
-#define perf_output_put(handle, x) \
-	perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
 /*
  * This has to have a higher priority than migration_notifier in sched.c.
 */
 #define perf_cpu_notifier(fn)						\
 do {									\
 	static struct notifier_block fn##_nb __cpuinitdata =		\
 		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
 		(void *)(unsigned long)smp_processor_id());		\
 	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
 		(void *)(unsigned long)smp_processor_id());		\
 	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
 		(void *)(unsigned long)smp_processor_id());		\
 	register_cpu_notifier(&fn##_nb);				\
 } while (0)
 
 #endif /* __KERNEL__ */
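Note that the macro does two things: it replays CPU_UP_PREPARE/CPU_STARTING/CPU_ONLINE for the CPU it runs on, so the already-online boot CPU gets set up, and then registers the block for future hotplug events. A hypothetical user (the my_pmu_* names are illustrative, not from the tree):

static int __cpuinit
my_pmu_cpu_notifier(struct notifier_block *nb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		my_pmu_prepare_cpu(cpu);	/* allocate per-cpu state */
		break;
	case CPU_STARTING:
		my_pmu_starting_cpu(cpu);	/* runs on the incoming CPU */
		break;
	case CPU_ONLINE:
		my_pmu_online_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}

static int __init my_pmu_init(void)
{
	/* Replays UP_PREPARE/STARTING/ONLINE for this CPU, then registers. */
	perf_cpu_notifier(my_pmu_cpu_notifier);
	return 0;
}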
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 440bc485bbff..0fc34a370ba4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2,8 +2,8 @@
  * Performance events core code:
  *
  * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  *
  * For licensing details see kernel-base/COPYING
@@ -39,10 +39,10 @@
 #include <asm/irq_regs.h>
 
 struct remote_function_call {
-	struct task_struct *p;
-	int (*func)(void *info);
-	void *info;
-	int ret;
+	struct task_struct	*p;
+	int			(*func)(void *info);
+	void			*info;
+	int			ret;
 };
 
 static void remote_function(void *data)
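The hunk cuts off at the IPI handler's signature; for context, remote_function() is the function that consumes this struct, essentially (reproduced from this file, lightly abridged):

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		/* Bail if the task migrated or scheduled out meanwhile. */
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}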
@@ -76,10 +76,10 @@ static int
 task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
 {
 	struct remote_function_call data = {
-		.p = p,
-		.func = func,
-		.info = info,
-		.ret = -ESRCH, /* No such (running) process */
+		.p	= p,
+		.func	= func,
+		.info	= info,
+		.ret	= -ESRCH, /* No such (running) process */
 	};
 
 	if (task_curr(p))
@@ -100,10 +100,10 @@ task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
 static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
 {
 	struct remote_function_call data = {
-		.p = NULL,
-		.func = func,
-		.info = info,
-		.ret = -ENXIO, /* No such CPU */
+		.p	= NULL,
+		.func	= func,
+		.info	= info,
+		.ret	= -ENXIO, /* No such CPU */
 	};
 
 	smp_call_function_single(cpu, remote_function, &data, 1);
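These two helpers let the core run a function either on a fixed CPU or on whatever CPU currently runs a task, retrying when the task moves. A condensed sketch of a caller, loosely based on perf_install_in_context() in this file (locking comments abridged):

static void install_in_context_sketch(struct perf_event_context *ctx,
				      struct perf_event *event, int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/* Per-CPU event: the context is pinned to one CPU. */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;			/* ran on the task's CPU */

	raw_spin_lock_irq(&ctx->lock);
	if (ctx->is_active) {
		/* The task became runnable again; try the IPI once more. */
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}
	/* Task is not running: holding ctx->lock keeps it that way. */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}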
@@ -7445,11 +7445,11 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }
 
 struct cgroup_subsys perf_subsys = {
-	.name = "perf_event",
-	.subsys_id = perf_subsys_id,
-	.create = perf_cgroup_create,
-	.destroy = perf_cgroup_destroy,
-	.exit = perf_cgroup_exit,
-	.attach = perf_cgroup_attach,
+	.name		= "perf_event",
+	.subsys_id	= perf_subsys_id,
+	.create		= perf_cgroup_create,
+	.destroy	= perf_cgroup_destroy,
+	.exit		= perf_cgroup_exit,
+	.attach		= perf_cgroup_attach,
 };
 #endif /* CONFIG_CGROUP_PERF */