 arch/x86/kernel/cpu/perf_counter.c | 36
 include/linux/perf_counter.h       |  9
 kernel/perf_counter.c              | 95
 3 files changed, 92 insertions(+), 48 deletions(-)
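In short: the atomic64_counter_set()/atomic64_counter_read() helpers move out of the x86 driver into generic code and gain extern declarations; struct hw_perf_counter_ops becomes const everywhere it is referenced; the global-disable API becomes a save/restore pair (hw_perf_disable_all() now returns the previous control state, which hw_perf_restore_ctrl() reinstates); and the first software counter, PERF_COUNT_CPU_CLOCK, is wired up through a new sw_perf_counter_init().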
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 718b635dece6..43c8e9a38b4e 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -178,35 +178,6 @@ static void x86_perf_counter_enable(struct perf_counter *counter)
 	__x86_perf_counter_enable(hwc, idx);
 }
 
-#ifdef CONFIG_X86_64
-static inline void atomic64_counter_set(struct perf_counter *counter, u64 val)
-{
-	atomic64_set(&counter->count, val);
-}
-
-static inline u64 atomic64_counter_read(struct perf_counter *counter)
-{
-	return atomic64_read(&counter->count);
-}
-#else
-/*
- * Todo: add proper atomic64_t support to 32-bit x86:
- */
-static inline void atomic64_counter_set(struct perf_counter *counter, u64 val64)
-{
-	u32 *val32 = (void *)&val64;
-
-	atomic_set(counter->count32 + 0, *(val32 + 0));
-	atomic_set(counter->count32 + 1, *(val32 + 1));
-}
-
-static inline u64 atomic64_counter_read(struct perf_counter *counter)
-{
-	return atomic_read(counter->count32 + 0) |
-		(u64) atomic_read(counter->count32 + 1) << 32;
-}
-#endif
-
 static void __hw_perf_save_counter(struct perf_counter *counter,
 				   struct hw_perf_counter *hwc, int idx)
 {
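(Nothing is actually lost in the 29 deleted lines: the same helpers reappear in kernel/perf_counter.c later in this patch, minus the static inline qualifiers, with matching extern declarations added to the header, so the new generic software counters can share them.)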
@@ -309,7 +280,7 @@ static void x86_perf_counter_read(struct perf_counter *counter)
 	} while (offs != hwc->prev_count);
 
 	val32 = (s32) val;
-	val = (s64)hwc->irq_period + (s64)val32;
+	val = (s64)hwc->irq_period + (s64)val32;
 	atomic64_counter_set(counter, hwc->prev_count + val);
 }
 
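The -/+ pair above differs only in whitespace that this rendering does not preserve: the hunk counts (-309,7 +280,7) require exactly one changed line, yet the visible text on both sides is identical. The same applies to two hunks further down — the hw_perf_disable_all() stub in include/linux/perf_counter.h and the hw_perf_counter_setup() weak alias in kernel/perf_counter.c.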
@@ -573,13 +544,14 @@ void __init init_hw_perf_counters(void)
 	perf_counters_initialized = true;
 }
 
-static struct hw_perf_counter_ops x86_perf_counter_ops = {
+static const struct hw_perf_counter_ops x86_perf_counter_ops = {
 	.hw_perf_counter_enable		= x86_perf_counter_enable,
 	.hw_perf_counter_disable	= x86_perf_counter_disable,
 	.hw_perf_counter_read		= x86_perf_counter_read,
 };
 
-struct hw_perf_counter_ops *hw_perf_counter_init(struct perf_counter *counter)
+const struct hw_perf_counter_ops *
+hw_perf_counter_init(struct perf_counter *counter)
 {
 	int err;
 
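Constifying an ops table like this is the usual kernel pattern: the function-pointer table can be placed in read-only memory, and an accidental store through any pointer to it becomes a compile-time error instead of a latent runtime corruption. A minimal stand-alone sketch (illustrative names, not from this patch):

        struct counter_ops {
                void (*enable)(void);
                void (*disable)(void);
        };

        static void noop(void) { }

        static const struct counter_ops ops = {
                .enable		= noop,
                .disable	= noop,
        };

        /* ops.enable = noop; -- rejected by the compiler: 'ops' is
         * const-qualified, so the table can live in .rodata */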
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 27385641ecb6..9a1713a1be27 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -131,7 +131,7 @@ struct perf_counter {
 	struct list_head		list_entry;
 	struct list_head		sibling_list;
 	struct perf_counter		*group_leader;
-	struct hw_perf_counter_ops	*hw_ops;
+	const struct hw_perf_counter_ops *hw_ops;
 
 	int				active;
 #if BITS_PER_LONG == 64
@@ -197,7 +197,7 @@ struct perf_cpu_context {
 extern int perf_max_counters;
 
 #ifdef CONFIG_PERF_COUNTERS
-extern struct hw_perf_counter_ops *
+extern const struct hw_perf_counter_ops *
 hw_perf_counter_init(struct perf_counter *counter);
 
 extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
@@ -208,6 +208,9 @@ extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
 extern void hw_perf_restore_ctrl(u64 ctrl);
 extern u64 hw_perf_disable_all(void);
+extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
+extern u64 atomic64_counter_read(struct perf_counter *counter);
+
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
@@ -219,7 +222,7 @@ static inline void perf_counter_init_task(struct task_struct *task)	{ }
 static inline void perf_counter_notify(struct pt_regs *regs)		{ }
 static inline void perf_counter_print_debug(void)			{ }
 static inline void hw_perf_restore_ctrl(u64 ctrl)			{ }
-static inline u64 hw_perf_disable_all(void) { return 0; }
+static inline u64 hw_perf_disable_all(void)		{ return 0; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
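The #else half of this block is what keeps call sites #ifdef-free: the scheduler can call perf_counter_task_sched_in() and friends unconditionally, and in !CONFIG_PERF_COUNTERS builds the empty static inlines compile away to nothing. A reduced sketch of the idiom, with hypothetical names:

        #ifdef CONFIG_FOO
        extern void foo_hook(int cpu);		/* real version in foo.c */
        #else
        static inline void foo_hook(int cpu)	{ }
        #endif
        /* callers invoke foo_hook() unconditionally; with CONFIG_FOO=n
         * the empty inline vanishes entirely from the object code */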
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e6e41ca95463..506286e5ba63 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -37,15 +37,15 @@ static DEFINE_MUTEX(perf_resource_mutex);
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak struct hw_perf_counter_ops *
+extern __weak const struct hw_perf_counter_ops *
 hw_perf_counter_init(struct perf_counter *counter)
 {
 	return ERR_PTR(-EINVAL);
 }
 
-void __weak hw_perf_disable_all(void) { }
-void __weak hw_perf_enable_all(void) { }
-void __weak hw_perf_counter_setup(void) { }
+u64 __weak hw_perf_disable_all(void)	{ return 0; }
+void __weak hw_perf_restore_ctrl(u64 ctrl)	{ }
+void __weak hw_perf_counter_setup(void)	{ }
 
 #if BITS_PER_LONG == 64
 
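Besides the const qualifier, this hunk brings the weak defaults in line with the save/restore API: hw_perf_disable_all() now returns the saved state, and hw_perf_enable_all() gives way to hw_perf_restore_ctrl(). The __weak annotation makes these definitions placeholders that an architecture (x86, above) replaces simply by defining non-weak symbols of the same name — no #ifdef or registration step needed. A reduced sketch of the idiom, with a hypothetical symbol:

        /* generic code: weak default, used only if nothing overrides it */
        u64 __weak hw_ctrl_save(void)
        {
                return 0;	/* no hardware state to save */
        }

        /* arch code: an ordinary (strong) definition with the same
         * signature silently wins at link time */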
@@ -58,6 +58,16 @@ static inline u64 perf_counter_read_safe(struct perf_counter *counter)
 	return (u64) atomic64_read(&counter->count);
 }
 
+void atomic64_counter_set(struct perf_counter *counter, u64 val)
+{
+	atomic64_set(&counter->count, val);
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+	return atomic64_read(&counter->count);
+}
+
 #else
 
 /*
@@ -79,6 +89,20 @@ static u64 perf_counter_read_safe(struct perf_counter *counter)
 	return cntl | ((u64) cnth) << 32;
 }
 
+void atomic64_counter_set(struct perf_counter *counter, u64 val64)
+{
+	u32 *val32 = (void *)&val64;
+
+	atomic_set(counter->count32 + 0, *(val32 + 0));
+	atomic_set(counter->count32 + 1, *(val32 + 1));
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+	return atomic_read(counter->count32 + 0) |
+		(u64) atomic_read(counter->count32 + 1) << 32;
+}
+
 #endif
 
 static void
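One caveat carries over from the x86 copy (hence the "Todo: add proper atomic64_t support to 32-bit x86" comment in the code removed above): the 32-bit fallback is not a true 64-bit atomic. The halves are written by two separate atomic_set() calls, so a concurrent atomic64_counter_read() can observe a torn value. A sketch of the window, assuming a counter crossing a low-word wrap:

        /* writer: counter advances 0x00000000ffffffff -> 0x0000000100000000 */
        atomic_set(counter->count32 + 0, 0x00000000);	/* low half first */
        /* a reader that runs here composes low = 0, high = 0 and
         * returns 0 -- off by 2^32 until the next store lands */
        atomic_set(counter->count32 + 1, 0x00000001);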
@@ -131,6 +155,7 @@ static void __perf_counter_remove_from_context(void *info)
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
+	u64 perf_flags;
 
 	/*
 	 * If this is a task context, we need to check whether it is
@@ -155,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	hw_perf_disable_all();
+	perf_flags = hw_perf_disable_all();
 	list_del_counter(counter, ctx);
-	hw_perf_enable_all();
+	hw_perf_restore_ctrl(perf_flags);
 
 	if (!ctx->task) {
 		/*
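These call sites adopt the save/restore discipline the patch introduces, in the spirit of local_irq_save()/local_irq_restore(): hw_perf_disable_all() hands back the previous counter-control state as a u64 cookie, and hw_perf_restore_ctrl() reinstates exactly that state rather than unconditionally re-enabling. The recurring pattern in this and the following hunks:

        u64 perf_flags;

        perf_flags = hw_perf_disable_all();	/* save old ctrl, disable */
        /* ... NMI-safe manipulation of the counter lists ... */
        hw_perf_restore_ctrl(perf_flags);	/* put the saved state back */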
@@ -232,6 +257,7 @@ static void __perf_install_in_context(void *info)
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
 	int cpu = smp_processor_id();
+	u64 perf_flags;
 
 	/*
 	 * If this is a task context, we need to check whether it is
@@ -247,9 +273,9 @@ static void __perf_install_in_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	hw_perf_disable_all();
+	perf_flags = hw_perf_disable_all();
 	list_add_counter(counter, ctx);
-	hw_perf_enable_all();
+	hw_perf_restore_ctrl(perf_flags);
 
 	ctx->nr_counters++;
 
@@ -457,6 +483,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
 	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
 	struct perf_counter *counter;
+	u64 perf_flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
@@ -468,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 	/*
 	 * Rotate the first entry last (works just fine for group counters too):
 	 */
-	hw_perf_disable_all();
+	perf_flags = hw_perf_disable_all();
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		list_del(&counter->list_entry);
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 		break;
 	}
-	hw_perf_enable_all();
+	hw_perf_restore_ctrl(perf_flags);
 
 	spin_unlock(&ctx->lock);
 
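The rotation loop is easy to misread: it breaks unconditionally after the first entry, so its whole effect is to move the head entry to the tail of the list. A hypothetical equivalent using the generic list helpers directly:

        if (!list_empty(&ctx->counter_list))
                list_move_tail(ctx->counter_list.next, &ctx->counter_list);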
@@ -807,6 +834,42 @@ static const struct file_operations perf_fops = {
 	.poll			= perf_poll,
 };
 
+static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_read(struct perf_counter *counter)
+{
+	int cpu = raw_smp_processor_id();
+
+	atomic64_counter_set(counter, cpu_clock(cpu));
+}
+
+static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
+	.hw_perf_counter_enable		= cpu_clock_perf_counter_enable,
+	.hw_perf_counter_disable	= cpu_clock_perf_counter_disable,
+	.hw_perf_counter_read		= cpu_clock_perf_counter_read,
+};
+
+static const struct hw_perf_counter_ops *
+sw_perf_counter_init(struct perf_counter *counter)
+{
+	const struct hw_perf_counter_ops *hw_ops = NULL;
+
+	switch (counter->hw_event.type) {
+	case PERF_COUNT_CPU_CLOCK:
+		hw_ops = &perf_ops_cpu_clock;
+		break;
+	default:
+		break;
+	}
+	return hw_ops;
+}
+
 /*
  * Allocate and initialize a counter structure
  */
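cpu_clock() already provides a per-CPU scheduler-clock timestamp, so this software counter needs no enable/disable state — those callbacks are deliberately empty — and its read callback just republishes the clock through atomic64_counter_set(). Further software counters would slot into the same switch; purely as an illustration (not part of this patch, and assuming a PERF_COUNT_TASK_CLOCK event type exists), a task-clock variant might look like:

        static void task_clock_perf_counter_read(struct perf_counter *counter)
        {
                atomic64_counter_set(counter, current->se.sum_exec_runtime);
        }

        static const struct hw_perf_counter_ops perf_ops_task_clock = {
                .hw_perf_counter_enable		= cpu_clock_perf_counter_enable,
                .hw_perf_counter_disable	= cpu_clock_perf_counter_disable,
                .hw_perf_counter_read		= task_clock_perf_counter_read,
        };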
@@ -815,7 +878,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 		   int cpu,
 		   struct perf_counter *group_leader)
 {
-	struct hw_perf_counter_ops *hw_ops;
+	const struct hw_perf_counter_ops *hw_ops;
 	struct perf_counter *counter;
 
 	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
@@ -842,7 +905,13 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 	counter->group_leader		= group_leader;
 	counter->hw_ops			= NULL;
 
-	hw_ops = hw_perf_counter_init(counter);
+	hw_ops = NULL;
+	if (!hw_event->raw && hw_event->type < 0)
+		hw_ops = sw_perf_counter_init(counter);
+	if (!hw_ops) {
+		hw_ops = hw_perf_counter_init(counter);
+	}
+
 	if (!hw_ops) {
 		kfree(counter);
 		return NULL;
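The dispatch rule: counters with a raw event or a non-negative type go straight to the architecture's hw_perf_counter_init(), exactly as before; negative non-raw types are offered to sw_perf_counter_init() first, falling back to the hardware path if the switch recognizes nothing. (This implies the software event codes such as PERF_COUNT_CPU_CLOCK are negative constants — otherwise the new path would be unreachable.)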
@@ -912,7 +981,7 @@ asmlinkage int sys_perf_counter_open(
 		goto err_put_context;
 	}
 
-	ret = -ENOMEM;
+	ret = -EINVAL;
 	counter = perf_counter_alloc(&hw_event, cpu, group_leader);
 	if (!counter)
 		goto err_put_context;
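The errno change at the bottom tracks the new failure mode: a NULL return from perf_counter_alloc() can now mean "no ops available for this counter type" rather than only an allocation failure, and -EINVAL is the closer fit for that case (at the cost of reporting a genuine allocation failure as -EINVAL as well).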