Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c  |  95
1 file changed, 82 insertions(+), 13 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e6e41ca95463..506286e5ba63 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -37,15 +37,15 @@ static DEFINE_MUTEX(perf_resource_mutex);
 /*
  * Architecture provided APIs - weak aliases:
  */
-extern __weak struct hw_perf_counter_ops *
+extern __weak const struct hw_perf_counter_ops *
 hw_perf_counter_init(struct perf_counter *counter)
 {
 	return ERR_PTR(-EINVAL);
 }
 
-void __weak hw_perf_disable_all(void) { }
-void __weak hw_perf_enable_all(void) { }
+u64 __weak hw_perf_disable_all(void) { return 0; }
+void __weak hw_perf_restore_ctrl(u64 ctrl) { }
 void __weak hw_perf_counter_setup(void) { }
 
 #if BITS_PER_LONG == 64
 
@@ -58,6 +58,16 @@ static inline u64 perf_counter_read_safe(struct perf_counter *counter)
 	return (u64) atomic64_read(&counter->count);
 }
 
+void atomic64_counter_set(struct perf_counter *counter, u64 val)
+{
+	atomic64_set(&counter->count, val);
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+	return atomic64_read(&counter->count);
+}
+
 #else
 
 /*
@@ -79,6 +89,20 @@ static u64 perf_counter_read_safe(struct perf_counter *counter)
 	return cntl | ((u64) cnth) << 32;
 }
 
+void atomic64_counter_set(struct perf_counter *counter, u64 val64)
+{
+	u32 *val32 = (void *)&val64;
+
+	atomic_set(counter->count32 + 0, *(val32 + 0));
+	atomic_set(counter->count32 + 1, *(val32 + 1));
+}
+
+u64 atomic64_counter_read(struct perf_counter *counter)
+{
+	return atomic_read(counter->count32 + 0) |
+		(u64) atomic_read(counter->count32 + 1) << 32;
+}
+
 #endif
 
 static void
@@ -131,6 +155,7 @@ static void __perf_counter_remove_from_context(void *info)
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
+	u64 perf_flags;
 
 	/*
 	 * If this is a task context, we need to check whether it is
@@ -155,9 +180,9 @@ static void __perf_counter_remove_from_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	hw_perf_disable_all();
+	perf_flags = hw_perf_disable_all();
 	list_del_counter(counter, ctx);
-	hw_perf_enable_all();
+	hw_perf_restore_ctrl(perf_flags);
 
 	if (!ctx->task) {
 		/*
@@ -232,6 +257,7 @@ static void __perf_install_in_context(void *info)
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
 	int cpu = smp_processor_id();
+	u64 perf_flags;
 
 	/*
 	 * If this is a task context, we need to check whether it is
@@ -247,9 +273,9 @@ static void __perf_install_in_context(void *info)
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level. NOP for non NMI based counters.
 	 */
-	hw_perf_disable_all();
+	perf_flags = hw_perf_disable_all();
 	list_add_counter(counter, ctx);
-	hw_perf_enable_all();
+	hw_perf_restore_ctrl(perf_flags);
 
 	ctx->nr_counters++;
 
@@ -457,6 +483,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
 	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
 	struct perf_counter *counter;
+	u64 perf_flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
@@ -468,13 +495,13 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 	/*
 	 * Rotate the first entry last (works just fine for group counters too):
 	 */
-	hw_perf_disable_all();
+	perf_flags = hw_perf_disable_all();
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		list_del(&counter->list_entry);
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 		break;
 	}
-	hw_perf_enable_all();
+	hw_perf_restore_ctrl(perf_flags);
 
 	spin_unlock(&ctx->lock);
 
@@ -807,6 +834,42 @@ static const struct file_operations perf_fops = {
 	.poll = perf_poll,
 };
 
+static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
+{
+}
+
+static void cpu_clock_perf_counter_read(struct perf_counter *counter)
+{
+	int cpu = raw_smp_processor_id();
+
+	atomic64_counter_set(counter, cpu_clock(cpu));
+}
+
+static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
+	.hw_perf_counter_enable = cpu_clock_perf_counter_enable,
+	.hw_perf_counter_disable = cpu_clock_perf_counter_disable,
+	.hw_perf_counter_read = cpu_clock_perf_counter_read,
+};
+
+static const struct hw_perf_counter_ops *
+sw_perf_counter_init(struct perf_counter *counter)
+{
+	const struct hw_perf_counter_ops *hw_ops = NULL;
+
+	switch (counter->hw_event.type) {
+	case PERF_COUNT_CPU_CLOCK:
+		hw_ops = &perf_ops_cpu_clock;
+		break;
+	default:
+		break;
+	}
+	return hw_ops;
+}
+
 /*
  * Allocate and initialize a counter structure
  */
@@ -815,7 +878,7 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
		   struct perf_counter *group_leader)
 {
-	struct hw_perf_counter_ops *hw_ops;
+	const struct hw_perf_counter_ops *hw_ops;
 	struct perf_counter *counter;
 
 	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
@@ -842,7 +905,13 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 	counter->group_leader = group_leader;
 	counter->hw_ops = NULL;
 
-	hw_ops = hw_perf_counter_init(counter);
+	hw_ops = NULL;
+	if (!hw_event->raw && hw_event->type < 0)
+		hw_ops = sw_perf_counter_init(counter);
+	if (!hw_ops) {
+		hw_ops = hw_perf_counter_init(counter);
+	}
+
 	if (!hw_ops) {
 		kfree(counter);
 		return NULL;
@@ -912,7 +981,7 @@ asmlinkage int sys_perf_counter_open(
 		goto err_put_context;
 	}
 
-	ret = -ENOMEM;
+	ret = -EINVAL;
 	counter = perf_counter_alloc(&hw_event, cpu, group_leader);
 	if (!counter)
 		goto err_put_context;
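
Note (not part of the patch): the recurring change above is that hw_perf_disable_all() now returns the previous global counter-control state as a u64, which callers hand back to hw_perf_restore_ctrl() instead of calling the old unconditional hw_perf_enable_all(). Below is a minimal sketch of that save/restore calling convention; example_update() and update_counter_list() are hypothetical stand-ins for the guarded list operations, and the stubs only mirror the weak defaults added in the first hunk.

#include <linux/types.h>

/* Sketch-only stand-ins for the weak defaults added by this patch. */
static u64 hw_perf_disable_all(void) { return 0; }
static void hw_perf_restore_ctrl(u64 ctrl) { }

/* Hypothetical list update that must not race with counter NMIs. */
static void update_counter_list(void)
{
}

/* Hypothetical caller: save the control state, mutate, then restore it. */
static void example_update(void)
{
	u64 perf_flags;

	perf_flags = hw_perf_disable_all();	/* returns previous ctrl state */
	update_counter_list();
	hw_perf_restore_ctrl(perf_flags);	/* restore saved state, not a blind re-enable */
}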