-rw-r--r--  arch/arm/oprofile/Makefile       |   4
-rw-r--r--  arch/arm/oprofile/common.c       | 319
-rw-r--r--  drivers/oprofile/oprofile_perf.c | 326
-rw-r--r--  include/linux/oprofile.h         |   3
4 files changed, 333 insertions(+), 319 deletions(-)
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index e666eafed152..b2215c61cdf0 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
 oprofile-y			:= $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 8718311cb530..8aa974491dfc 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -25,136 +25,6 @@
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_HW_PERF_EVENTS
-/*
- * Per performance monitor configuration as set via oprofilefs.
- */
-struct op_counter_config {
-	unsigned long count;
-	unsigned long enabled;
-	unsigned long event;
-	unsigned long unit_mask;
-	unsigned long kernel;
-	unsigned long user;
-	struct perf_event_attr attr;
-};
-
-static int oprofile_perf_enabled;
-static DEFINE_MUTEX(oprofile_perf_mutex);
-
-static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
-static int num_counters;
-
-/*
- * Overflow callback for oprofile.
- */
-static void op_overflow_handler(struct perf_event *event, int unused,
-			struct perf_sample_data *data, struct pt_regs *regs)
-{
-	int id;
-	u32 cpu = smp_processor_id();
-
-	for (id = 0; id < num_counters; ++id)
-		if (perf_events[cpu][id] == event)
-			break;
-
-	if (id != num_counters)
-		oprofile_add_sample(regs, id);
-	else
-		pr_warning("oprofile: ignoring spurious overflow "
-				"on cpu %u\n", cpu);
-}
-
-/*
- * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
- * settings in counter_config. Attributes are created as `pinned' events and
- * so are permanently scheduled on the PMU.
- */
-static void op_perf_setup(void)
-{
-	int i;
-	u32 size = sizeof(struct perf_event_attr);
-	struct perf_event_attr *attr;
-
-	for (i = 0; i < num_counters; ++i) {
-		attr = &counter_config[i].attr;
-		memset(attr, 0, size);
-		attr->type = PERF_TYPE_RAW;
-		attr->size = size;
-		attr->config = counter_config[i].event;
-		attr->sample_period = counter_config[i].count;
-		attr->pinned = 1;
-	}
-}
-
-static int op_create_counter(int cpu, int event)
-{
-	int ret = 0;
-	struct perf_event *pevent;
-
-	if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
-		return ret;
-
-	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
-						  cpu, -1,
-						  op_overflow_handler);
-
-	if (IS_ERR(pevent)) {
-		ret = PTR_ERR(pevent);
-	} else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
-		pr_warning("oprofile: failed to enable event %d "
-				"on CPU %d\n", event, cpu);
-		ret = -EBUSY;
-	} else {
-		perf_events[cpu][event] = pevent;
-	}
-
-	return ret;
-}
-
-static void op_destroy_counter(int cpu, int event)
-{
-	struct perf_event *pevent = perf_events[cpu][event];
-
-	if (pevent) {
-		perf_event_release_kernel(pevent);
-		perf_events[cpu][event] = NULL;
-	}
-}
-
-/*
- * Called by oprofile_perf_start to create active perf events based on the
- * perviously configured attributes.
- */
-static int op_perf_start(void)
-{
-	int cpu, event, ret = 0;
-
-	for_each_online_cpu(cpu) {
-		for (event = 0; event < num_counters; ++event) {
-			ret = op_create_counter(cpu, event);
-			if (ret)
-				goto out;
-		}
-	}
-
-out:
-	return ret;
-}
-
-/*
- * Called by oprofile_perf_stop at the end of a profiling run.
- */
-static void op_perf_stop(void)
-{
-	int cpu, event;
-
-	for_each_online_cpu(cpu)
-		for (event = 0; event < num_counters; ++event)
-			op_destroy_counter(cpu, event);
-}
-
-
 char *op_name_from_perf_id(void)
 {
 	enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
@@ -177,116 +47,6 @@ char *op_name_from_perf_id(void)
 	}
 }
 
-static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
-{
-	unsigned int i;
-
-	for (i = 0; i < num_counters; i++) {
-		struct dentry *dir;
-		char buf[4];
-
-		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
-		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
-		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
-	}
-
-	return 0;
-}
-
-static int oprofile_perf_setup(void)
-{
-	spin_lock(&oprofilefs_lock);
-	op_perf_setup();
-	spin_unlock(&oprofilefs_lock);
-	return 0;
-}
-
-static int oprofile_perf_start(void)
-{
-	int ret = -EBUSY;
-
-	mutex_lock(&oprofile_perf_mutex);
-	if (!oprofile_perf_enabled) {
-		ret = 0;
-		op_perf_start();
-		oprofile_perf_enabled = 1;
-	}
-	mutex_unlock(&oprofile_perf_mutex);
-	return ret;
-}
-
-static void oprofile_perf_stop(void)
-{
-	mutex_lock(&oprofile_perf_mutex);
-	if (oprofile_perf_enabled)
-		op_perf_stop();
-	oprofile_perf_enabled = 0;
-	mutex_unlock(&oprofile_perf_mutex);
-}
-
-#ifdef CONFIG_PM
-static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
-{
-	mutex_lock(&oprofile_perf_mutex);
-	if (oprofile_perf_enabled)
-		op_perf_stop();
-	mutex_unlock(&oprofile_perf_mutex);
-	return 0;
-}
-
-static int oprofile_perf_resume(struct platform_device *dev)
-{
-	mutex_lock(&oprofile_perf_mutex);
-	if (oprofile_perf_enabled && op_perf_start())
-		oprofile_perf_enabled = 0;
-	mutex_unlock(&oprofile_perf_mutex);
-	return 0;
-}
-
-static struct platform_driver oprofile_driver = {
-	.driver		= {
-		.name		= "oprofile-perf",
-	},
-	.resume		= oprofile_perf_resume,
-	.suspend	= oprofile_perf_suspend,
-};
-
-static struct platform_device *oprofile_pdev;
-
-static int __init init_driverfs(void)
-{
-	int ret;
-
-	ret = platform_driver_register(&oprofile_driver);
-	if (ret)
-		goto out;
-
-	oprofile_pdev = platform_device_register_simple(
-				oprofile_driver.driver.name, 0, NULL, 0);
-	if (IS_ERR(oprofile_pdev)) {
-		ret = PTR_ERR(oprofile_pdev);
-		platform_driver_unregister(&oprofile_driver);
-	}
-
-out:
-	return ret;
-}
-
-static void __exit exit_driverfs(void)
-{
-	platform_device_unregister(oprofile_pdev);
-	platform_driver_unregister(&oprofile_driver);
-}
-#else
-static int __init init_driverfs(void) { return 0; }
-#define exit_driverfs() do { } while (0)
-#endif /* CONFIG_PM */
-
 static int report_trace(struct stackframe *frame, void *d)
 {
 	unsigned int *depth = d;
@@ -349,66 +109,6 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 		tail = user_backtrace(tail);
 }
 
-int __init oprofile_perf_init(struct oprofile_operations *ops)
-{
-	int cpu, ret = 0;
-
-	memset(&perf_events, 0, sizeof(perf_events));
-
-	num_counters = perf_num_counters();
-	if (num_counters <= 0) {
-		pr_info("oprofile: no performance counters\n");
-		ret = -ENODEV;
-		goto out;
-	}
-
-	counter_config = kcalloc(num_counters,
-			sizeof(struct op_counter_config), GFP_KERNEL);
-
-	if (!counter_config) {
-		pr_info("oprofile: failed to allocate %d "
-				"counters\n", num_counters);
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = init_driverfs();
-	if (ret)
-		goto out;
-
-	for_each_possible_cpu(cpu) {
-		perf_events[cpu] = kcalloc(num_counters,
-				sizeof(struct perf_event *), GFP_KERNEL);
-		if (!perf_events[cpu]) {
-			pr_info("oprofile: failed to allocate %d perf events "
-					"for cpu %d\n", num_counters, cpu);
-			ret = -ENOMEM;
-			goto out;
-		}
-	}
-
-	ops->create_files	= oprofile_perf_create_files;
-	ops->setup		= oprofile_perf_setup;
-	ops->start		= oprofile_perf_start;
-	ops->stop		= oprofile_perf_stop;
-	ops->shutdown		= oprofile_perf_stop;
-	ops->cpu_type		= op_name_from_perf_id();
-
-	if (!ops->cpu_type)
-		ret = -ENODEV;
-	else
-		pr_info("oprofile: using %s\n", ops->cpu_type);
-
-out:
-	if (ret) {
-		for_each_possible_cpu(cpu)
-			kfree(perf_events[cpu]);
-		kfree(counter_config);
-	}
-
-	return ret;
-}
-
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	ops->backtrace		= arm_backtrace;
@@ -416,25 +116,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_perf_exit(void)
-{
-	int cpu, id;
-	struct perf_event *event;
-
-	for_each_possible_cpu(cpu) {
-		for (id = 0; id < num_counters; ++id) {
-			event = perf_events[cpu][id];
-			if (event)
-				perf_event_release_kernel(event);
-		}
-
-		kfree(perf_events[cpu]);
-	}
-
-	kfree(counter_config);
-	exit_driverfs();
-}
-
 void __exit oprofile_arch_exit(void)
 {
 	oprofile_perf_exit();
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
new file mode 100644
index 000000000000..ebb40cb87474
--- /dev/null
+++ b/drivers/oprofile/oprofile_perf.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2010 ARM Ltd.
+ *
+ * Perf-events backend for OProfile.
+ */
+#include <linux/perf_event.h>
+#include <linux/oprofile.h>
+#include <linux/slab.h>
+
+/*
+ * Per performance monitor configuration as set via oprofilefs.
+ */
+struct op_counter_config {
+	unsigned long count;
+	unsigned long enabled;
+	unsigned long event;
+	unsigned long unit_mask;
+	unsigned long kernel;
+	unsigned long user;
+	struct perf_event_attr attr;
+};
+
+static int oprofile_perf_enabled;
+static DEFINE_MUTEX(oprofile_perf_mutex);
+
+static struct op_counter_config *counter_config;
+static struct perf_event **perf_events[nr_cpumask_bits];
+static int num_counters;
+
+/*
+ * Overflow callback for oprofile.
+ */
+static void op_overflow_handler(struct perf_event *event, int unused,
+			struct perf_sample_data *data, struct pt_regs *regs)
+{
+	int id;
+	u32 cpu = smp_processor_id();
+
+	for (id = 0; id < num_counters; ++id)
+		if (perf_events[cpu][id] == event)
+			break;
+
+	if (id != num_counters)
+		oprofile_add_sample(regs, id);
+	else
+		pr_warning("oprofile: ignoring spurious overflow "
+				"on cpu %u\n", cpu);
+}
+
+/*
+ * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
+ * settings in counter_config. Attributes are created as `pinned' events and
+ * so are permanently scheduled on the PMU.
+ */
+static void op_perf_setup(void)
+{
+	int i;
+	u32 size = sizeof(struct perf_event_attr);
+	struct perf_event_attr *attr;
+
+	for (i = 0; i < num_counters; ++i) {
+		attr = &counter_config[i].attr;
+		memset(attr, 0, size);
+		attr->type = PERF_TYPE_RAW;
+		attr->size = size;
+		attr->config = counter_config[i].event;
+		attr->sample_period = counter_config[i].count;
+		attr->pinned = 1;
+	}
+}
+
+static int op_create_counter(int cpu, int event)
+{
+	int ret = 0;
+	struct perf_event *pevent;
+
+	if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
+		return ret;
+
+	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
+						  cpu, -1,
+						  op_overflow_handler);
+
+	if (IS_ERR(pevent)) {
+		ret = PTR_ERR(pevent);
+	} else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+		pr_warning("oprofile: failed to enable event %d "
+				"on CPU %d\n", event, cpu);
+		ret = -EBUSY;
+	} else {
+		perf_events[cpu][event] = pevent;
+	}
+
+	return ret;
+}
+
+static void op_destroy_counter(int cpu, int event)
+{
+	struct perf_event *pevent = perf_events[cpu][event];
+
+	if (pevent) {
+		perf_event_release_kernel(pevent);
+		perf_events[cpu][event] = NULL;
+	}
+}
+
+/*
+ * Called by oprofile_perf_start to create active perf events based on the
+ * perviously configured attributes.
+ */
+static int op_perf_start(void)
+{
+	int cpu, event, ret = 0;
+
+	for_each_online_cpu(cpu) {
+		for (event = 0; event < num_counters; ++event) {
+			ret = op_create_counter(cpu, event);
+			if (ret)
+				goto out;
+		}
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * Called by oprofile_perf_stop at the end of a profiling run.
+ */
+static void op_perf_stop(void)
+{
+	int cpu, event;
+
+	for_each_online_cpu(cpu)
+		for (event = 0; event < num_counters; ++event)
+			op_destroy_counter(cpu, event);
+}
+
+static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_counters; i++) {
+		struct dentry *dir;
+		char buf[4];
+
+		snprintf(buf, sizeof buf, "%d", i);
+		dir = oprofilefs_mkdir(sb, root, buf);
+		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
+		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
+		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
+		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
+		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
+		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+	}
+
+	return 0;
+}
+
+static int oprofile_perf_setup(void)
+{
+	spin_lock(&oprofilefs_lock);
+	op_perf_setup();
+	spin_unlock(&oprofilefs_lock);
+	return 0;
+}
+
+static int oprofile_perf_start(void)
+{
+	int ret = -EBUSY;
+
+	mutex_lock(&oprofile_perf_mutex);
+	if (!oprofile_perf_enabled) {
+		ret = 0;
+		op_perf_start();
+		oprofile_perf_enabled = 1;
+	}
+	mutex_unlock(&oprofile_perf_mutex);
+	return ret;
+}
+
+static void oprofile_perf_stop(void)
+{
+	mutex_lock(&oprofile_perf_mutex);
+	if (oprofile_perf_enabled)
+		op_perf_stop();
+	oprofile_perf_enabled = 0;
+	mutex_unlock(&oprofile_perf_mutex);
+}
+
+#ifdef CONFIG_PM
+static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
+{
+	mutex_lock(&oprofile_perf_mutex);
+	if (oprofile_perf_enabled)
+		op_perf_stop();
+	mutex_unlock(&oprofile_perf_mutex);
+	return 0;
+}
+
+static int oprofile_perf_resume(struct platform_device *dev)
+{
+	mutex_lock(&oprofile_perf_mutex);
+	if (oprofile_perf_enabled && op_perf_start())
+		oprofile_perf_enabled = 0;
+	mutex_unlock(&oprofile_perf_mutex);
+	return 0;
+}
+
+static struct platform_driver oprofile_driver = {
+	.driver		= {
+		.name		= "oprofile-perf",
+	},
+	.resume		= oprofile_perf_resume,
+	.suspend	= oprofile_perf_suspend,
+};
+
+static struct platform_device *oprofile_pdev;
+
+static int __init init_driverfs(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&oprofile_driver);
+	if (ret)
+		goto out;
+
+	oprofile_pdev = platform_device_register_simple(
+				oprofile_driver.driver.name, 0, NULL, 0);
+	if (IS_ERR(oprofile_pdev)) {
+		ret = PTR_ERR(oprofile_pdev);
+		platform_driver_unregister(&oprofile_driver);
+	}
+
+out:
+	return ret;
+}
+
+static void __exit exit_driverfs(void)
+{
+	platform_device_unregister(oprofile_pdev);
+	platform_driver_unregister(&oprofile_driver);
+}
+#else
+static int __init init_driverfs(void) { return 0; }
+#define exit_driverfs() do { } while (0)
+#endif /* CONFIG_PM */
+
+int __init oprofile_perf_init(struct oprofile_operations *ops)
+{
+	int cpu, ret = 0;
+
+	memset(&perf_events, 0, sizeof(perf_events));
+
+	num_counters = perf_num_counters();
+	if (num_counters <= 0) {
+		pr_info("oprofile: no performance counters\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	counter_config = kcalloc(num_counters,
+			sizeof(struct op_counter_config), GFP_KERNEL);
+
+	if (!counter_config) {
+		pr_info("oprofile: failed to allocate %d "
+				"counters\n", num_counters);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = init_driverfs();
+	if (ret)
+		goto out;
+
+	for_each_possible_cpu(cpu) {
+		perf_events[cpu] = kcalloc(num_counters,
+				sizeof(struct perf_event *), GFP_KERNEL);
+		if (!perf_events[cpu]) {
+			pr_info("oprofile: failed to allocate %d perf events "
+					"for cpu %d\n", num_counters, cpu);
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
+	ops->create_files	= oprofile_perf_create_files;
+	ops->setup		= oprofile_perf_setup;
+	ops->start		= oprofile_perf_start;
+	ops->stop		= oprofile_perf_stop;
+	ops->shutdown		= oprofile_perf_stop;
+	ops->cpu_type		= op_name_from_perf_id();
+
+	if (!ops->cpu_type)
+		ret = -ENODEV;
+	else
+		pr_info("oprofile: using %s\n", ops->cpu_type);
+
+out:
+	if (ret) {
+		for_each_possible_cpu(cpu)
+			kfree(perf_events[cpu]);
+		kfree(counter_config);
+	}
+
+	return ret;
+}
+
+void __exit oprofile_perf_exit(void)
+{
+	int cpu, id;
+	struct perf_event *event;
+
+	for_each_possible_cpu(cpu) {
+		for (id = 0; id < num_counters; ++id) {
+			event = perf_events[cpu][id];
+			if (event)
+				perf_event_release_kernel(event);
+		}
+
+		kfree(perf_events[cpu]);
+	}
+
+	kfree(counter_config);
+	exit_driverfs();
+}
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1574d4aca721..d67a8330b41e 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/spinlock.h>
+#include <linux/init.h>
 #include <asm/atomic.h>
 
 /* Each escaped entry is prefixed by ESCAPE_CODE
@@ -186,6 +187,8 @@ int oprofile_add_data64(struct op_entry *entry, u64 val);
 int oprofile_write_commit(struct op_entry *entry);
 
 #ifdef CONFIG_PERF_EVENTS
+int __init oprofile_perf_init(struct oprofile_operations *ops);
+void __exit oprofile_perf_exit(void);
 char *op_name_from_perf_id(void);
 #endif /* CONFIG_PERF_EVENTS */
 
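
Usage note (not part of the commit): with oprofile_perf_init()/oprofile_perf_exit() now declared in <linux/oprofile.h>, an architecture built with CONFIG_HW_PERF_EVENTS can reuse the generic backend simply by forwarding its oprofile_arch_init()/oprofile_arch_exit() hooks, exactly as the ARM code above does after this change. A minimal sketch follows; my_arch_backtrace() is a hypothetical placeholder for an arch-specific backtrace helper and is not part of this patch.

/* Illustrative sketch only; not part of this patch. */
#include <linux/oprofile.h>	/* also pulls in <linux/init.h> after this change */
#include <linux/ptrace.h>

/* Hypothetical arch-specific backtrace helper. */
static void my_arch_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	/*
	 * Walk the arch-specific call chain and report each PC with
	 * oprofile_add_trace(), analogous to arm_backtrace() above.
	 */
}

int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->backtrace = my_arch_backtrace;
	/* Fills in create_files/setup/start/stop/shutdown and cpu_type. */
	return oprofile_perf_init(ops);
}

void __exit oprofile_arch_exit(void)
{
	oprofile_perf_exit();
}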