aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2010-10-11 13:38:39 -0400
committerRobert Richter <robert.richter@amd.com>2010-10-11 13:38:39 -0400
commit0361e02342f60b64a7075755d5851ed4e6f98c7d (patch)
tree76ca78323ad1d4b1ecf1e8137f6b48eddcbebe3a
parent4fdaa7b682b413dfb7ca9fa74ff45b1e0cb3dade (diff)
parente9677b3ce207a07fad5746b6f7ddc70cae79de0a (diff)
Merge branch 'oprofile/perf' into oprofile/core
Conflicts: arch/arm/oprofile/common.c Signed-off-by: Robert Richter <robert.richter@amd.com>
-rw-r--r--arch/arm/kernel/perf_event.c6
-rw-r--r--arch/arm/oprofile/Makefile4
-rw-r--r--arch/arm/oprofile/common.c313
-rw-r--r--arch/sh/Kconfig13
-rw-r--r--arch/sh/kernel/perf_event.c18
-rw-r--r--arch/sh/oprofile/Makefile4
-rw-r--r--arch/sh/oprofile/common.c115
-rw-r--r--arch/sh/oprofile/op_impl.h33
-rw-r--r--drivers/oprofile/oprofile_perf.c323
-rw-r--r--include/linux/oprofile.h7
-rw-r--r--include/linux/perf_event.h2
-rw-r--r--kernel/perf_event.c5
12 files changed, 412 insertions, 431 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index ecbb0288e5dd..ef3bc331518f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -123,6 +123,12 @@ armpmu_get_max_events(void)
123} 123}
124EXPORT_SYMBOL_GPL(armpmu_get_max_events); 124EXPORT_SYMBOL_GPL(armpmu_get_max_events);
125 125
126int perf_num_counters(void)
127{
128 return armpmu_get_max_events();
129}
130EXPORT_SYMBOL_GPL(perf_num_counters);
131
126#define HW_OP_UNSUPPORTED 0xFFFF 132#define HW_OP_UNSUPPORTED 0xFFFF
127 133
128#define C(_x) \ 134#define C(_x) \
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index e666eafed152..b2215c61cdf0 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
6 oprofilefs.o oprofile_stats.o \ 6 oprofilefs.o oprofile_stats.o \
7 timer_int.o ) 7 timer_int.o )
8 8
9ifeq ($(CONFIG_HW_PERF_EVENTS),y)
10DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
11endif
12
9oprofile-y := $(DRIVER_OBJS) common.o 13oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index ab875f304f7c..8aa974491dfc 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -25,138 +25,10 @@
25#include <asm/ptrace.h> 25#include <asm/ptrace.h>
26 26
27#ifdef CONFIG_HW_PERF_EVENTS 27#ifdef CONFIG_HW_PERF_EVENTS
28/* 28char *op_name_from_perf_id(void)
29 * Per performance monitor configuration as set via oprofilefs.
30 */
31struct op_counter_config {
32 unsigned long count;
33 unsigned long enabled;
34 unsigned long event;
35 unsigned long unit_mask;
36 unsigned long kernel;
37 unsigned long user;
38 struct perf_event_attr attr;
39};
40
41static int op_arm_enabled;
42static DEFINE_MUTEX(op_arm_mutex);
43
44static struct op_counter_config *counter_config;
45static struct perf_event **perf_events[nr_cpumask_bits];
46static int perf_num_counters;
47
48/*
49 * Overflow callback for oprofile.
50 */
51static void op_overflow_handler(struct perf_event *event, int unused,
52 struct perf_sample_data *data, struct pt_regs *regs)
53{
54 int id;
55 u32 cpu = smp_processor_id();
56
57 for (id = 0; id < perf_num_counters; ++id)
58 if (perf_events[cpu][id] == event)
59 break;
60
61 if (id != perf_num_counters)
62 oprofile_add_sample(regs, id);
63 else
64 pr_warning("oprofile: ignoring spurious overflow "
65 "on cpu %u\n", cpu);
66}
67
68/*
69 * Called by op_arm_setup to create perf attributes to mirror the oprofile
70 * settings in counter_config. Attributes are created as `pinned' events and
71 * so are permanently scheduled on the PMU.
72 */
73static void op_perf_setup(void)
74{ 29{
75 int i; 30 enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
76 u32 size = sizeof(struct perf_event_attr);
77 struct perf_event_attr *attr;
78
79 for (i = 0; i < perf_num_counters; ++i) {
80 attr = &counter_config[i].attr;
81 memset(attr, 0, size);
82 attr->type = PERF_TYPE_RAW;
83 attr->size = size;
84 attr->config = counter_config[i].event;
85 attr->sample_period = counter_config[i].count;
86 attr->pinned = 1;
87 }
88}
89
90static int op_create_counter(int cpu, int event)
91{
92 struct perf_event *pevent;
93
94 if (!counter_config[event].enabled || perf_events[cpu][event])
95 return 0;
96
97 pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
98 cpu, -1,
99 op_overflow_handler);
100
101 if (IS_ERR(pevent))
102 return PTR_ERR(pevent);
103
104 if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
105 perf_event_release_kernel(pevent);
106 pr_warning("oprofile: failed to enable event %d "
107 "on CPU %d\n", event, cpu);
108 return -EBUSY;
109 }
110
111 perf_events[cpu][event] = pevent;
112
113 return 0;
114}
115 31
116static void op_destroy_counter(int cpu, int event)
117{
118 struct perf_event *pevent = perf_events[cpu][event];
119
120 if (pevent) {
121 perf_event_release_kernel(pevent);
122 perf_events[cpu][event] = NULL;
123 }
124}
125
126/*
127 * Called by op_arm_start to create active perf events based on the
128 * previously configured attributes.
129 */
130static int op_perf_start(void)
131{
132 int cpu, event, ret = 0;
133
134 for_each_online_cpu(cpu) {
135 for (event = 0; event < perf_num_counters; ++event) {
136 ret = op_create_counter(cpu, event);
137 if (ret)
138 return ret;
139 }
140 }
141
142 return ret;
143}
144
145/*
146 * Called by op_arm_stop at the end of a profiling run.
147 */
148static void op_perf_stop(void)
149{
150 int cpu, event;
151
152 for_each_online_cpu(cpu)
153 for (event = 0; event < perf_num_counters; ++event)
154 op_destroy_counter(cpu, event);
155}
156
157
158static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
159{
160 switch (id) { 32 switch (id) {
161 case ARM_PERF_PMU_ID_XSCALE1: 33 case ARM_PERF_PMU_ID_XSCALE1:
162 return "arm/xscale1"; 34 return "arm/xscale1";
@@ -175,115 +47,6 @@ static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
175 } 47 }
176} 48}
177 49
178static int op_arm_create_files(struct super_block *sb, struct dentry *root)
179{
180 unsigned int i;
181
182 for (i = 0; i < perf_num_counters; i++) {
183 struct dentry *dir;
184 char buf[4];
185
186 snprintf(buf, sizeof buf, "%d", i);
187 dir = oprofilefs_mkdir(sb, root, buf);
188 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
189 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
190 oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
191 oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
192 oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
193 oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
194 }
195
196 return 0;
197}
198
199static int op_arm_setup(void)
200{
201 spin_lock(&oprofilefs_lock);
202 op_perf_setup();
203 spin_unlock(&oprofilefs_lock);
204 return 0;
205}
206
207static int op_arm_start(void)
208{
209 int ret = -EBUSY;
210
211 mutex_lock(&op_arm_mutex);
212 if (!op_arm_enabled) {
213 ret = 0;
214 op_perf_start();
215 op_arm_enabled = 1;
216 }
217 mutex_unlock(&op_arm_mutex);
218 return ret;
219}
220
221static void op_arm_stop(void)
222{
223 mutex_lock(&op_arm_mutex);
224 if (op_arm_enabled)
225 op_perf_stop();
226 op_arm_enabled = 0;
227 mutex_unlock(&op_arm_mutex);
228}
229
230#ifdef CONFIG_PM
231static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
232{
233 mutex_lock(&op_arm_mutex);
234 if (op_arm_enabled)
235 op_perf_stop();
236 mutex_unlock(&op_arm_mutex);
237 return 0;
238}
239
240static int op_arm_resume(struct platform_device *dev)
241{
242 mutex_lock(&op_arm_mutex);
243 if (op_arm_enabled && op_perf_start())
244 op_arm_enabled = 0;
245 mutex_unlock(&op_arm_mutex);
246 return 0;
247}
248
249static struct platform_driver oprofile_driver = {
250 .driver = {
251 .name = "arm-oprofile",
252 },
253 .resume = op_arm_resume,
254 .suspend = op_arm_suspend,
255};
256
257static struct platform_device *oprofile_pdev;
258
259static int __init init_driverfs(void)
260{
261 int ret;
262
263 ret = platform_driver_register(&oprofile_driver);
264 if (ret)
265 return ret;
266
267 oprofile_pdev = platform_device_register_simple(
268 oprofile_driver.driver.name, 0, NULL, 0);
269 if (IS_ERR(oprofile_pdev)) {
270 ret = PTR_ERR(oprofile_pdev);
271 platform_driver_unregister(&oprofile_driver);
272 }
273
274 return ret;
275}
276
277static void __exit exit_driverfs(void)
278{
279 platform_device_unregister(oprofile_pdev);
280 platform_driver_unregister(&oprofile_driver);
281}
282#else
283static int __init init_driverfs(void) { return 0; }
284#define exit_driverfs() do { } while (0)
285#endif /* CONFIG_PM */
286
287static int report_trace(struct stackframe *frame, void *d) 50static int report_trace(struct stackframe *frame, void *d)
288{ 51{
289 unsigned int *depth = d; 52 unsigned int *depth = d;
@@ -346,79 +109,17 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
346 tail = user_backtrace(tail); 109 tail = user_backtrace(tail);
347} 110}
348 111
349void oprofile_arch_exit(void)
350{
351 int cpu, id;
352 struct perf_event *event;
353
354 for_each_possible_cpu(cpu) {
355 for (id = 0; id < perf_num_counters; ++id) {
356 event = perf_events[cpu][id];
357 if (event)
358 perf_event_release_kernel(event);
359 }
360
361 kfree(perf_events[cpu]);
362 }
363
364 kfree(counter_config);
365 exit_driverfs();
366}
367
368int __init oprofile_arch_init(struct oprofile_operations *ops) 112int __init oprofile_arch_init(struct oprofile_operations *ops)
369{ 113{
370 int cpu, ret = 0;
371
372 ret = init_driverfs();
373 if (ret)
374 return ret;
375
376 memset(&perf_events, 0, sizeof(perf_events));
377
378 perf_num_counters = armpmu_get_max_events();
379
380 counter_config = kcalloc(perf_num_counters,
381 sizeof(struct op_counter_config), GFP_KERNEL);
382
383 if (!counter_config) {
384 pr_info("oprofile: failed to allocate %d "
385 "counters\n", perf_num_counters);
386 ret = -ENOMEM;
387 perf_num_counters = 0;
388 goto out;
389 }
390
391 for_each_possible_cpu(cpu) {
392 perf_events[cpu] = kcalloc(perf_num_counters,
393 sizeof(struct perf_event *), GFP_KERNEL);
394 if (!perf_events[cpu]) {
395 pr_info("oprofile: failed to allocate %d perf events "
396 "for cpu %d\n", perf_num_counters, cpu);
397 ret = -ENOMEM;
398 goto out;
399 }
400 }
401
402 ops->backtrace = arm_backtrace; 114 ops->backtrace = arm_backtrace;
403 ops->create_files = op_arm_create_files;
404 ops->setup = op_arm_setup;
405 ops->start = op_arm_start;
406 ops->stop = op_arm_stop;
407 ops->shutdown = op_arm_stop;
408 ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id());
409
410 if (!ops->cpu_type)
411 ret = -ENODEV;
412 else
413 pr_info("oprofile: using %s\n", ops->cpu_type);
414 115
415out: 116 return oprofile_perf_init(ops);
416 if (ret)
417 oprofile_arch_exit();
418
419 return ret;
420} 117}
421 118
119void __exit oprofile_arch_exit(void)
120{
121 oprofile_perf_exit();
122}
422#else 123#else
423int __init oprofile_arch_init(struct oprofile_operations *ops) 124int __init oprofile_arch_init(struct oprofile_operations *ops)
424{ 125{
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 33990fa95af0..35b6c3f85173 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -249,6 +249,11 @@ config ARCH_SHMOBILE
249 select PM 249 select PM
250 select PM_RUNTIME 250 select PM_RUNTIME
251 251
252config CPU_HAS_PMU
253 depends on CPU_SH4 || CPU_SH4A
254 default y
255 bool
256
252if SUPERH32 257if SUPERH32
253 258
254choice 259choice
@@ -738,6 +743,14 @@ config GUSA_RB
738 LLSC, this should be more efficient than the other alternative of 743 LLSC, this should be more efficient than the other alternative of
739 disabling interrupts around the atomic sequence. 744 disabling interrupts around the atomic sequence.
740 745
746config HW_PERF_EVENTS
747 bool "Enable hardware performance counter support for perf events"
748 depends on PERF_EVENTS && CPU_HAS_PMU
749 default y
750 help
751 Enable hardware performance counter support for perf events. If
752 disabled, perf events will use software events only.
753
741source "drivers/sh/Kconfig" 754source "drivers/sh/Kconfig"
742 755
743endmenu 756endmenu
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7a3dc3567258..55fe89bbdfe0 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -59,6 +59,24 @@ static inline int sh_pmu_initialized(void)
59 return !!sh_pmu; 59 return !!sh_pmu;
60} 60}
61 61
62const char *perf_pmu_name(void)
63{
64 if (!sh_pmu)
65 return NULL;
66
67 return sh_pmu->name;
68}
69EXPORT_SYMBOL_GPL(perf_pmu_name);
70
71int perf_num_counters(void)
72{
73 if (!sh_pmu)
74 return 0;
75
76 return sh_pmu->num_events;
77}
78EXPORT_SYMBOL_GPL(perf_num_counters);
79
62/* 80/*
63 * Release the PMU if this is the last perf_event. 81 * Release the PMU if this is the last perf_event.
64 */ 82 */
diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile
index 4886c5c1786c..e85aae73e3dc 100644
--- a/arch/sh/oprofile/Makefile
+++ b/arch/sh/oprofile/Makefile
@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
6 oprofilefs.o oprofile_stats.o \ 6 oprofilefs.o oprofile_stats.o \
7 timer_int.o ) 7 timer_int.o )
8 8
9ifeq ($(CONFIG_HW_PERF_EVENTS),y)
10DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
11endif
12
9oprofile-y := $(DRIVER_OBJS) common.o backtrace.o 13oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index ac604937f3ee..e10d89376f9b 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -17,114 +17,45 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/smp.h> 19#include <linux/smp.h>
20#include <linux/perf_event.h>
20#include <asm/processor.h> 21#include <asm/processor.h>
21#include "op_impl.h"
22
23static struct op_sh_model *model;
24
25static struct op_counter_config ctr[20];
26 22
23#ifdef CONFIG_HW_PERF_EVENTS
27extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth); 24extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
28 25
29static int op_sh_setup(void) 26char *op_name_from_perf_id(void)
30{
31 /* Pre-compute the values to stuff in the hardware registers. */
32 model->reg_setup(ctr);
33
34 /* Configure the registers on all cpus. */
35 on_each_cpu(model->cpu_setup, NULL, 1);
36
37 return 0;
38}
39
40static int op_sh_create_files(struct super_block *sb, struct dentry *root)
41{ 27{
42 int i, ret = 0; 28 const char *pmu;
29 char buf[20];
30 int size;
43 31
44 for (i = 0; i < model->num_counters; i++) { 32 pmu = perf_pmu_name();
45 struct dentry *dir; 33 if (!pmu)
46 char buf[4]; 34 return NULL;
47 35
48 snprintf(buf, sizeof(buf), "%d", i); 36 size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
49 dir = oprofilefs_mkdir(sb, root, buf); 37 if (size > -1 && size < sizeof(buf))
38 return buf;
50 39
51 ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); 40 return NULL;
52 ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
53 ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
54 ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
55
56 if (model->create_files)
57 ret |= model->create_files(sb, dir);
58 else
59 ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
60
61 /* Dummy entries */
62 ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
63 }
64
65 return ret;
66} 41}
67 42
68static int op_sh_start(void) 43int __init oprofile_arch_init(struct oprofile_operations *ops)
69{ 44{
70 /* Enable performance monitoring for all counters. */ 45 ops->backtrace = sh_backtrace;
71 on_each_cpu(model->cpu_start, NULL, 1);
72 46
73 return 0; 47 return oprofile_perf_init(ops);
74} 48}
75 49
76static void op_sh_stop(void) 50void __exit oprofile_arch_exit(void)
77{ 51{
78 /* Disable performance monitoring for all counters. */ 52 oprofile_perf_exit();
79 on_each_cpu(model->cpu_stop, NULL, 1);
80} 53}
81 54#else
82int __init oprofile_arch_init(struct oprofile_operations *ops) 55int __init oprofile_arch_init(struct oprofile_operations *ops)
83{ 56{
84 struct op_sh_model *lmodel = NULL; 57 pr_info("oprofile: hardware counters not available\n");
85 int ret; 58 return -ENODEV;
86
87 /*
88 * Always assign the backtrace op. If the counter initialization
89 * fails, we fall back to the timer which will still make use of
90 * this.
91 */
92 ops->backtrace = sh_backtrace;
93
94 /*
95 * XXX
96 *
97 * All of the SH7750/SH-4A counters have been converted to perf,
98 * this infrastructure hook is left for other users until they've
99 * had a chance to convert over, at which point all of this
100 * will be deleted.
101 */
102
103 if (!lmodel)
104 return -ENODEV;
105 if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
106 return -ENODEV;
107
108 ret = lmodel->init();
109 if (unlikely(ret != 0))
110 return ret;
111
112 model = lmodel;
113
114 ops->setup = op_sh_setup;
115 ops->create_files = op_sh_create_files;
116 ops->start = op_sh_start;
117 ops->stop = op_sh_stop;
118 ops->cpu_type = lmodel->cpu_type;
119
120 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
121 lmodel->cpu_type);
122
123 return 0;
124}
125
126void oprofile_arch_exit(void)
127{
128 if (model && model->exit)
129 model->exit();
130} 59}
60void __exit oprofile_arch_exit(void) {}
61#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sh/oprofile/op_impl.h b/arch/sh/oprofile/op_impl.h
deleted file mode 100644
index 1244479ceb29..000000000000
--- a/arch/sh/oprofile/op_impl.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef __OP_IMPL_H
2#define __OP_IMPL_H
3
4/* Per-counter configuration as set via oprofilefs. */
5struct op_counter_config {
6 unsigned long enabled;
7 unsigned long event;
8
9 unsigned long count;
10
11 /* Dummy values for userspace tool compliance */
12 unsigned long kernel;
13 unsigned long user;
14 unsigned long unit_mask;
15};
16
17/* Per-architecture configury and hooks. */
18struct op_sh_model {
19 void (*reg_setup)(struct op_counter_config *);
20 int (*create_files)(struct super_block *sb, struct dentry *dir);
21 void (*cpu_setup)(void *dummy);
22 int (*init)(void);
23 void (*exit)(void);
24 void (*cpu_start)(void *args);
25 void (*cpu_stop)(void *args);
26 char *cpu_type;
27 unsigned char num_counters;
28};
29
30/* arch/sh/oprofile/common.c */
31extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
32
33#endif /* __OP_IMPL_H */
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
new file mode 100644
index 000000000000..b17235a24a4d
--- /dev/null
+++ b/drivers/oprofile/oprofile_perf.c
@@ -0,0 +1,323 @@
1/*
2 * Copyright 2010 ARM Ltd.
3 *
4 * Perf-events backend for OProfile.
5 */
6#include <linux/perf_event.h>
7#include <linux/oprofile.h>
8#include <linux/slab.h>
9
10/*
11 * Per performance monitor configuration as set via oprofilefs.
12 */
13struct op_counter_config {
14 unsigned long count;
15 unsigned long enabled;
16 unsigned long event;
17 unsigned long unit_mask;
18 unsigned long kernel;
19 unsigned long user;
20 struct perf_event_attr attr;
21};
22
23static int oprofile_perf_enabled;
24static DEFINE_MUTEX(oprofile_perf_mutex);
25
26static struct op_counter_config *counter_config;
27static struct perf_event **perf_events[nr_cpumask_bits];
28static int num_counters;
29
30/*
31 * Overflow callback for oprofile.
32 */
33static void op_overflow_handler(struct perf_event *event, int unused,
34 struct perf_sample_data *data, struct pt_regs *regs)
35{
36 int id;
37 u32 cpu = smp_processor_id();
38
39 for (id = 0; id < num_counters; ++id)
40 if (perf_events[cpu][id] == event)
41 break;
42
43 if (id != num_counters)
44 oprofile_add_sample(regs, id);
45 else
46 pr_warning("oprofile: ignoring spurious overflow "
47 "on cpu %u\n", cpu);
48}
49
50/*
51 * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
52 * settings in counter_config. Attributes are created as `pinned' events and
53 * so are permanently scheduled on the PMU.
54 */
55static void op_perf_setup(void)
56{
57 int i;
58 u32 size = sizeof(struct perf_event_attr);
59 struct perf_event_attr *attr;
60
61 for (i = 0; i < num_counters; ++i) {
62 attr = &counter_config[i].attr;
63 memset(attr, 0, size);
64 attr->type = PERF_TYPE_RAW;
65 attr->size = size;
66 attr->config = counter_config[i].event;
67 attr->sample_period = counter_config[i].count;
68 attr->pinned = 1;
69 }
70}
71
72static int op_create_counter(int cpu, int event)
73{
74 struct perf_event *pevent;
75
76 if (!counter_config[event].enabled || perf_events[cpu][event])
77 return 0;
78
79 pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
80 cpu, -1,
81 op_overflow_handler);
82
83 if (IS_ERR(pevent))
84 return PTR_ERR(pevent);
85
86 if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
87 perf_event_release_kernel(pevent);
88 pr_warning("oprofile: failed to enable event %d "
89 "on CPU %d\n", event, cpu);
90 return -EBUSY;
91 }
92
93 perf_events[cpu][event] = pevent;
94
95 return 0;
96}
97
98static void op_destroy_counter(int cpu, int event)
99{
100 struct perf_event *pevent = perf_events[cpu][event];
101
102 if (pevent) {
103 perf_event_release_kernel(pevent);
104 perf_events[cpu][event] = NULL;
105 }
106}
107
108/*
109 * Called by oprofile_perf_start to create active perf events based on the
110 * previously configured attributes.
111 */
112static int op_perf_start(void)
113{
114 int cpu, event, ret = 0;
115
116 for_each_online_cpu(cpu) {
117 for (event = 0; event < num_counters; ++event) {
118 ret = op_create_counter(cpu, event);
119 if (ret)
120 return ret;
121 }
122 }
123
124 return ret;
125}
126
127/*
128 * Called by oprofile_perf_stop at the end of a profiling run.
129 */
130static void op_perf_stop(void)
131{
132 int cpu, event;
133
134 for_each_online_cpu(cpu)
135 for (event = 0; event < num_counters; ++event)
136 op_destroy_counter(cpu, event);
137}
138
139static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
140{
141 unsigned int i;
142
143 for (i = 0; i < num_counters; i++) {
144 struct dentry *dir;
145 char buf[4];
146
147 snprintf(buf, sizeof buf, "%d", i);
148 dir = oprofilefs_mkdir(sb, root, buf);
149 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
150 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
151 oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
152 oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
153 oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
154 oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
155 }
156
157 return 0;
158}
159
160static int oprofile_perf_setup(void)
161{
162 spin_lock(&oprofilefs_lock);
163 op_perf_setup();
164 spin_unlock(&oprofilefs_lock);
165 return 0;
166}
167
168static int oprofile_perf_start(void)
169{
170 int ret = -EBUSY;
171
172 mutex_lock(&oprofile_perf_mutex);
173 if (!oprofile_perf_enabled) {
174 ret = 0;
175 op_perf_start();
176 oprofile_perf_enabled = 1;
177 }
178 mutex_unlock(&oprofile_perf_mutex);
179 return ret;
180}
181
182static void oprofile_perf_stop(void)
183{
184 mutex_lock(&oprofile_perf_mutex);
185 if (oprofile_perf_enabled)
186 op_perf_stop();
187 oprofile_perf_enabled = 0;
188 mutex_unlock(&oprofile_perf_mutex);
189}
190
191#ifdef CONFIG_PM
192static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
193{
194 mutex_lock(&oprofile_perf_mutex);
195 if (oprofile_perf_enabled)
196 op_perf_stop();
197 mutex_unlock(&oprofile_perf_mutex);
198 return 0;
199}
200
201static int oprofile_perf_resume(struct platform_device *dev)
202{
203 mutex_lock(&oprofile_perf_mutex);
204 if (oprofile_perf_enabled && op_perf_start())
205 oprofile_perf_enabled = 0;
206 mutex_unlock(&oprofile_perf_mutex);
207 return 0;
208}
209
210static struct platform_driver oprofile_driver = {
211 .driver = {
212 .name = "oprofile-perf",
213 },
214 .resume = oprofile_perf_resume,
215 .suspend = oprofile_perf_suspend,
216};
217
218static struct platform_device *oprofile_pdev;
219
220static int __init init_driverfs(void)
221{
222 int ret;
223
224 ret = platform_driver_register(&oprofile_driver);
225 if (ret)
226 return ret;
227
228 oprofile_pdev = platform_device_register_simple(
229 oprofile_driver.driver.name, 0, NULL, 0);
230 if (IS_ERR(oprofile_pdev)) {
231 ret = PTR_ERR(oprofile_pdev);
232 platform_driver_unregister(&oprofile_driver);
233 }
234
235 return ret;
236}
237
238static void __exit exit_driverfs(void)
239{
240 platform_device_unregister(oprofile_pdev);
241 platform_driver_unregister(&oprofile_driver);
242}
243#else
244static int __init init_driverfs(void) { return 0; }
245#define exit_driverfs() do { } while (0)
246#endif /* CONFIG_PM */
247
248void oprofile_perf_exit(void)
249{
250 int cpu, id;
251 struct perf_event *event;
252
253 for_each_possible_cpu(cpu) {
254 for (id = 0; id < num_counters; ++id) {
255 event = perf_events[cpu][id];
256 if (event)
257 perf_event_release_kernel(event);
258 }
259
260 kfree(perf_events[cpu]);
261 }
262
263 kfree(counter_config);
264 exit_driverfs();
265}
266
267int __init oprofile_perf_init(struct oprofile_operations *ops)
268{
269 int cpu, ret = 0;
270
271 ret = init_driverfs();
272 if (ret)
273 return ret;
274
275 memset(&perf_events, 0, sizeof(perf_events));
276
277 num_counters = perf_num_counters();
278 if (num_counters <= 0) {
279 pr_info("oprofile: no performance counters\n");
280 ret = -ENODEV;
281 goto out;
282 }
283
284 counter_config = kcalloc(num_counters,
285 sizeof(struct op_counter_config), GFP_KERNEL);
286
287 if (!counter_config) {
288 pr_info("oprofile: failed to allocate %d "
289 "counters\n", num_counters);
290 ret = -ENOMEM;
291 num_counters = 0;
292 goto out;
293 }
294
295 for_each_possible_cpu(cpu) {
296 perf_events[cpu] = kcalloc(num_counters,
297 sizeof(struct perf_event *), GFP_KERNEL);
298 if (!perf_events[cpu]) {
299 pr_info("oprofile: failed to allocate %d perf events "
300 "for cpu %d\n", num_counters, cpu);
301 ret = -ENOMEM;
302 goto out;
303 }
304 }
305
306 ops->create_files = oprofile_perf_create_files;
307 ops->setup = oprofile_perf_setup;
308 ops->start = oprofile_perf_start;
309 ops->stop = oprofile_perf_stop;
310 ops->shutdown = oprofile_perf_stop;
311 ops->cpu_type = op_name_from_perf_id();
312
313 if (!ops->cpu_type)
314 ret = -ENODEV;
315 else
316 pr_info("oprofile: using %s\n", ops->cpu_type);
317
318out:
319 if (ret)
320 oprofile_perf_exit();
321
322 return ret;
323}
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 5171639ecf0f..d67a8330b41e 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -15,6 +15,7 @@
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/init.h>
18#include <asm/atomic.h> 19#include <asm/atomic.h>
19 20
20/* Each escaped entry is prefixed by ESCAPE_CODE 21/* Each escaped entry is prefixed by ESCAPE_CODE
@@ -185,4 +186,10 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val);
185int oprofile_add_data64(struct op_entry *entry, u64 val); 186int oprofile_add_data64(struct op_entry *entry, u64 val);
186int oprofile_write_commit(struct op_entry *entry); 187int oprofile_write_commit(struct op_entry *entry);
187 188
189#ifdef CONFIG_PERF_EVENTS
190int __init oprofile_perf_init(struct oprofile_operations *ops);
191void __exit oprofile_perf_exit(void);
192char *op_name_from_perf_id(void);
193#endif /* CONFIG_PERF_EVENTS */
194
188#endif /* OPROFILE_H */ 195#endif /* OPROFILE_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 716f99b682c1..33f08dafda2f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -849,6 +849,8 @@ extern int perf_max_events;
849 849
850extern const struct pmu *hw_perf_event_init(struct perf_event *event); 850extern const struct pmu *hw_perf_event_init(struct perf_event *event);
851 851
852extern int perf_num_counters(void);
853extern const char *perf_pmu_name(void);
852extern void perf_event_task_sched_in(struct task_struct *task); 854extern void perf_event_task_sched_in(struct task_struct *task);
853extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); 855extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
854extern void perf_event_task_tick(struct task_struct *task); 856extern void perf_event_task_tick(struct task_struct *task);
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index db5b56064687..fc512684423f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -85,6 +85,11 @@ void __weak hw_perf_enable(void) { barrier(); }
85 85
86void __weak perf_event_print_debug(void) { } 86void __weak perf_event_print_debug(void) { }
87 87
88extern __weak const char *perf_pmu_name(void)
89{
90 return "pmu";
91}
92
88static DEFINE_PER_CPU(int, perf_disable_count); 93static DEFINE_PER_CPU(int, perf_disable_count);
89 94
90void perf_disable(void) 95void perf_disable(void)