-rw-r--r--   arch/arm/kernel/perf_event.c          6
-rw-r--r--   arch/arm/oprofile/Makefile            4
-rw-r--r--   arch/arm/oprofile/common.c          311
-rw-r--r--   arch/sh/Kconfig                      13
-rw-r--r--   arch/sh/kernel/perf_event.c          18
-rw-r--r--   arch/sh/oprofile/Makefile             4
-rw-r--r--   arch/sh/oprofile/common.c           115
-rw-r--r--   arch/sh/oprofile/op_impl.h           33
-rw-r--r--   arch/x86/oprofile/backtrace.c        70
-rw-r--r--   arch/x86/oprofile/nmi_int.c           9
-rw-r--r--   drivers/oprofile/oprof.c             32
-rw-r--r--   drivers/oprofile/oprof.h              2
-rw-r--r--   drivers/oprofile/oprofile_files.c     7
-rw-r--r--   drivers/oprofile/oprofile_perf.c    328
-rw-r--r--   drivers/oprofile/oprofilefs.c        54
-rw-r--r--   include/linux/oprofile.h              7
-rw-r--r--   include/linux/perf_event.h            2
-rw-r--r--   kernel/perf_event.c                   5
18 files changed, 510 insertions, 510 deletions
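
Taken as a whole, the series retires the hand-rolled ARM and SH oprofile drivers in favour of a shared perf-events backend in drivers/oprofile/oprofile_perf.c. After the conversion an architecture only wires up its backtrace helper and hands control to the generic code; the sketch below is modelled on the ARM and SH glue visible later in this diff (the cpu_type string and the arch_backtrace() helper are placeholders, not code from either port).

/* Minimal arch glue after this series -- a sketch, not a drop-in port. */
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/perf_event.h>

extern void arch_backtrace(struct pt_regs * const regs, unsigned int depth);

char *op_name_from_perf_id(void)
{
	return "arch/pmu";		/* placeholder oprofile cpu_type string */
}

int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->backtrace = arch_backtrace;
	return oprofile_perf_init(ops);	/* generic perf backend does the rest */
}

void __exit oprofile_arch_exit(void)
{
	oprofile_perf_exit();
}
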
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 55addc85eaa7..6cc6521881aa 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -123,6 +123,12 @@ armpmu_get_max_events(void)
 }
 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
 
+int perf_num_counters(void)
+{
+	return armpmu_get_max_events();
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 #define HW_OP_UNSUPPORTED		0xFFFF
 
 #define C(_x) \
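
perf_num_counters() is the hook the new generic backend uses to size its counter_config[] and per-CPU perf_events[] arrays; ARM simply forwards to armpmu_get_max_events(). Another architecture would wrap its own PMU descriptor the same way -- a sketch, with a hypothetical my_pmu descriptor standing in for the real thing (compare sh_pmu->num_events later in this series):

#include <linux/module.h>
#include <linux/perf_event.h>

struct my_arch_pmu {
	int	num_events;		/* hypothetical arch PMU descriptor */
};

static struct my_arch_pmu *my_pmu;	/* set up by the arch perf code */

int perf_num_counters(void)
{
	return my_pmu ? my_pmu->num_events : 0;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
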
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index e666eafed152..b2215c61cdf0 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
 oprofile-y	:= $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index d1fb5b2f5218..8aa974491dfc 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -25,139 +25,10 @@
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_HW_PERF_EVENTS
-/*
- * Per performance monitor configuration as set via oprofilefs.
- */
-struct op_counter_config {
-	unsigned long count;
-	unsigned long enabled;
-	unsigned long event;
-	unsigned long unit_mask;
-	unsigned long kernel;
-	unsigned long user;
-	struct perf_event_attr attr;
-};
-
-static int op_arm_enabled;
-static DEFINE_MUTEX(op_arm_mutex);
-
-static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
-static int perf_num_counters;
-
-/*
- * Overflow callback for oprofile.
- */
-static void op_overflow_handler(struct perf_event *event, int unused,
-			struct perf_sample_data *data, struct pt_regs *regs)
+char *op_name_from_perf_id(void)
 {
-	int id;
-	u32 cpu = smp_processor_id();
-
-	for (id = 0; id < perf_num_counters; ++id)
-		if (perf_events[cpu][id] == event)
-			break;
-
-	if (id != perf_num_counters)
-		oprofile_add_sample(regs, id);
-	else
-		pr_warning("oprofile: ignoring spurious overflow "
-			"on cpu %u\n", cpu);
-}
-
-/*
- * Called by op_arm_setup to create perf attributes to mirror the oprofile
- * settings in counter_config. Attributes are created as `pinned' events and
- * so are permanently scheduled on the PMU.
- */
-static void op_perf_setup(void)
-{
-	int i;
-	u32 size = sizeof(struct perf_event_attr);
-	struct perf_event_attr *attr;
-
-	for (i = 0; i < perf_num_counters; ++i) {
-		attr = &counter_config[i].attr;
-		memset(attr, 0, size);
-		attr->type = PERF_TYPE_RAW;
-		attr->size = size;
-		attr->config = counter_config[i].event;
-		attr->sample_period = counter_config[i].count;
-		attr->pinned = 1;
-	}
-}
-
-static int op_create_counter(int cpu, int event)
-{
-	int ret = 0;
-	struct perf_event *pevent;
-
-	if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
-		return ret;
-
-	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
-						  cpu, NULL,
-						  op_overflow_handler);
-
-	if (IS_ERR(pevent)) {
-		ret = PTR_ERR(pevent);
-	} else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
-		perf_event_release_kernel(pevent);
-		pr_warning("oprofile: failed to enable event %d "
-			"on CPU %d\n", event, cpu);
-		ret = -EBUSY;
-	} else {
-		perf_events[cpu][event] = pevent;
-	}
-
-	return ret;
-}
+	enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
 
-static void op_destroy_counter(int cpu, int event)
-{
-	struct perf_event *pevent = perf_events[cpu][event];
-
-	if (pevent) {
-		perf_event_release_kernel(pevent);
-		perf_events[cpu][event] = NULL;
-	}
-}
-
-/*
- * Called by op_arm_start to create active perf events based on the
- * perviously configured attributes.
- */
-static int op_perf_start(void)
-{
-	int cpu, event, ret = 0;
-
-	for_each_online_cpu(cpu) {
-		for (event = 0; event < perf_num_counters; ++event) {
-			ret = op_create_counter(cpu, event);
-			if (ret)
-				goto out;
-		}
-	}
-
-out:
-	return ret;
-}
-
-/*
- * Called by op_arm_stop at the end of a profiling run.
- */
-static void op_perf_stop(void)
-{
-	int cpu, event;
-
-	for_each_online_cpu(cpu)
-		for (event = 0; event < perf_num_counters; ++event)
-			op_destroy_counter(cpu, event);
-}
-
-
-static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
-{
 	switch (id) {
 	case ARM_PERF_PMU_ID_XSCALE1:
 		return "arm/xscale1";
@@ -176,116 +47,6 @@ static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
 	}
 }
 
-static int op_arm_create_files(struct super_block *sb, struct dentry *root)
-{
-	unsigned int i;
-
-	for (i = 0; i < perf_num_counters; i++) {
-		struct dentry *dir;
-		char buf[4];
-
-		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
-		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
-		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
-	}
-
-	return 0;
-}
-
-static int op_arm_setup(void)
-{
-	spin_lock(&oprofilefs_lock);
-	op_perf_setup();
-	spin_unlock(&oprofilefs_lock);
-	return 0;
-}
-
-static int op_arm_start(void)
-{
-	int ret = -EBUSY;
-
-	mutex_lock(&op_arm_mutex);
-	if (!op_arm_enabled) {
-		ret = 0;
-		op_perf_start();
-		op_arm_enabled = 1;
-	}
-	mutex_unlock(&op_arm_mutex);
-	return ret;
-}
-
-static void op_arm_stop(void)
-{
-	mutex_lock(&op_arm_mutex);
-	if (op_arm_enabled)
-		op_perf_stop();
-	op_arm_enabled = 0;
-	mutex_unlock(&op_arm_mutex);
-}
-
-#ifdef CONFIG_PM
-static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
-{
-	mutex_lock(&op_arm_mutex);
-	if (op_arm_enabled)
-		op_perf_stop();
-	mutex_unlock(&op_arm_mutex);
-	return 0;
-}
-
-static int op_arm_resume(struct platform_device *dev)
-{
-	mutex_lock(&op_arm_mutex);
-	if (op_arm_enabled && op_perf_start())
-		op_arm_enabled = 0;
-	mutex_unlock(&op_arm_mutex);
-	return 0;
-}
-
-static struct platform_driver oprofile_driver = {
-	.driver		= {
-		.name		= "arm-oprofile",
-	},
-	.resume		= op_arm_resume,
-	.suspend	= op_arm_suspend,
-};
-
-static struct platform_device *oprofile_pdev;
-
-static int __init init_driverfs(void)
-{
-	int ret;
-
-	ret = platform_driver_register(&oprofile_driver);
-	if (ret)
-		goto out;
-
-	oprofile_pdev = platform_device_register_simple(
-				oprofile_driver.driver.name, 0, NULL, 0);
-	if (IS_ERR(oprofile_pdev)) {
-		ret = PTR_ERR(oprofile_pdev);
-		platform_driver_unregister(&oprofile_driver);
-	}
-
-out:
-	return ret;
-}
-
-static void exit_driverfs(void)
-{
-	platform_device_unregister(oprofile_pdev);
-	platform_driver_unregister(&oprofile_driver);
-}
-#else
-static int __init init_driverfs(void) { return 0; }
-#define exit_driverfs() do { } while (0)
-#endif /* CONFIG_PM */
-
 static int report_trace(struct stackframe *frame, void *d)
 {
 	unsigned int *depth = d;
@@ -350,74 +111,14 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	int cpu, ret = 0;
-
-	perf_num_counters = armpmu_get_max_events();
-
-	counter_config = kcalloc(perf_num_counters,
-			sizeof(struct op_counter_config), GFP_KERNEL);
-
-	if (!counter_config) {
-		pr_info("oprofile: failed to allocate %d "
-				"counters\n", perf_num_counters);
-		return -ENOMEM;
-	}
-
-	ret = init_driverfs();
-	if (ret) {
-		kfree(counter_config);
-		counter_config = NULL;
-		return ret;
-	}
-
-	for_each_possible_cpu(cpu) {
-		perf_events[cpu] = kcalloc(perf_num_counters,
-				sizeof(struct perf_event *), GFP_KERNEL);
-		if (!perf_events[cpu]) {
-			pr_info("oprofile: failed to allocate %d perf events "
-					"for cpu %d\n", perf_num_counters, cpu);
-			while (--cpu >= 0)
-				kfree(perf_events[cpu]);
-			return -ENOMEM;
-		}
-	}
-
 	ops->backtrace		= arm_backtrace;
-	ops->create_files	= op_arm_create_files;
-	ops->setup		= op_arm_setup;
-	ops->start		= op_arm_start;
-	ops->stop		= op_arm_stop;
-	ops->shutdown		= op_arm_stop;
-	ops->cpu_type		= op_name_from_perf_id(armpmu_get_pmu_id());
-
-	if (!ops->cpu_type)
-		ret = -ENODEV;
-	else
-		pr_info("oprofile: using %s\n", ops->cpu_type);
 
-	return ret;
+	return oprofile_perf_init(ops);
 }
 
-void oprofile_arch_exit(void)
+void __exit oprofile_arch_exit(void)
 {
-	int cpu, id;
-	struct perf_event *event;
-
-	if (*perf_events) {
-		for_each_possible_cpu(cpu) {
-			for (id = 0; id < perf_num_counters; ++id) {
-				event = perf_events[cpu][id];
-				if (event != NULL)
-					perf_event_release_kernel(event);
-			}
-			kfree(perf_events[cpu]);
-		}
-	}
-
-	if (counter_config) {
-		kfree(counter_config);
-		exit_driverfs();
-	}
+	oprofile_perf_exit();
 }
 #else
 int __init oprofile_arch_init(struct oprofile_operations *ops)
@@ -425,5 +126,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	pr_info("oprofile: hardware counters not available\n");
 	return -ENODEV;
 }
-void oprofile_arch_exit(void) {}
+void __exit oprofile_arch_exit(void) {}
 #endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 33990fa95af0..35b6c3f85173 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -249,6 +249,11 @@ config ARCH_SHMOBILE
 	select PM
 	select PM_RUNTIME
 
+config CPU_HAS_PMU
+	depends on CPU_SH4 || CPU_SH4A
+	default y
+	bool
+
 if SUPERH32
 
 choice
@@ -738,6 +743,14 @@ config GUSA_RB
 	  LLSC, this should be more efficient than the other alternative of
 	  disabling interrupts around the atomic sequence.
 
+config HW_PERF_EVENTS
+	bool "Enable hardware performance counter support for perf events"
+	depends on PERF_EVENTS && CPU_HAS_PMU
+	default y
+	help
+	  Enable hardware performance counter support for perf events. If
+	  disabled, perf events will use software events only.
+
 source "drivers/sh/Kconfig"
 
 endmenu
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 036f7a9296fa..5a4b33435650 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -59,6 +59,24 @@ static inline int sh_pmu_initialized(void)
 	return !!sh_pmu;
 }
 
+const char *perf_pmu_name(void)
+{
+	if (!sh_pmu)
+		return NULL;
+
+	return sh_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	if (!sh_pmu)
+		return 0;
+
+	return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 /*
  * Release the PMU if this is the last perf_event.
  */
diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile
index 4886c5c1786c..e85aae73e3dc 100644
--- a/arch/sh/oprofile/Makefile
+++ b/arch/sh/oprofile/Makefile
@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
 oprofile-y	:= $(DRIVER_OBJS) common.o backtrace.o
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index ac604937f3ee..e10d89376f9b 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -17,114 +17,45 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/perf_event.h>
 #include <asm/processor.h>
-#include "op_impl.h"
-
-static struct op_sh_model *model;
-
-static struct op_counter_config ctr[20];
 
+#ifdef CONFIG_HW_PERF_EVENTS
 extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
 
-static int op_sh_setup(void)
-{
-	/* Pre-compute the values to stuff in the hardware registers. */
-	model->reg_setup(ctr);
-
-	/* Configure the registers on all cpus. */
-	on_each_cpu(model->cpu_setup, NULL, 1);
-
-	return 0;
-}
-
-static int op_sh_create_files(struct super_block *sb, struct dentry *root)
+char *op_name_from_perf_id(void)
 {
-	int i, ret = 0;
+	const char *pmu;
+	char buf[20];
+	int size;
 
-	for (i = 0; i < model->num_counters; i++) {
-		struct dentry *dir;
-		char buf[4];
+	pmu = perf_pmu_name();
+	if (!pmu)
+		return NULL;
 
-		snprintf(buf, sizeof(buf), "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
+	size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
+	if (size > -1 && size < sizeof(buf))
+		return buf;
 
-		ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-		ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-		ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-		ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-
-		if (model->create_files)
-			ret |= model->create_files(sb, dir);
-		else
-			ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-
-		/* Dummy entries */
-		ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
-	}
-
-	return ret;
+	return NULL;
 }
 
-static int op_sh_start(void)
+int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	/* Enable performance monitoring for all counters. */
-	on_each_cpu(model->cpu_start, NULL, 1);
+	ops->backtrace = sh_backtrace;
 
-	return 0;
+	return oprofile_perf_init(ops);
 }
 
-static void op_sh_stop(void)
+void __exit oprofile_arch_exit(void)
 {
-	/* Disable performance monitoring for all counters. */
-	on_each_cpu(model->cpu_stop, NULL, 1);
+	oprofile_perf_exit();
 }
-
+#else
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	struct op_sh_model *lmodel = NULL;
-	int ret;
-
-	/*
-	 * Always assign the backtrace op. If the counter initialization
-	 * fails, we fall back to the timer which will still make use of
-	 * this.
-	 */
-	ops->backtrace = sh_backtrace;
-
-	/*
-	 * XXX
-	 *
-	 * All of the SH7750/SH-4A counters have been converted to perf,
-	 * this infrastructure hook is left for other users until they've
-	 * had a chance to convert over, at which point all of this
-	 * will be deleted.
-	 */
-
-	if (!lmodel)
-		return -ENODEV;
-	if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
-		return -ENODEV;
-
-	ret = lmodel->init();
-	if (unlikely(ret != 0))
-		return ret;
-
-	model = lmodel;
-
-	ops->setup = op_sh_setup;
-	ops->create_files = op_sh_create_files;
-	ops->start = op_sh_start;
-	ops->stop = op_sh_stop;
-	ops->cpu_type = lmodel->cpu_type;
-
-	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
-	       lmodel->cpu_type);
-
-	return 0;
-}
-
-void oprofile_arch_exit(void)
-{
-	if (model && model->exit)
-		model->exit();
+	pr_info("oprofile: hardware counters not available\n");
+	return -ENODEV;
 }
+void __exit oprofile_arch_exit(void) {}
+#endif /* CONFIG_HW_PERF_EVENTS */
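
One caveat in the new SH op_name_from_perf_id(): buf is an on-stack array, so returning it hands oprofile_perf_init() a dangling pointer once the function returns. A safer variant (a sketch, not part of this patch) keeps the name in static storage, which is enough here because the function is only called once at init time:

#include <linux/kernel.h>
#include <linux/perf_event.h>

char *op_name_from_perf_id(void)
{
	static char buf[20];		/* static so the caller may keep the pointer */
	const char *pmu = perf_pmu_name();
	int size;

	if (!pmu)
		return NULL;

	size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
	if (size > -1 && size < sizeof(buf))
		return buf;

	return NULL;			/* name would have been truncated */
}
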
diff --git a/arch/sh/oprofile/op_impl.h b/arch/sh/oprofile/op_impl.h
deleted file mode 100644
index 1244479ceb29..000000000000
--- a/arch/sh/oprofile/op_impl.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __OP_IMPL_H
-#define __OP_IMPL_H
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
-	unsigned long enabled;
-	unsigned long event;
-
-	unsigned long count;
-
-	/* Dummy values for userspace tool compliance */
-	unsigned long kernel;
-	unsigned long user;
-	unsigned long unit_mask;
-};
-
-/* Per-architecture configury and hooks. */
-struct op_sh_model {
-	void (*reg_setup)(struct op_counter_config *);
-	int (*create_files)(struct super_block *sb, struct dentry *dir);
-	void (*cpu_setup)(void *dummy);
-	int (*init)(void);
-	void (*exit)(void);
-	void (*cpu_start)(void *args);
-	void (*cpu_stop)(void *args);
-	char *cpu_type;
-	unsigned char num_counters;
-};
-
-/* arch/sh/oprofile/common.c */
-extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif /* __OP_IMPL_H */
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 3855096c59b8..2d49d4e19a36 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -14,6 +14,7 @@
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 #include <asm/stacktrace.h>
+#include <linux/compat.h>
 
 static void backtrace_warning_symbol(void *data, char *msg,
 				     unsigned long symbol)
@@ -48,14 +49,12 @@ static struct stacktrace_ops backtrace_ops = {
 	.walk_stack	= print_context_stack,
 };
 
-struct frame_head {
-	struct frame_head *bp;
-	unsigned long ret;
-} __attribute__((packed));
-
-static struct frame_head *dump_user_backtrace(struct frame_head *head)
+#ifdef CONFIG_COMPAT
+static struct stack_frame_ia32 *
+dump_user_backtrace_32(struct stack_frame_ia32 *head)
 {
-	struct frame_head bufhead[2];
+	struct stack_frame_ia32 bufhead[2];
+	struct stack_frame_ia32 *fp;
 
 	/* Also check accessibility of one struct frame_head beyond */
 	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
@@ -63,20 +62,66 @@ static struct frame_head *dump_user_backtrace(struct frame_head *head)
 	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
 		return NULL;
 
-	oprofile_add_trace(bufhead[0].ret);
+	fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
+
+	oprofile_add_trace(bufhead[0].return_address);
+
+	/* frame pointers should strictly progress back up the stack
+	 * (towards higher addresses) */
+	if (head >= fp)
+		return NULL;
+
+	return fp;
+}
+
+static inline int
+x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
+{
+	struct stack_frame_ia32 *head;
+
+	/* User process is 32-bit */
+	if (!current || !test_thread_flag(TIF_IA32))
+		return 0;
+
+	head = (struct stack_frame_ia32 *) regs->bp;
+	while (depth-- && head)
+		head = dump_user_backtrace_32(head);
+
+	return 1;
+}
+
+#else
+static inline int
+x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
+{
+	return 0;
+}
+#endif /* CONFIG_COMPAT */
+
+static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
+{
+	struct stack_frame bufhead[2];
+
+	/* Also check accessibility of one struct stack_frame beyond */
+	if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
+		return NULL;
+	if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+		return NULL;
+
+	oprofile_add_trace(bufhead[0].return_address);
 
 	/* frame pointers should strictly progress back up the stack
 	 * (towards higher addresses) */
-	if (head >= bufhead[0].bp)
+	if (head >= bufhead[0].next_frame)
 		return NULL;
 
-	return bufhead[0].bp;
+	return bufhead[0].next_frame;
 }
 
 void
 x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
-	struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+	struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
 
 	if (!user_mode_vm(regs)) {
 		unsigned long stack = kernel_stack_pointer(regs);
@@ -86,6 +131,9 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 		return;
 	}
 
+	if (x86_backtrace_32(regs, depth))
+		return;
+
 	while (depth-- && head)
 		head = dump_user_backtrace(head);
 }
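
The compat walker copies each 32-bit frame with __copy_from_user_inatomic() and widens the saved frame pointer through compat_ptr(), so user memory is only trusted one frame at a time. The frame layouts it relies on are shown below as a reference sketch (field names taken from the hunk above; the real definitions live in the x86 stacktrace headers):

#include <linux/types.h>

struct stack_frame {			/* native frame: %rbp/%ebp chain */
	struct stack_frame	*next_frame;
	unsigned long		return_address;
};

struct stack_frame_ia32 {		/* 32-bit user task on a 64-bit kernel */
	u32	next_frame;		/* compat pointer, widened via compat_ptr() */
	u32	return_address;
};
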
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f1575c9a2572..bd1489c3ce09 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -695,9 +695,6 @@ static int __init ppro_init(char **cpu_type)
 	return 1;
 }
 
-/* in order to get sysfs right */
-static int using_nmi;
-
 int __init op_nmi_init(struct oprofile_operations *ops)
 {
 	__u8 vendor = boot_cpu_data.x86_vendor;
@@ -705,8 +702,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	char *cpu_type = NULL;
 	int ret = 0;
 
-	using_nmi = 0;
-
 	if (!cpu_has_apic)
 		return -ENODEV;
 
@@ -790,13 +785,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	if (ret)
 		return ret;
 
-	using_nmi = 1;
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 0;
 }
 
 void op_nmi_exit(void)
 {
-	if (using_nmi)
-		exit_sysfs();
+	exit_sysfs();
 }
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index b336cd9ee7a1..f9bda64fcd1b 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -225,26 +225,17 @@ post_sync:
 	mutex_unlock(&start_mutex);
 }
 
-int oprofile_set_backtrace(unsigned long val)
+int oprofile_set_ulong(unsigned long *addr, unsigned long val)
 {
-	int err = 0;
+	int err = -EBUSY;
 
 	mutex_lock(&start_mutex);
-
-	if (oprofile_started) {
-		err = -EBUSY;
-		goto out;
-	}
-
-	if (!oprofile_ops.backtrace) {
-		err = -EINVAL;
-		goto out;
+	if (!oprofile_started) {
+		*addr = val;
+		err = 0;
 	}
-
-	oprofile_backtrace_depth = val;
-
-out:
 	mutex_unlock(&start_mutex);
+
 	return err;
 }
 
@@ -257,16 +248,9 @@ static int __init oprofile_init(void)
 		printk(KERN_INFO "oprofile: using timer interrupt.\n");
 		err = oprofile_timer_init(&oprofile_ops);
 		if (err)
-			goto out_arch;
+			return err;
 	}
-	err = oprofilefs_register();
-	if (err)
-		goto out_arch;
-	return 0;
-
-out_arch:
-	oprofile_arch_exit();
-	return err;
+	return oprofilefs_register();
 }
 
 
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 47e12cb4ee8b..177b73de5e5f 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -37,7 +37,7 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root);
 int oprofile_timer_init(struct oprofile_operations *ops);
 void oprofile_timer_exit(void);
 
-int oprofile_set_backtrace(unsigned long depth);
+int oprofile_set_ulong(unsigned long *addr, unsigned long val);
 int oprofile_set_timeout(unsigned long time);
 
 #endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index bbd7516e0869..ccf099e684a4 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -79,14 +79,17 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
 	if (*offset)
 		return -EINVAL;
 
+	if (!oprofile_ops.backtrace)
+		return -EINVAL;
+
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
 	if (retval)
 		return retval;
 
-	retval = oprofile_set_backtrace(val);
-
+	retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
 	if (retval)
 		return retval;
+
 	return count;
 }
 
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
new file mode 100644
index 000000000000..9046f7b2ed79
--- /dev/null
+++ b/drivers/oprofile/oprofile_perf.c
@@ -0,0 +1,328 @@
1/*
2 * Copyright 2010 ARM Ltd.
3 *
4 * Perf-events backend for OProfile.
5 */
6#include <linux/perf_event.h>
7#include <linux/platform_device.h>
8#include <linux/oprofile.h>
9#include <linux/slab.h>
10
11/*
12 * Per performance monitor configuration as set via oprofilefs.
13 */
14struct op_counter_config {
15 unsigned long count;
16 unsigned long enabled;
17 unsigned long event;
18 unsigned long unit_mask;
19 unsigned long kernel;
20 unsigned long user;
21 struct perf_event_attr attr;
22};
23
24static int oprofile_perf_enabled;
25static DEFINE_MUTEX(oprofile_perf_mutex);
26
27static struct op_counter_config *counter_config;
28static struct perf_event **perf_events[nr_cpumask_bits];
29static int num_counters;
30
31/*
32 * Overflow callback for oprofile.
33 */
34static void op_overflow_handler(struct perf_event *event, int unused,
35 struct perf_sample_data *data, struct pt_regs *regs)
36{
37 int id;
38 u32 cpu = smp_processor_id();
39
40 for (id = 0; id < num_counters; ++id)
41 if (perf_events[cpu][id] == event)
42 break;
43
44 if (id != num_counters)
45 oprofile_add_sample(regs, id);
46 else
47 pr_warning("oprofile: ignoring spurious overflow "
48 "on cpu %u\n", cpu);
49}
50
51/*
52 * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
53 * settings in counter_config. Attributes are created as `pinned' events and
54 * so are permanently scheduled on the PMU.
55 */
56static void op_perf_setup(void)
57{
58 int i;
59 u32 size = sizeof(struct perf_event_attr);
60 struct perf_event_attr *attr;
61
62 for (i = 0; i < num_counters; ++i) {
63 attr = &counter_config[i].attr;
64 memset(attr, 0, size);
65 attr->type = PERF_TYPE_RAW;
66 attr->size = size;
67 attr->config = counter_config[i].event;
68 attr->sample_period = counter_config[i].count;
69 attr->pinned = 1;
70 }
71}
72
73static int op_create_counter(int cpu, int event)
74{
75 struct perf_event *pevent;
76
77 if (!counter_config[event].enabled || perf_events[cpu][event])
78 return 0;
79
80 pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
81 cpu, NULL,
82 op_overflow_handler);
83
84 if (IS_ERR(pevent))
85 return PTR_ERR(pevent);
86
87 if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
88 perf_event_release_kernel(pevent);
89 pr_warning("oprofile: failed to enable event %d "
90 "on CPU %d\n", event, cpu);
91 return -EBUSY;
92 }
93
94 perf_events[cpu][event] = pevent;
95
96 return 0;
97}
98
99static void op_destroy_counter(int cpu, int event)
100{
101 struct perf_event *pevent = perf_events[cpu][event];
102
103 if (pevent) {
104 perf_event_release_kernel(pevent);
105 perf_events[cpu][event] = NULL;
106 }
107}
108
109/*
110 * Called by oprofile_perf_start to create active perf events based on the
111 * perviously configured attributes.
112 */
113static int op_perf_start(void)
114{
115 int cpu, event, ret = 0;
116
117 for_each_online_cpu(cpu) {
118 for (event = 0; event < num_counters; ++event) {
119 ret = op_create_counter(cpu, event);
120 if (ret)
121 return ret;
122 }
123 }
124
125 return ret;
126}
127
128/*
129 * Called by oprofile_perf_stop at the end of a profiling run.
130 */
131static void op_perf_stop(void)
132{
133 int cpu, event;
134
135 for_each_online_cpu(cpu)
136 for (event = 0; event < num_counters; ++event)
137 op_destroy_counter(cpu, event);
138}
139
140static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
141{
142 unsigned int i;
143
144 for (i = 0; i < num_counters; i++) {
145 struct dentry *dir;
146 char buf[4];
147
148 snprintf(buf, sizeof buf, "%d", i);
149 dir = oprofilefs_mkdir(sb, root, buf);
150 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
151 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
152 oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
153 oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
154 oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
155 oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
156 }
157
158 return 0;
159}
160
161static int oprofile_perf_setup(void)
162{
163 spin_lock(&oprofilefs_lock);
164 op_perf_setup();
165 spin_unlock(&oprofilefs_lock);
166 return 0;
167}
168
169static int oprofile_perf_start(void)
170{
171 int ret = -EBUSY;
172
173 mutex_lock(&oprofile_perf_mutex);
174 if (!oprofile_perf_enabled) {
175 ret = 0;
176 op_perf_start();
177 oprofile_perf_enabled = 1;
178 }
179 mutex_unlock(&oprofile_perf_mutex);
180 return ret;
181}
182
183static void oprofile_perf_stop(void)
184{
185 mutex_lock(&oprofile_perf_mutex);
186 if (oprofile_perf_enabled)
187 op_perf_stop();
188 oprofile_perf_enabled = 0;
189 mutex_unlock(&oprofile_perf_mutex);
190}
191
192#ifdef CONFIG_PM
193
194static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
195{
196 mutex_lock(&oprofile_perf_mutex);
197 if (oprofile_perf_enabled)
198 op_perf_stop();
199 mutex_unlock(&oprofile_perf_mutex);
200 return 0;
201}
202
203static int oprofile_perf_resume(struct platform_device *dev)
204{
205 mutex_lock(&oprofile_perf_mutex);
206 if (oprofile_perf_enabled && op_perf_start())
207 oprofile_perf_enabled = 0;
208 mutex_unlock(&oprofile_perf_mutex);
209 return 0;
210}
211
212static struct platform_driver oprofile_driver = {
213 .driver = {
214 .name = "oprofile-perf",
215 },
216 .resume = oprofile_perf_resume,
217 .suspend = oprofile_perf_suspend,
218};
219
220static struct platform_device *oprofile_pdev;
221
222static int __init init_driverfs(void)
223{
224 int ret;
225
226 ret = platform_driver_register(&oprofile_driver);
227 if (ret)
228 return ret;
229
230 oprofile_pdev = platform_device_register_simple(
231 oprofile_driver.driver.name, 0, NULL, 0);
232 if (IS_ERR(oprofile_pdev)) {
233 ret = PTR_ERR(oprofile_pdev);
234 platform_driver_unregister(&oprofile_driver);
235 }
236
237 return ret;
238}
239
240static void exit_driverfs(void)
241{
242 platform_device_unregister(oprofile_pdev);
243 platform_driver_unregister(&oprofile_driver);
244}
245
246#else
247
248static inline int init_driverfs(void) { return 0; }
249static inline void exit_driverfs(void) { }
250
251#endif /* CONFIG_PM */
252
253void oprofile_perf_exit(void)
254{
255 int cpu, id;
256 struct perf_event *event;
257
258 for_each_possible_cpu(cpu) {
259 for (id = 0; id < num_counters; ++id) {
260 event = perf_events[cpu][id];
261 if (event)
262 perf_event_release_kernel(event);
263 }
264
265 kfree(perf_events[cpu]);
266 }
267
268 kfree(counter_config);
269 exit_driverfs();
270}
271
272int __init oprofile_perf_init(struct oprofile_operations *ops)
273{
274 int cpu, ret = 0;
275
276 ret = init_driverfs();
277 if (ret)
278 return ret;
279
280 memset(&perf_events, 0, sizeof(perf_events));
281
282 num_counters = perf_num_counters();
283 if (num_counters <= 0) {
284 pr_info("oprofile: no performance counters\n");
285 ret = -ENODEV;
286 goto out;
287 }
288
289 counter_config = kcalloc(num_counters,
290 sizeof(struct op_counter_config), GFP_KERNEL);
291
292 if (!counter_config) {
293 pr_info("oprofile: failed to allocate %d "
294 "counters\n", num_counters);
295 ret = -ENOMEM;
296 num_counters = 0;
297 goto out;
298 }
299
300 for_each_possible_cpu(cpu) {
301 perf_events[cpu] = kcalloc(num_counters,
302 sizeof(struct perf_event *), GFP_KERNEL);
303 if (!perf_events[cpu]) {
304 pr_info("oprofile: failed to allocate %d perf events "
305 "for cpu %d\n", num_counters, cpu);
306 ret = -ENOMEM;
307 goto out;
308 }
309 }
310
311 ops->create_files = oprofile_perf_create_files;
312 ops->setup = oprofile_perf_setup;
313 ops->start = oprofile_perf_start;
314 ops->stop = oprofile_perf_stop;
315 ops->shutdown = oprofile_perf_stop;
316 ops->cpu_type = op_name_from_perf_id();
317
318 if (!ops->cpu_type)
319 ret = -ENODEV;
320 else
321 pr_info("oprofile: using %s\n", ops->cpu_type);
322
323out:
324 if (ret)
325 oprofile_perf_exit();
326
327 return ret;
328}
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 2766a6d3c2e9..1944621930d9 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -91,16 +91,20 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
 
 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
 {
-	unsigned long *value = file->private_data;
+	unsigned long value;
 	int retval;
 
 	if (*offset)
 		return -EINVAL;
 
-	retval = oprofilefs_ulong_from_user(value, buf, count);
+	retval = oprofilefs_ulong_from_user(&value, buf, count);
+	if (retval)
+		return retval;
 
+	retval = oprofile_set_ulong(file->private_data, value);
 	if (retval)
 		return retval;
+
 	return count;
 }
 
@@ -126,50 +130,41 @@ static const struct file_operations ulong_ro_fops = {
 };
 
 
-static struct dentry *__oprofilefs_create_file(struct super_block *sb,
+static int __oprofilefs_create_file(struct super_block *sb,
 	struct dentry *root, char const *name, const struct file_operations *fops,
-	int perm)
+	int perm, void *priv)
 {
 	struct dentry *dentry;
 	struct inode *inode;
 
 	dentry = d_alloc_name(root, name);
 	if (!dentry)
-		return NULL;
+		return -ENOMEM;
 	inode = oprofilefs_get_inode(sb, S_IFREG | perm);
 	if (!inode) {
 		dput(dentry);
-		return NULL;
+		return -ENOMEM;
 	}
 	inode->i_fop = fops;
 	d_add(dentry, inode);
-	return dentry;
+	dentry->d_inode->i_private = priv;
+	return 0;
 }
 
 
 int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
 	char const *name, unsigned long *val)
 {
-	struct dentry *d = __oprofilefs_create_file(sb, root, name,
-						     &ulong_fops, 0644);
-	if (!d)
-		return -EFAULT;
-
-	d->d_inode->i_private = val;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name,
+					&ulong_fops, 0644, val);
 }
 
 
 int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
 	char const *name, unsigned long *val)
 {
-	struct dentry *d = __oprofilefs_create_file(sb, root, name,
-						     &ulong_ro_fops, 0444);
-	if (!d)
-		return -EFAULT;
-
-	d->d_inode->i_private = val;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name,
+					&ulong_ro_fops, 0444, val);
 }
 
 
@@ -189,31 +184,22 @@ static const struct file_operations atomic_ro_fops = {
 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
 	char const *name, atomic_t *val)
 {
-	struct dentry *d = __oprofilefs_create_file(sb, root, name,
-						     &atomic_ro_fops, 0444);
-	if (!d)
-		return -EFAULT;
-
-	d->d_inode->i_private = val;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name,
+					&atomic_ro_fops, 0444, val);
 }
 
 
 int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
 	char const *name, const struct file_operations *fops)
 {
-	if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
-		return -EFAULT;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name, fops, 0644, NULL);
 }
 
 
 int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
 	char const *name, const struct file_operations *fops, int perm)
 {
-	if (!__oprofilefs_create_file(sb, root, name, fops, perm))
-		return -EFAULT;
-	return 0;
+	return __oprofilefs_create_file(sb, root, name, fops, perm, NULL);
 }
 
 
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 5171639ecf0f..32fb81212fd1 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/spinlock.h>
+#include <linux/init.h>
 #include <asm/atomic.h>
 
 /* Each escaped entry is prefixed by ESCAPE_CODE
@@ -185,4 +186,10 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val);
 int oprofile_add_data64(struct op_entry *entry, u64 val);
 int oprofile_write_commit(struct op_entry *entry);
 
+#ifdef CONFIG_PERF_EVENTS
+int __init oprofile_perf_init(struct oprofile_operations *ops);
+void oprofile_perf_exit(void);
+char *op_name_from_perf_id(void);
+#endif /* CONFIG_PERF_EVENTS */
+
 #endif /* OPROFILE_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 61b1e2d760fd..a9227e985207 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -890,6 +890,8 @@ struct perf_output_handle {
 extern int perf_pmu_register(struct pmu *pmu);
 extern void perf_pmu_unregister(struct pmu *pmu);
 
+extern int perf_num_counters(void);
+extern const char *perf_pmu_name(void);
 extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 64507eaa2d9e..1ec3916ffef0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -63,6 +63,11 @@ static atomic64_t perf_event_id;
 
 void __weak perf_event_print_debug(void)	{ }
 
+extern __weak const char *perf_pmu_name(void)
+{
+	return "pmu";
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
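
The weak perf_pmu_name() definition gives every build a fallback name, so callers such as the SH op_name_from_perf_id() above can use it unconditionally; an architecture overrides it simply by providing a strong definition, which the linker prefers over the "pmu" default. A sketch of such an override, with a hypothetical mypmu_name string standing in for the arch's real PMU name:

#include <linux/module.h>
#include <linux/perf_event.h>

static const char *mypmu_name = "myarch-pmu";	/* hypothetical PMU name */

const char *perf_pmu_name(void)
{
	return mypmu_name;		/* strong definition beats the __weak "pmu" */
}
EXPORT_SYMBOL_GPL(perf_pmu_name);
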