-rw-r--r--	arch/arm/kernel/perf_event_cpu.c	 47
-rw-r--r--	arch/arm/kernel/perf_event_v6.c		 72
-rw-r--r--	arch/arm/kernel/perf_event_v7.c		106
-rw-r--r--	arch/arm/kernel/perf_event_xscale.c	 72
4 files changed, 153 insertions(+), 144 deletions(-)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 8d7d8d4de9d6..3863fd405fa1 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 #include <asm/cputype.h>
@@ -195,13 +196,13 @@ static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = {
 /*
  * CPU PMU identification and probing.
  */
-static struct arm_pmu *__devinit probe_current_pmu(void)
+static int __devinit probe_current_pmu(struct arm_pmu *pmu)
 {
-	struct arm_pmu *pmu = NULL;
 	int cpu = get_cpu();
 	unsigned long cpuid = read_cpuid_id();
 	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
 	unsigned long part_number = (cpuid & 0xFFF0);
+	int ret = -ENODEV;
 
 	pr_info("probing PMU on CPU %d\n", cpu);
 
@@ -211,25 +212,25 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
 		case 0xB360:	/* ARM1136 */
 		case 0xB560:	/* ARM1156 */
 		case 0xB760:	/* ARM1176 */
-			pmu = armv6pmu_init();
+			ret = armv6pmu_init(pmu);
 			break;
 		case 0xB020:	/* ARM11mpcore */
-			pmu = armv6mpcore_pmu_init();
+			ret = armv6mpcore_pmu_init(pmu);
 			break;
 		case 0xC080:	/* Cortex-A8 */
-			pmu = armv7_a8_pmu_init();
+			ret = armv7_a8_pmu_init(pmu);
 			break;
 		case 0xC090:	/* Cortex-A9 */
-			pmu = armv7_a9_pmu_init();
+			ret = armv7_a9_pmu_init(pmu);
 			break;
 		case 0xC050:	/* Cortex-A5 */
-			pmu = armv7_a5_pmu_init();
+			ret = armv7_a5_pmu_init(pmu);
 			break;
 		case 0xC0F0:	/* Cortex-A15 */
-			pmu = armv7_a15_pmu_init();
+			ret = armv7_a15_pmu_init(pmu);
 			break;
 		case 0xC070:	/* Cortex-A7 */
-			pmu = armv7_a7_pmu_init();
+			ret = armv7_a7_pmu_init(pmu);
 			break;
 		}
 	/* Intel CPUs [xscale]. */
@@ -237,39 +238,51 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
 		part_number = (cpuid >> 13) & 0x7;
 		switch (part_number) {
 		case 1:
-			pmu = xscale1pmu_init();
+			ret = xscale1pmu_init(pmu);
 			break;
 		case 2:
-			pmu = xscale2pmu_init();
+			ret = xscale2pmu_init(pmu);
 			break;
 		}
 	}
 
 	put_cpu();
-	return pmu;
+	return ret;
 }
 
 static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
-	struct arm_pmu *(*init_fn)(void);
+	int (*init_fn)(struct arm_pmu *);
 	struct device_node *node = pdev->dev.of_node;
+	struct arm_pmu *pmu;
+	int ret = -ENODEV;
 
 	if (cpu_pmu) {
 		pr_info("attempt to register multiple PMU devices!");
 		return -ENOSPC;
 	}
 
+	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+	if (!pmu) {
+		pr_info("failed to allocate PMU device!");
+		return -ENOMEM;
+	}
+
 	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
 		init_fn = of_id->data;
-		cpu_pmu = init_fn();
+		ret = init_fn(pmu);
 	} else {
-		cpu_pmu = probe_current_pmu();
+		ret = probe_current_pmu(pmu);
 	}
 
-	if (!cpu_pmu)
-		return -ENODEV;
+	if (ret) {
+		pr_info("failed to register PMU devices!");
+		kfree(pmu);
+		return ret;
+	}
 
+	cpu_pmu = pmu;
 	cpu_pmu->plat_device = pdev;
 	cpu_pmu_init(cpu_pmu);
 	register_cpu_notifier(&cpu_pmu_hotplug_notifier);
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 6ccc07971745..3908cb4e5566 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -649,24 +649,22 @@ static int armv6_map_event(struct perf_event *event)
 				&armv6_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6pmu = {
-	.name			= "v6",
-	.handle_irq		= armv6pmu_handle_irq,
-	.enable			= armv6pmu_enable_event,
-	.disable		= armv6pmu_disable_event,
-	.read_counter		= armv6pmu_read_counter,
-	.write_counter		= armv6pmu_write_counter,
-	.get_event_idx		= armv6pmu_get_event_idx,
-	.start			= armv6pmu_start,
-	.stop			= armv6pmu_stop,
-	.map_event		= armv6_map_event,
-	.num_events		= 3,
-	.max_period		= (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit armv6pmu_init(void)
+static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return &armv6pmu;
+	cpu_pmu->name		= "v6";
+	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
+	cpu_pmu->enable		= armv6pmu_enable_event;
+	cpu_pmu->disable	= armv6pmu_disable_event;
+	cpu_pmu->read_counter	= armv6pmu_read_counter;
+	cpu_pmu->write_counter	= armv6pmu_write_counter;
+	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
+	cpu_pmu->start		= armv6pmu_start;
+	cpu_pmu->stop		= armv6pmu_stop;
+	cpu_pmu->map_event	= armv6_map_event;
+	cpu_pmu->num_events	= 3;
+	cpu_pmu->max_period	= (1LLU << 32) - 1;
+
+	return 0;
 }
 
 /*
@@ -683,33 +681,31 @@ static int armv6mpcore_map_event(struct perf_event *event)
 				&armv6mpcore_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6mpcore_pmu = {
-	.name			= "v6mpcore",
-	.handle_irq		= armv6pmu_handle_irq,
-	.enable			= armv6pmu_enable_event,
-	.disable		= armv6mpcore_pmu_disable_event,
-	.read_counter		= armv6pmu_read_counter,
-	.write_counter		= armv6pmu_write_counter,
-	.get_event_idx		= armv6pmu_get_event_idx,
-	.start			= armv6pmu_start,
-	.stop			= armv6pmu_stop,
-	.map_event		= armv6mpcore_map_event,
-	.num_events		= 3,
-	.max_period		= (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
+static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return &armv6mpcore_pmu;
+	cpu_pmu->name		= "v6mpcore";
+	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
+	cpu_pmu->enable		= armv6pmu_enable_event;
+	cpu_pmu->disable	= armv6mpcore_pmu_disable_event;
+	cpu_pmu->read_counter	= armv6pmu_read_counter;
+	cpu_pmu->write_counter	= armv6pmu_write_counter;
+	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
+	cpu_pmu->start		= armv6pmu_start;
+	cpu_pmu->stop		= armv6pmu_stop;
+	cpu_pmu->map_event	= armv6mpcore_map_event;
+	cpu_pmu->num_events	= 3;
+	cpu_pmu->max_period	= (1LLU << 32) - 1;
+
+	return 0;
 }
 #else
-static struct arm_pmu *__devinit armv6pmu_init(void)
+static int armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
+static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 #endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index bd4b090ebcfd..b189403f30e4 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -18,8 +18,6 @@
 
 #ifdef CONFIG_CPU_V7
 
-static struct arm_pmu armv7pmu;
-
 /*
  * Common ARMv7 event types
  *
@@ -1014,7 +1012,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 * We only need to set the event for the cycle counter if we
 	 * have the ability to perform event filtering.
 	 */
-	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
+	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 
 	/*
@@ -1232,17 +1230,18 @@ static int armv7_a7_map_event(struct perf_event *event)
 				&armv7_a7_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv7pmu = {
-	.handle_irq		= armv7pmu_handle_irq,
-	.enable			= armv7pmu_enable_event,
-	.disable		= armv7pmu_disable_event,
-	.read_counter		= armv7pmu_read_counter,
-	.write_counter		= armv7pmu_write_counter,
-	.get_event_idx		= armv7pmu_get_event_idx,
-	.start			= armv7pmu_start,
-	.stop			= armv7pmu_stop,
-	.reset			= armv7pmu_reset,
-	.max_period		= (1LLU << 32) - 1,
+static void armv7pmu_init(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
+	cpu_pmu->enable		= armv7pmu_enable_event;
+	cpu_pmu->disable	= armv7pmu_disable_event;
+	cpu_pmu->read_counter	= armv7pmu_read_counter;
+	cpu_pmu->write_counter	= armv7pmu_write_counter;
+	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
+	cpu_pmu->start		= armv7pmu_start;
+	cpu_pmu->stop		= armv7pmu_stop;
+	cpu_pmu->reset		= armv7pmu_reset;
+	cpu_pmu->max_period	= (1LLU << 32) - 1;
 };
 
 static u32 __devinit armv7_read_num_pmnc_events(void)
@@ -1256,70 +1255,75 @@ static u32 __devinit armv7_read_num_pmnc_events(void)
 	return nb_cnt + 1;
 }
 
-static struct arm_pmu *__devinit armv7_a8_pmu_init(void)
+static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name		= "ARMv7 Cortex-A8";
-	armv7pmu.map_event	= armv7_a8_map_event;
-	armv7pmu.num_events	= armv7_read_num_pmnc_events();
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "ARMv7 Cortex-A8";
+	cpu_pmu->map_event	= armv7_a8_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	return 0;
 }
 
-static struct arm_pmu *__devinit armv7_a9_pmu_init(void)
+static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name		= "ARMv7 Cortex-A9";
-	armv7pmu.map_event	= armv7_a9_map_event;
-	armv7pmu.num_events	= armv7_read_num_pmnc_events();
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "ARMv7 Cortex-A9";
+	cpu_pmu->map_event	= armv7_a9_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	return 0;
 }
 
-static struct arm_pmu *__devinit armv7_a5_pmu_init(void)
+static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name		= "ARMv7 Cortex-A5";
-	armv7pmu.map_event	= armv7_a5_map_event;
-	armv7pmu.num_events	= armv7_read_num_pmnc_events();
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "ARMv7 Cortex-A5";
+	cpu_pmu->map_event	= armv7_a5_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	return 0;
 }
 
-static struct arm_pmu *__devinit armv7_a15_pmu_init(void)
+static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name		= "ARMv7 Cortex-A15";
-	armv7pmu.map_event	= armv7_a15_map_event;
-	armv7pmu.num_events	= armv7_read_num_pmnc_events();
-	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "ARMv7 Cortex-A15";
+	cpu_pmu->map_event	= armv7_a15_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+	return 0;
 }
 
-static struct arm_pmu *__devinit armv7_a7_pmu_init(void)
+static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	armv7pmu.name		= "ARMv7 Cortex-A7";
-	armv7pmu.map_event	= armv7_a7_map_event;
-	armv7pmu.num_events	= armv7_read_num_pmnc_events();
-	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
-	return &armv7pmu;
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "ARMv7 Cortex-A7";
+	cpu_pmu->map_event	= armv7_a7_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+	return 0;
 }
 #else
-static struct arm_pmu *__devinit armv7_a8_pmu_init(void)
+static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv7_a9_pmu_init(void)
+static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv7_a5_pmu_init(void)
+static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv7_a15_pmu_init(void)
+static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv7_a7_pmu_init(void)
+static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 #endif	/* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 426e19f380a2..131ede6c2fdf 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -434,24 +434,22 @@ static int xscale_map_event(struct perf_event *event)
 				&xscale_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu xscale1pmu = {
-	.name			= "xscale1",
-	.handle_irq		= xscale1pmu_handle_irq,
-	.enable			= xscale1pmu_enable_event,
-	.disable		= xscale1pmu_disable_event,
-	.read_counter		= xscale1pmu_read_counter,
-	.write_counter		= xscale1pmu_write_counter,
-	.get_event_idx		= xscale1pmu_get_event_idx,
-	.start			= xscale1pmu_start,
-	.stop			= xscale1pmu_stop,
-	.map_event		= xscale_map_event,
-	.num_events		= 3,
-	.max_period		= (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit xscale1pmu_init(void)
+static int __devinit xscale1pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return &xscale1pmu;
+	cpu_pmu->name		= "xscale1";
+	cpu_pmu->handle_irq	= xscale1pmu_handle_irq;
+	cpu_pmu->enable		= xscale1pmu_enable_event;
+	cpu_pmu->disable	= xscale1pmu_disable_event;
+	cpu_pmu->read_counter	= xscale1pmu_read_counter;
+	cpu_pmu->write_counter	= xscale1pmu_write_counter;
+	cpu_pmu->get_event_idx	= xscale1pmu_get_event_idx;
+	cpu_pmu->start		= xscale1pmu_start;
+	cpu_pmu->stop		= xscale1pmu_stop;
+	cpu_pmu->map_event	= xscale_map_event;
+	cpu_pmu->num_events	= 3;
+	cpu_pmu->max_period	= (1LLU << 32) - 1;
+
+	return 0;
 }
 
 #define XSCALE2_OVERFLOWED_MASK	0x01f
@@ -801,33 +799,31 @@ xscale2pmu_write_counter(int counter, u32 val)
 	}
 }
 
-static struct arm_pmu xscale2pmu = {
-	.name			= "xscale2",
-	.handle_irq		= xscale2pmu_handle_irq,
-	.enable			= xscale2pmu_enable_event,
-	.disable		= xscale2pmu_disable_event,
-	.read_counter		= xscale2pmu_read_counter,
-	.write_counter		= xscale2pmu_write_counter,
-	.get_event_idx		= xscale2pmu_get_event_idx,
-	.start			= xscale2pmu_start,
-	.stop			= xscale2pmu_stop,
-	.map_event		= xscale_map_event,
-	.num_events		= 5,
-	.max_period		= (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit xscale2pmu_init(void)
+static int __devinit xscale2pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return &xscale2pmu;
+	cpu_pmu->name		= "xscale2";
+	cpu_pmu->handle_irq	= xscale2pmu_handle_irq;
+	cpu_pmu->enable		= xscale2pmu_enable_event;
+	cpu_pmu->disable	= xscale2pmu_disable_event;
+	cpu_pmu->read_counter	= xscale2pmu_read_counter;
+	cpu_pmu->write_counter	= xscale2pmu_write_counter;
+	cpu_pmu->get_event_idx	= xscale2pmu_get_event_idx;
+	cpu_pmu->start		= xscale2pmu_start;
+	cpu_pmu->stop		= xscale2pmu_stop;
+	cpu_pmu->map_event	= xscale_map_event;
+	cpu_pmu->num_events	= 5;
+	cpu_pmu->max_period	= (1LLU << 32) - 1;
+
+	return 0;
 }
 #else
-static struct arm_pmu *__devinit xscale1pmu_init(void)
+static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit xscale2pmu_init(void)
+static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 #endif	/* CONFIG_CPU_XSCALE */