author	Mark Rutland <mark.rutland@arm.com>	2011-05-17 06:20:11 -0400
committer	Will Deacon <will.deacon@arm.com>	2011-08-31 05:50:12 -0400
commit	8be3f9a2385f91f7bf5c58f351e24b9247898e8f (patch)
tree	5e293c7702f36b7eafd611bd5e8e710719643ac4 /arch/arm/kernel
parent	3fc2c83087717dc88003428245d97b9d432fff2d (diff)
ARM: perf: remove cpu-related misnomers
Currently struct cpu_hw_events stores data on events running on a
PMU associated with a CPU. As this data is general enough to be used
for system PMUs, this name is a misnomer, and may cause confusion when
it is used for system PMUs.

Additionally, 'armpmu' is commonly used as a parameter name for an
instance of struct arm_pmu. The name is also used for a global instance
which represents the CPU's PMU.

As cpu_hw_events is now not tied to CPU PMUs, it is renamed to
pmu_hw_events, with instances of it renamed similarly. As the global
'armpmu' is CPU-specific, it is renamed to cpu_pmu. This should make it
clearer which code is generic, and which is coupled with the CPU.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/perf_event.c	84
-rw-r--r--	arch/arm/kernel/perf_event_v6.c	18
-rw-r--r--	arch/arm/kernel/perf_event_v7.c	22
-rw-r--r--	arch/arm/kernel/perf_event_xscale.c	32
4 files changed, 78 insertions, 78 deletions
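In short, the patch leaves the renamed interface looking like this — a condensed sketch of the post-patch state of perf_event.c, with fields and callbacks not touched by the rename elided:

/* Events for a given PMU register set (was 'struct cpu_hw_events'). */
struct pmu_hw_events {
	struct perf_event	**events;	/* active event per counter index */
	unsigned long		*used_mask;	/* bitmap of counters in use */
	raw_spinlock_t		pmu_lock;	/* serialises PMU register accesses */
};

struct arm_pmu {
	/* ... other fields and callbacks elided ... */
	struct pmu_hw_events	*(*get_hw_events)(void);
};

/* Was 'armpmu'; renamed because this particular instance is the CPU's PMU. */
static struct arm_pmu *cpu_pmu;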
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e1db55500784..831513342d53 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -37,10 +37,10 @@
  */
 #define ARMPMU_MAX_HWEVENTS 32
 
-/* The events for a given CPU. */
-struct cpu_hw_events {
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
 	/*
-	 * The events that are active on the CPU for the given index.
+	 * The events that are active on the PMU for the given index.
 	 */
 	struct perf_event **events;
 
@@ -59,7 +59,7 @@ struct cpu_hw_events {
 
 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 
 struct arm_pmu {
 	struct pmu pmu;
@@ -70,7 +70,7 @@ struct arm_pmu {
 	irqreturn_t (*handle_irq)(int irq_num, void *dev);
 	void (*enable)(struct hw_perf_event *evt, int idx);
 	void (*disable)(struct hw_perf_event *evt, int idx);
-	int (*get_event_idx)(struct cpu_hw_events *cpuc,
+	int (*get_event_idx)(struct pmu_hw_events *hw_events,
 			     struct hw_perf_event *hwc);
 	int (*set_event_filter)(struct hw_perf_event *evt,
 				struct perf_event_attr *attr);
@@ -85,21 +85,21 @@ struct arm_pmu {
 	struct mutex reserve_mutex;
 	u64 max_period;
 	struct platform_device *plat_device;
-	struct cpu_hw_events *(*get_hw_events)(void);
+	struct pmu_hw_events *(*get_hw_events)(void);
 };
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 
 /* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *armpmu;
+static struct arm_pmu *cpu_pmu;
 
 enum arm_perf_pmu_ids
 armpmu_get_pmu_id(void)
 {
 	int id = -ENODEV;
 
-	if (armpmu != NULL)
-		id = armpmu->id;
+	if (cpu_pmu != NULL)
+		id = cpu_pmu->id;
 
 	return id;
 }
@@ -110,8 +110,8 @@ armpmu_get_max_events(void)
 {
 	int max_events = 0;
 
-	if (armpmu != NULL)
-		max_events = armpmu->num_events;
+	if (cpu_pmu != NULL)
+		max_events = cpu_pmu->num_events;
 
 	return max_events;
 }
@@ -319,15 +319,15 @@ static void
 armpmu_del(struct perf_event *event, int flags)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
 	WARN_ON(idx < 0);
 
 	armpmu_stop(event, PERF_EF_UPDATE);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
 
 	perf_event_update_userpage(event);
 }
@@ -336,7 +336,7 @@ static int
 armpmu_add(struct perf_event *event, int flags)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx;
 	int err = 0;
@@ -344,7 +344,7 @@ armpmu_add(struct perf_event *event, int flags)
 	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(cpuc, hwc);
+	idx = armpmu->get_event_idx(hw_events, hwc);
 	if (idx < 0) {
 		err = idx;
 		goto out;
@@ -356,7 +356,7 @@ armpmu_add(struct perf_event *event, int flags)
 	 */
 	event->hw.idx = idx;
 	armpmu->disable(hwc, idx);
-	cpuc->events[idx] = event;
+	hw_events->events[idx] = event;
 
 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	if (flags & PERF_EF_START)
@@ -371,7 +371,7 @@ out:
 }
 
 static int
-validate_event(struct cpu_hw_events *cpuc,
+validate_event(struct pmu_hw_events *hw_events,
 	       struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
@@ -381,14 +381,14 @@ validate_event(struct cpu_hw_events *cpuc,
 	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
 		return 1;
 
-	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
+	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
 }
 
 static int
 validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
-	struct cpu_hw_events fake_pmu;
+	struct pmu_hw_events fake_pmu;
 
 	memset(&fake_pmu, 0, sizeof(fake_pmu));
 
@@ -604,13 +604,13 @@ static int armpmu_event_init(struct perf_event *event)
 
 static void armpmu_enable(struct pmu *pmu)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(pmu);
 	/* Enable all of the perf events on hardware. */
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
 	int idx, enabled = 0;
-	struct cpu_hw_events *cpuc = armpmu->get_hw_events();
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 
 	for (idx = 0; idx < armpmu->num_events; ++idx) {
-		struct perf_event *event = cpuc->events[idx];
+		struct perf_event *event = hw_events->events[idx];
 
 		if (!event)
 			continue;
@@ -662,13 +662,13 @@ static int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
  * This requires SMP to be available, so exists as a separate initcall.
  */
 static int __init
-armpmu_reset(void)
+cpu_pmu_reset(void)
 {
-	if (armpmu && armpmu->reset)
-		return on_each_cpu(armpmu->reset, NULL, 1);
+	if (cpu_pmu && cpu_pmu->reset)
+		return on_each_cpu(cpu_pmu->reset, NULL, 1);
 	return 0;
 }
-arch_initcall(armpmu_reset);
+arch_initcall(cpu_pmu_reset);
 
 /*
  * PMU platform driver and devicetree bindings.
@@ -688,7 +688,7 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
-	armpmu->plat_device = pdev;
+	cpu_pmu->plat_device = pdev;
 	return 0;
 }
 
@@ -707,7 +707,7 @@ static int __init register_pmu_driver(void)
 }
 device_initcall(register_pmu_driver);
 
-static struct cpu_hw_events *armpmu_get_cpu_events(void)
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
 {
 	return &__get_cpu_var(cpu_hw_events);
 }
@@ -716,7 +716,7 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 {
 	int cpu;
 	for_each_possible_cpu(cpu) {
-		struct cpu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
 		events->events = per_cpu(hw_events, cpu);
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
@@ -741,22 +741,22 @@ init_hw_perf_events(void)
 		case 0xB360:	/* ARM1136 */
 		case 0xB560:	/* ARM1156 */
 		case 0xB760:	/* ARM1176 */
-			armpmu = armv6pmu_init();
+			cpu_pmu = armv6pmu_init();
 			break;
 		case 0xB020:	/* ARM11mpcore */
-			armpmu = armv6mpcore_pmu_init();
+			cpu_pmu = armv6mpcore_pmu_init();
 			break;
 		case 0xC080:	/* Cortex-A8 */
-			armpmu = armv7_a8_pmu_init();
+			cpu_pmu = armv7_a8_pmu_init();
 			break;
 		case 0xC090:	/* Cortex-A9 */
-			armpmu = armv7_a9_pmu_init();
+			cpu_pmu = armv7_a9_pmu_init();
 			break;
 		case 0xC050:	/* Cortex-A5 */
-			armpmu = armv7_a5_pmu_init();
+			cpu_pmu = armv7_a5_pmu_init();
 			break;
 		case 0xC0F0:	/* Cortex-A15 */
-			armpmu = armv7_a15_pmu_init();
+			cpu_pmu = armv7_a15_pmu_init();
 			break;
 		}
 	/* Intel CPUs [xscale]. */
@@ -764,19 +764,19 @@ init_hw_perf_events(void)
 		part_number = (cpuid >> 13) & 0x7;
 		switch (part_number) {
 		case 1:
-			armpmu = xscale1pmu_init();
+			cpu_pmu = xscale1pmu_init();
 			break;
 		case 2:
-			armpmu = xscale2pmu_init();
+			cpu_pmu = xscale2pmu_init();
 			break;
 		}
 	}
 
-	if (armpmu) {
+	if (cpu_pmu) {
 		pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
-		cpu_pmu_init(armpmu);
-		armpmu_register(armpmu, "cpu", PERF_TYPE_RAW);
+			cpu_pmu->name, cpu_pmu->num_events);
+		cpu_pmu_init(cpu_pmu);
+		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
 	}
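The rename pays off at the get_hw_events() hook: the CPU PMU routes it to per-CPU state via armpmu_get_cpu_events() above, while a system PMU could return a single shared pmu_hw_events instead. A hypothetical sketch of such a driver (the sys_* names are illustrative, not part of this patch):

/* One register set for the whole system, not one per CPU. */
static struct perf_event *sys_events[ARMPMU_MAX_HWEVENTS];
static unsigned long sys_used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
static struct pmu_hw_events sys_hw_events = {
	.events		= sys_events,
	.used_mask	= sys_used_mask,
	.pmu_lock	= __RAW_SPIN_LOCK_UNLOCKED(sys_hw_events.pmu_lock),
};

static struct pmu_hw_events *sys_pmu_get_hw_events(void)
{
	return &sys_hw_events;
}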
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index a4c5aa9baa44..e63d8115c01b 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -433,7 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 		      int idx)
 {
 	unsigned long val, mask, evt, flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = 0;
@@ -486,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -505,7 +505,7 @@ armv6pmu_handle_irq(int irq_num,
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx < armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -526,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	/*
@@ -545,7 +545,7 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
@@ -558,7 +558,7 @@ static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
@@ -568,7 +568,7 @@ armv6pmu_stop(void)
 }
 
 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 		       struct hw_perf_event *event)
 {
 	/* Always place a cycle counter into the cycle counter. */
@@ -598,7 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 		       int idx)
 {
 	unsigned long val, mask, evt, flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -632,7 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 			      int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = ARMV6_PMCR_CCOUNT_IEN;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index be7b58a2cc6f..98b75738345e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -683,7 +683,7 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  */
 #define ARMV7_IDX_CYCLE_COUNTER	0
 #define ARMV7_IDX_COUNTER0	1
-#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + armpmu->num_events - 1)
+#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
 #define ARMV7_MAX_COUNTERS	32
 #define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
@@ -936,7 +936,7 @@ static void armv7_pmnc_dump_regs(void)
 static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
@@ -973,7 +973,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Disable counter and interrupt
@@ -997,7 +997,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
 	u32 pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -1020,7 +1020,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx < armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -1038,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	/*
@@ -1056,7 +1056,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 static void armv7pmu_start(void)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
@@ -1067,7 +1067,7 @@ static void armv7pmu_start(void)
 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
@@ -1075,7 +1075,7 @@ static void armv7pmu_stop(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
 				  struct hw_perf_event *event)
 {
 	int idx;
@@ -1093,7 +1093,7 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 	 * For anything other than a cycle counter, try and use
 	 * the events counters
 	 */
-	for (idx = ARMV7_IDX_COUNTER0; idx < armpmu->num_events; ++idx) {
+	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
 		if (!test_and_set_bit(idx, cpuc->used_mask))
 			return idx;
 	}
@@ -1130,7 +1130,7 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
 
 static void armv7pmu_reset(void *info)
 {
-	u32 idx, nb_cnt = armpmu->num_events;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	/* The counter and interrupt enable registers are unknown at reset. */
 	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index d4c7610d25b9..e0cca10a8411 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -249,7 +249,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx < armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -263,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	irq_work_run();
@@ -281,7 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -315,7 +315,7 @@ static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -344,7 +344,7 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 }
 
 static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			 struct hw_perf_event *event)
 {
 	if (XSCALE_PERFCTR_CCNT == event->config_base) {
@@ -367,7 +367,7 @@ static void
 xscale1pmu_start(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
@@ -380,7 +380,7 @@ static void
 xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
@@ -565,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -586,7 +586,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx < armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -600,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	irq_work_run();
@@ -618,7 +618,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -662,7 +662,7 @@ static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -703,7 +703,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 }
 
 static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			 struct hw_perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -722,7 +722,7 @@ static void
 xscale2pmu_start(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
@@ -735,7 +735,7 @@ static void
 xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();