author    Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>  2012-07-30 07:00:02 -0400
committer Will Deacon <will.deacon@arm.com>  2012-11-09 06:37:25 -0500
commit    ed6f2a522398c26559f4da23a80aa6195e6284c7 (patch)
tree      f07a2bb16e7d5b121820256b51cf22c3be9bc352 /arch/arm/kernel
parent    513c99ce4e64245be1f83f56039ec4891b451955 (diff)
ARM: perf: consistently use struct perf_event in arm_pmu functions
The arm_pmu functions have wildly varied parameters which can often be
derived from struct perf_event. This patch changes the arm_pmu function
prototypes so that struct perf_event pointers are passed in preference
to fields that can be derived from the event.

Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
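For reference, the net effect on the arm_pmu callback table (declared in
arch/arm/include/asm/pmu.h, which lies outside this diffstat) can be
sketched roughly as below. This is an approximation inferred from the
call sites changed in this patch, not taken from the header diff itself:

	/* Approximate shape of the callbacks before this patch. */
	struct arm_pmu {
		/* ... */
		void	(*enable)(struct hw_perf_event *evt, int idx);
		void	(*disable)(struct hw_perf_event *evt, int idx);
		int	(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct hw_perf_event *hwc);
		u32	(*read_counter)(int idx);
		void	(*write_counter)(int idx, u32 val);
		int	(*request_irq)(irq_handler_t handler);
		void	(*free_irq)(void);
		void	(*start)(void);
		void	(*stop)(void);
		/* ... */
	};

	/* ...and after: each callback takes the perf_event (or the arm_pmu
	 * itself) and derives hwc/idx internally via &event->hw and
	 * hwc->idx, so callers no longer pass redundant parameters. */
	struct arm_pmu {
		/* ... */
		void	(*enable)(struct perf_event *event);
		void	(*disable)(struct perf_event *event);
		int	(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
		u32	(*read_counter)(struct perf_event *event);
		void	(*write_counter)(struct perf_event *event, u32 val);
		int	(*request_irq)(struct arm_pmu *pmu, irq_handler_t handler);
		void	(*free_irq)(struct arm_pmu *pmu);
		void	(*start)(struct arm_pmu *pmu);
		void	(*stop)(struct arm_pmu *pmu);
		/* ... */
	};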
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/perf_event.c        | 44
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c    |  8
-rw-r--r--  arch/arm/kernel/perf_event_v6.c     | 54
-rw-r--r--  arch/arm/kernel/perf_event_v7.c     | 46
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 85
5 files changed, 131 insertions(+), 106 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f8406af03279..1cfa3f35713e 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event,
 	return -ENOENT;
 }
 
-int
-armpmu_event_set_period(struct perf_event *event,
-			struct hw_perf_event *hwc,
-			int idx)
+int armpmu_event_set_period(struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;
@@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event,
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
-	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
+	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
 
 	perf_event_update_userpage(event);
 
 	return ret;
 }
 
-u64
-armpmu_event_update(struct perf_event *event,
-		    struct hw_perf_event *hwc,
-		    int idx)
+u64 armpmu_event_update(struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 delta, prev_raw_count, new_raw_count;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
-	new_raw_count = armpmu->read_counter(idx);
+	new_raw_count = armpmu->read_counter(event);
 
 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 			    new_raw_count) != prev_raw_count)
@@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx);
+	armpmu_event_update(event);
 }
 
 static void
@@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags)
 	 * PERF_EF_UPDATE, see comments in armpmu_start().
 	 */
 	if (!(hwc->state & PERF_HES_STOPPED)) {
-		armpmu->disable(hwc, hwc->idx);
-		armpmu_event_update(event, hwc, hwc->idx);
+		armpmu->disable(event);
+		armpmu_event_update(event);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
 
-static void
-armpmu_start(struct perf_event *event, int flags)
+static void armpmu_start(struct perf_event *event, int flags)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
@@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags)
 	 * get an interrupt too soon or *way* too late if the overflow has
 	 * happened since disabling.
 	 */
-	armpmu_event_set_period(event, hwc, hwc->idx);
-	armpmu->enable(hwc, hwc->idx);
+	armpmu_event_set_period(event);
+	armpmu->enable(event);
 }
 
 static void
@@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags)
 	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(hw_events, hwc);
+	idx = armpmu->get_event_idx(hw_events, event);
 	if (idx < 0) {
 		err = idx;
 		goto out;
@@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags)
 	 * sure it is disabled.
 	 */
 	event->hw.idx = idx;
-	armpmu->disable(hwc, idx);
+	armpmu->disable(event);
 	hw_events->events[idx] = event;
 
 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events,
 	       struct perf_event *event)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
 	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
 		return 1;
 
-	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
+	return armpmu->get_event_idx(hw_events, event) >= 0;
 }
 
 static int
@@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 static void
 armpmu_release_hardware(struct arm_pmu *armpmu)
 {
-	armpmu->free_irq();
+	armpmu->free_irq(armpmu);
 	pm_runtime_put_sync(&armpmu->plat_device->dev);
 }
 
@@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		return -ENODEV;
 
 	pm_runtime_get_sync(&pmu_device->dev);
-	err = armpmu->request_irq(armpmu_dispatch_irq);
+	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
 	if (err) {
 		armpmu_release_hardware(armpmu);
 		return err;
@@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu)
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
 	if (enabled)
-		armpmu->start();
+		armpmu->start(armpmu);
 }
 
 static void armpmu_disable(struct pmu *pmu)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	armpmu->stop();
+	armpmu->stop(armpmu);
 }
 
 #ifdef CONFIG_PM_RUNTIME
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 3863fd405fa1..02244faa5396 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -71,7 +71,7 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 	return &__get_cpu_var(cpu_hw_events);
 }
 
-static void cpu_pmu_free_irq(void)
+static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 {
 	int i, irq, irqs;
 	struct platform_device *pmu_device = cpu_pmu->plat_device;
@@ -87,7 +87,7 @@ static void cpu_pmu_free_irq(void)
 	}
 }
 
-static int cpu_pmu_request_irq(irq_handler_t handler)
+static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 {
 	int i, err, irq, irqs;
 	struct platform_device *pmu_device = cpu_pmu->plat_device;
@@ -148,7 +148,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
 
 	/* Ensure the PMU has sane values out of reset. */
 	if (cpu_pmu && cpu_pmu->reset)
-		on_each_cpu(cpu_pmu->reset, NULL, 1);
+		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
 }
 
 /*
@@ -164,7 +164,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 		return NOTIFY_DONE;
 
 	if (cpu_pmu && cpu_pmu->reset)
-		cpu_pmu->reset(NULL);
+		cpu_pmu->reset(cpu_pmu);
 
 	return NOTIFY_OK;
 }
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 3908cb4e5566..f3e22ff8b6a2 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
 	return ret;
 }
 
-static inline u32
-armv6pmu_read_counter(int counter)
+static inline u32 armv6pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 	unsigned long value = 0;
 
 	if (ARMV6_CYCLE_COUNTER == counter)
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter)
 	return value;
 }
 
-static inline void
-armv6pmu_write_counter(int counter,
-		       u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
+
 	if (ARMV6_CYCLE_COUNTER == counter)
 		asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
 	else if (ARMV6_COUNTER0 == counter)
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter,
 		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
 }
 
-static void
-armv6pmu_enable_event(struct hw_perf_event *hwc,
-		      int idx)
+static void armv6pmu_enable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = 0;
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
@@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	/*
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num,
 	return IRQ_HANDLED;
 }
 
-static void
-armv6pmu_start(void)
+static void armv6pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -540,8 +542,7 @@ armv6pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6pmu_stop(void)
+static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -555,10 +556,11 @@ armv6pmu_stop(void)
 
 static int
 armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
-		       struct hw_perf_event *event)
+		       struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	/* Always place a cycle counter into the cycle counter. */
-	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+	if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
 		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
-static void
-armv6pmu_disable_event(struct hw_perf_event *hwc,
-		       int idx)
+static void armv6pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
-			      int idx)
+static void armv6mpcore_pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = ARMV6_PMCR_CCOUNT_IEN;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index b189403f30e4..1183c81087b2 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -840,8 +840,10 @@ static inline int armv7_pmnc_select_counter(int idx)
 	return idx;
 }
 
-static inline u32 armv7pmu_read_counter(int idx)
+static inline u32 armv7pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
 	u32 value = 0;
 
 	if (!armv7_pmnc_counter_valid(idx))
@@ -855,8 +857,11 @@ static inline u32 armv7pmu_read_counter(int idx)
 	return value;
 }
 
-static inline void armv7pmu_write_counter(int idx, u32 value)
+static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
 	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
@@ -991,10 +996,13 @@ static void armv7_pmnc_dump_regs(void)
 }
 #endif
 
-static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void armv7pmu_enable_event(struct perf_event *event)
 {
 	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
@@ -1028,10 +1036,13 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void armv7pmu_disable_event(struct perf_event *event)
 {
 	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	/*
 	 * Disable counter and interrupt
@@ -1055,7 +1066,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
 	u32 pmnc;
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
@@ -1075,7 +1087,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -1092,13 +1103,13 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	/*
@@ -1113,7 +1124,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	return IRQ_HANDLED;
 }
 
-static void armv7pmu_start(void)
+static void armv7pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -1124,7 +1135,7 @@ static void armv7pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void armv7pmu_stop(void)
+static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -1136,10 +1147,12 @@ static void armv7pmu_stop(void)
 }
 
 static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
-				  struct hw_perf_event *event)
+				  struct perf_event *event)
 {
 	int idx;
-	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
 
 	/* Always place a cycle counter into the cycle counter. */
 	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
@@ -1190,11 +1203,14 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
 
 static void armv7pmu_reset(void *info)
 {
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
 	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
-		armv7pmu_disable_event(NULL, idx);
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv7_pmnc_disable_counter(idx);
+		armv7_pmnc_disable_intens(idx);
+	}
 
 	/* Initialize & Reset PMNC: C and P bits */
 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 131ede6c2fdf..0c8265e53d5f 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -224,7 +224,8 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
@@ -248,7 +249,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -260,13 +260,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	irq_work_run();
@@ -280,11 +280,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 	return IRQ_HANDLED;
 }
 
-static void
-xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void xscale1pmu_enable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -314,11 +316,13 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void xscale1pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -348,9 +352,10 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 
 static int
 xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
-			 struct hw_perf_event *event)
+			 struct perf_event *event)
 {
-	if (XSCALE_PERFCTR_CCNT == event->config_base) {
+	struct hw_perf_event *hwc = &event->hw;
+	if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
 		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
@@ -366,8 +371,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
-static void
-xscale1pmu_start(void)
+static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -379,8 +383,7 @@ xscale1pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale1pmu_stop(void)
+static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -392,9 +395,10 @@ xscale1pmu_stop(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32
-xscale1pmu_read_counter(int counter)
+static inline u32 xscale1pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 	u32 val = 0;
 
 	switch (counter) {
@@ -412,9 +416,11 @@ xscale1pmu_read_counter(int counter)
 	return val;
 }
 
-static inline void
-xscale1pmu_write_counter(int counter, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
+
 	switch (counter) {
 	case XSCALE_CYCLE_COUNTER:
 		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
@@ -565,7 +571,8 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
@@ -583,7 +590,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -595,13 +601,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	irq_work_run();
@@ -615,11 +621,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	return IRQ_HANDLED;
 }
 
-static void
-xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void xscale2pmu_enable_event(struct perf_event *event)
 {
 	unsigned long flags, ien, evtsel;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -659,11 +667,13 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void xscale2pmu_disable_event(struct perf_event *event)
 {
 	unsigned long flags, ien, evtsel, of_flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -711,7 +721,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 
 static int
 xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
-			 struct hw_perf_event *event)
+			 struct perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);
 	if (idx >= 0)
@@ -725,8 +735,7 @@ out:
 	return idx;
 }
 
-static void
-xscale2pmu_start(void)
+static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -738,8 +747,7 @@ xscale2pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale2pmu_stop(void)
+static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -751,9 +759,10 @@ xscale2pmu_stop(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32
-xscale2pmu_read_counter(int counter)
+static inline u32 xscale2pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 	u32 val = 0;
 
 	switch (counter) {
@@ -777,9 +786,11 @@ xscale2pmu_read_counter(int counter)
 	return val;
 }
 
-static inline void
-xscale2pmu_write_counter(int counter, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
+
 	switch (counter) {
 	case XSCALE_CYCLE_COUNTER:
 		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));