author		Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>	2012-07-30 07:00:02 -0400
committer	Will Deacon <will.deacon@arm.com>	2012-11-09 06:37:25 -0500
commit		ed6f2a522398c26559f4da23a80aa6195e6284c7 (patch)
tree		f07a2bb16e7d5b121820256b51cf22c3be9bc352 /arch/arm/kernel/perf_event_v6.c
parent		513c99ce4e64245be1f83f56039ec4891b451955 (diff)
ARM: perf: consistently use struct perf_event in arm_pmu functions
The arm_pmu functions have wildly varied parameters which can often be
derived from struct perf_event. This patch changes the arm_pmu function
prototypes so that struct perf_event pointers are passed in preference
to fields that can be derived from the event.

Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
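The refactoring pattern applied throughout the diff below is mechanical:
every callback that previously took a struct hw_perf_event pointer and a
counter index (or no arguments at all) now takes the struct perf_event and
derives those values locally via &event->hw, hwc->idx and
to_arm_pmu(event->pmu). A minimal compilable sketch of the before/after
shape, using simplified stand-in types rather than the kernel's real
definitions:

/* Illustration only: simplified stand-ins for the kernel's
 * struct pmu / struct perf_event / struct hw_perf_event. */
struct pmu { int id; };
struct hw_perf_event { int idx; unsigned long config_base; };
struct perf_event { struct pmu *pmu; struct hw_perf_event hw; };

/* Old shape: every caller had to unpack the event first. */
static void enable_event_old(struct hw_perf_event *hwc, int idx)
{
	(void)hwc; (void)idx;	/* ... program counter 'idx' ... */
}

/* New shape: pass the event; the callback derives what it needs. */
static void enable_event_new(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	(void)hwc; (void)idx;	/* ... program counter 'idx' ... */
}

With every implementation exposing the same event-based prototype, core
code can invoke callbacks such as cpu_pmu->disable(event) (visible in the
interrupt handler below) without knowing which derived fields each backend
actually needs.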
Diffstat (limited to 'arch/arm/kernel/perf_event_v6.c')
-rw-r--r--	arch/arm/kernel/perf_event_v6.c	54
1 file changed, 29 insertions, 25 deletions
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 3908cb4e5566..f3e22ff8b6a2 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
 	return ret;
 }
 
-static inline u32
-armv6pmu_read_counter(int counter)
+static inline u32 armv6pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 	unsigned long value = 0;
 
 	if (ARMV6_CYCLE_COUNTER == counter)
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter)
 	return value;
 }
 
-static inline void
-armv6pmu_write_counter(int counter,
-		       u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
+
 	if (ARMV6_CYCLE_COUNTER == counter)
 		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
 	else if (ARMV6_COUNTER0 == counter)
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter,
 		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
 }
 
-static void
-armv6pmu_enable_event(struct hw_perf_event *hwc,
-		      int idx)
+static void armv6pmu_enable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= 0;
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
@@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	/*
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num,
 	return IRQ_HANDLED;
 }
 
-static void
-armv6pmu_start(void)
+static void armv6pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -540,8 +542,7 @@ armv6pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6pmu_stop(void)
+static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -555,10 +556,11 @@ armv6pmu_stop(void)
 
 static int
 armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
-		       struct hw_perf_event *event)
+		       struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	/* Always place a cycle counter into the cycle counter. */
-	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+	if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
 		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
-static void
-armv6pmu_disable_event(struct hw_perf_event *hwc,
-		       int idx)
+static void armv6pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
-			      int idx)
+static void armv6mpcore_pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;