Diffstat (limited to 'arch/arm/kernel/perf_event_v6.c')
-rw-r--r--	arch/arm/kernel/perf_event_v6.c	| 126
1 file changed, 63 insertions(+), 63 deletions(-)
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 6ccc07971745..f3e22ff8b6a2 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
 	return ret;
 }
 
-static inline u32
-armv6pmu_read_counter(int counter)
+static inline u32 armv6pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 	unsigned long value = 0;
 
 	if (ARMV6_CYCLE_COUNTER == counter)
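The hunk ends just before the coprocessor access, but its shape can be inferred from the matching mcr in armv6pmu_write_counter below: the ARMv6 cycle counter (CCNT) sits behind cp15. A sketch of the elided read, mirroring that encoding (illustrative, not taken from this hunk):

	/* Sketch: read CCNT via cp15; encoding mirrors the mcr in the write path. */
	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));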
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter)
 	return value;
 }
 
-static inline void
-armv6pmu_write_counter(int counter,
-		       u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
+
 	if (ARMV6_CYCLE_COUNTER == counter)
 		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
 	else if (ARMV6_COUNTER0 == counter)
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter,
 		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
 }
 
-static void
-armv6pmu_enable_event(struct hw_perf_event *hwc,
-		      int idx)
+static void armv6pmu_enable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= 0;
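The rewritten enable path recovers its struct arm_pmu from the generic event->pmu pointer via to_arm_pmu(). A minimal sketch of how that helper is plausibly defined, assuming struct arm_pmu embeds the core struct pmu as a member named pmu (as the ARM perf code does):

	/* Sketch: walk from the embedded core pmu back to the wrapping arm_pmu. */
	#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

With that, any callback handed only a perf_event can reach the per-implementation ops and state without file-scope globals.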
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
@@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	/*
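Dropping the __get_cpu_var() lookup means the handler now trusts its dev argument to be the struct arm_pmu itself, which holds only if that pointer was passed to request_irq() as the dev_id cookie. A hedged sketch of the registration side (armpmu_claim_irq is an illustrative name, not from this diff):

	/* Sketch: pass cpu_pmu as dev_id so the handler can cast it back. */
	static int armpmu_claim_irq(struct arm_pmu *cpu_pmu, int irq)
	{
		int err = request_irq(irq, cpu_pmu->handle_irq, IRQF_NOBALANCING,
				      "arm-pmu", cpu_pmu);
		if (err)
			pr_err("unable to request IRQ%d for ARM PMU\n", irq);
		return err;
	}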
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num,
 	return IRQ_HANDLED;
 }
 
-static void
-armv6pmu_start(void)
+static void armv6pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -540,8 +542,7 @@ armv6pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6pmu_stop(void)
+static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -555,10 +556,11 @@ armv6pmu_stop(void)
 
 static int
 armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
-				struct hw_perf_event *event)
+				struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	/* Always place a cycle counter into the cycle counter. */
-	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+	if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
 		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
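Index allocation keys off the per-cpu used_mask bitmap, and test_and_set_bit() makes each claim atomic, so two events racing for a slot cannot both win. The general-purpose branch is elided by this hunk; a sketch following the same pattern as the visible cycle-counter branch (counter order here is illustrative):

	/* Sketch: claim the first free general-purpose counter atomically. */
	if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
		return ARMV6_COUNTER1;
	if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
		return ARMV6_COUNTER0;
	/* Both event counters are in use. */
	return -EAGAIN;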
@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
-static void
-armv6pmu_disable_event(struct hw_perf_event *hwc,
-		       int idx)
+static void armv6pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
-			      int idx)
+static void armv6mpcore_pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -649,24 +653,22 @@ static int armv6_map_event(struct perf_event *event)
 				&armv6_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6pmu = {
-	.name			= "v6",
-	.handle_irq		= armv6pmu_handle_irq,
-	.enable			= armv6pmu_enable_event,
-	.disable		= armv6pmu_disable_event,
-	.read_counter		= armv6pmu_read_counter,
-	.write_counter		= armv6pmu_write_counter,
-	.get_event_idx		= armv6pmu_get_event_idx,
-	.start			= armv6pmu_start,
-	.stop			= armv6pmu_stop,
-	.map_event		= armv6_map_event,
-	.num_events		= 3,
-	.max_period		= (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit armv6pmu_init(void)
 {
-	return &armv6pmu;
+static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu)
+	cpu_pmu->name		= "v6";
+	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
+	cpu_pmu->enable		= armv6pmu_enable_event;
+	cpu_pmu->disable	= armv6pmu_disable_event;
+	cpu_pmu->read_counter	= armv6pmu_read_counter;
+	cpu_pmu->write_counter	= armv6pmu_write_counter;
+	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
+	cpu_pmu->start		= armv6pmu_start;
+	cpu_pmu->stop		= armv6pmu_stop;
+	cpu_pmu->map_event	= armv6_map_event;
+	cpu_pmu->num_events	= 3;
+	cpu_pmu->max_period	= (1LLU << 32) - 1;
+
+	return 0;
 }
 
 /*
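The static struct arm_pmu is gone: callers now own the structure and ask the init function to populate it, with a plain int result signalling support. A hedged sketch of the probing side (probe_v6_pmu is an illustrative name, not part of this diff):

	/* Sketch: caller allocates the arm_pmu and checks the init result. */
	static int probe_v6_pmu(struct arm_pmu *pmu)
	{
		int ret = armv6pmu_init(pmu);	/* fills ops, num_events, max_period */
		if (ret)
			pr_info("ARMv6 PMU unsupported (%d)\n", ret);
		return ret;
	}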
@@ -683,33 +685,31 @@ static int armv6mpcore_map_event(struct perf_event *event)
 				&armv6mpcore_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6mpcore_pmu = {
-	.name			= "v6mpcore",
-	.handle_irq		= armv6pmu_handle_irq,
-	.enable			= armv6pmu_enable_event,
-	.disable		= armv6mpcore_pmu_disable_event,
-	.read_counter		= armv6pmu_read_counter,
-	.write_counter		= armv6pmu_write_counter,
-	.get_event_idx		= armv6pmu_get_event_idx,
-	.start			= armv6pmu_start,
-	.stop			= armv6pmu_stop,
-	.map_event		= armv6mpcore_map_event,
-	.num_events		= 3,
-	.max_period		= (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
+static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return &armv6mpcore_pmu;
+	cpu_pmu->name		= "v6mpcore";
+	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
+	cpu_pmu->enable		= armv6pmu_enable_event;
+	cpu_pmu->disable	= armv6mpcore_pmu_disable_event;
+	cpu_pmu->read_counter	= armv6pmu_read_counter;
+	cpu_pmu->write_counter	= armv6pmu_write_counter;
+	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
+	cpu_pmu->start		= armv6pmu_start;
+	cpu_pmu->stop		= armv6pmu_stop;
+	cpu_pmu->map_event	= armv6mpcore_map_event;
+	cpu_pmu->num_events	= 3;
+	cpu_pmu->max_period	= (1LLU << 32) - 1;
+
+	return 0;
 }
 #else
-static struct arm_pmu *__devinit armv6pmu_init(void)
+static int armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
+static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-	return NULL;
+	return -ENODEV;
 }
 #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */