Diffstat (limited to 'arch/arm/kernel/perf_event_v6.c')
-rw-r--r--	arch/arm/kernel/perf_event_v6.c | 87
1 file changed, 59 insertions(+), 28 deletions(-)
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index dd7f3b9f4cb3..e63d8115c01b 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -54,7 +54,7 @@ enum armv6_perf_types {
 };
 
 enum armv6_counters {
-	ARMV6_CYCLE_COUNTER = 1,
+	ARMV6_CYCLE_COUNTER = 0,
 	ARMV6_COUNTER0,
 	ARMV6_COUNTER1,
 };
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 		      int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = 0;
@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int counter_is_active(unsigned long pmcr, int idx)
+{
+	unsigned long mask = 0;
+	if (idx == ARMV6_CYCLE_COUNTER)
+		mask = ARMV6_PMCR_CCOUNT_IEN;
+	else if (idx == ARMV6_COUNTER0)
+		mask = ARMV6_PMCR_COUNT0_IEN;
+	else if (idx == ARMV6_COUNTER1)
+		mask = ARMV6_PMCR_COUNT1_IEN;
+
+	if (mask)
+		return pmcr & mask;
+
+	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+	return 0;
 }
 
 static irqreturn_t
@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!counter_is_active(pmcr, idx))
 			continue;
 
 		/*
@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	/*
@@ -527,28 +545,30 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 		       struct hw_perf_event *event)
 {
 	/* Always place a cycle counter into the cycle counter. */
@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 			int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 			      int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+				&armv6_perf_cache_map, 0xFF);
 }
 
-static const struct arm_pmu armv6pmu = {
+static struct arm_pmu armv6pmu = {
 	.id = ARM_PERF_PMU_ID_V6,
 	.name = "v6",
 	.handle_irq = armv6pmu_handle_irq,
@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
 	.get_event_idx = armv6pmu_get_event_idx,
 	.start = armv6pmu_start,
 	.stop = armv6pmu_stop,
-	.cache_map = &armv6_perf_cache_map,
-	.event_map = &armv6_perf_map,
-	.raw_event_mask = 0xFF,
+	.map_event = armv6_map_event,
 	.num_events = 3,
 	.max_period = (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return &armv6pmu;
 }
@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
  * disable the interrupt reporting and update the event. When unthrottling we
  * reset the period and enable the interrupt reporting.
  */
-static const struct arm_pmu armv6mpcore_pmu = {
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+				&armv6mpcore_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6mpcore_pmu = {
 	.id = ARM_PERF_PMU_ID_V6MP,
 	.name = "v6mpcore",
 	.handle_irq = armv6pmu_handle_irq,
@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
 	.get_event_idx = armv6pmu_get_event_idx,
 	.start = armv6pmu_start,
 	.stop = armv6pmu_stop,
-	.cache_map = &armv6mpcore_perf_cache_map,
-	.event_map = &armv6mpcore_perf_map,
-	.raw_event_mask = 0xFF,
+	.map_event = armv6mpcore_map_event,
 	.num_events = 3,
 	.max_period = (1LLU << 32) - 1,
 };
 
-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return &armv6mpcore_pmu;
 }
 #else
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
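
For readers following the interrupt-handler change, a standalone sketch of the PMCR-based check that replaces the old active_mask test may help. The *_IEN bit positions below are assumptions mirroring the ARMv6 PMCR layout used by perf_event_v6.c (cycle counter at bit 6, counters 0 and 1 at bits 4 and 5); the header that actually defines them is not part of this diff, so treat the sketch as illustrative, not authoritative.

/* Minimal user-space model of the counter_is_active() helper added above.
 * Bit positions are assumed, restated here only for illustration. */
#include <stdio.h>

#define PMCR_COUNT0_IEN (1UL << 4) /* assumed: counter 0 interrupt enable */
#define PMCR_COUNT1_IEN (1UL << 5) /* assumed: counter 1 interrupt enable */
#define PMCR_CCOUNT_IEN (1UL << 6) /* assumed: cycle counter interrupt enable */

enum { CYCLE_COUNTER = 0, COUNTER0, COUNTER1 };

/* Returns non-zero when the interrupt-enable bit for 'idx' is set in the
 * PMCR snapshot, i.e. the counter could have raised the overflow IRQ. */
static int counter_is_active(unsigned long pmcr, int idx)
{
	unsigned long mask = 0;

	if (idx == CYCLE_COUNTER)
		mask = PMCR_CCOUNT_IEN;
	else if (idx == COUNTER0)
		mask = PMCR_COUNT0_IEN;
	else if (idx == COUNTER1)
		mask = PMCR_COUNT1_IEN;

	return mask ? (pmcr & mask) != 0 : 0;
}

int main(void)
{
	/* Pretend only the cycle counter and counter 1 have IRQs enabled. */
	unsigned long pmcr = PMCR_CCOUNT_IEN | PMCR_COUNT1_IEN;
	int idx;

	for (idx = CYCLE_COUNTER; idx <= COUNTER1; idx++)
		printf("counter %d active: %d\n", idx,
		       counter_is_active(pmcr, idx));
	return 0;
}

Note that the handler loop now runs idx from 0 to num_events - 1, which is consistent with ARMV6_CYCLE_COUNTER being renumbered from 1 to 0 in the first hunk.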