author		Will Deacon <will.deacon@arm.com>	2011-03-25 08:13:34 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-03-26 06:06:09 -0400
commit		574b69cbb633037a9c305d2993aeb680f4a8badd (patch)
tree		8fcef55167b8750eeaebffca51b937a993842136 /arch/arm/kernel
parent		d25d3b4c4d0e27975ee659a64b6d29f02fdbfde4 (diff)
ARM: 6834/1: perf: reset counters on all CPUs during initialisation
ARMv7 dictates that the interrupt-enable and count-enable registers for
each PMU counter are UNKNOWN following core reset.
This patch adds a new (optional) function pointer to struct arm_pmu for
resetting the PMU state during init. The reset function is called on
each CPU via an arch_initcall in the generic ARM perf_event code and
allows the PMU backend to write sane values to any UNKNOWN registers.
Acked-by: Jean Pihet <j-pihet@ti.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
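[Editor's note] For readers unfamiliar with the hook style used here, below is a minimal, standalone C sketch of the same pattern: an optional reset callback stored in a PMU descriptor and invoked once per CPU. It is illustrative only and not part of the patch; fake_pmu, fake_v7_reset and pmu_reset_all are invented names, and a plain loop stands in for the kernel's on_each_cpu()/arch_initcall() machinery.

/*
 * Illustrative sketch only -- not part of the patch. It mirrors the
 * "optional per-CPU reset hook" shape in plain C.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_pmu {
	const char *name;
	void (*reset)(void *info);	/* optional: may be NULL */
};

static void fake_v7_reset(void *info)
{
	(void)info;	/* the real hook also receives NULL */
	/* A real backend would write sane values to its UNKNOWN registers here. */
	puts("reset PMU state on this CPU");
}

static struct fake_pmu pmu = {
	.name  = "fake-armv7",
	.reset = fake_v7_reset,
};

/* Analogue of armpmu_reset(): invoke the hook once per CPU if it is set. */
static int pmu_reset_all(int nr_cpus)
{
	int cpu;

	if (!pmu.reset)
		return 0;
	for (cpu = 0; cpu < nr_cpus; ++cpu)
		pmu.reset(NULL);
	return 0;
}

int main(void)
{
	return pmu_reset_all(4);
}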
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/perf_event.c	14
-rw-r--r--	arch/arm/kernel/perf_event_v7.c	22
2 files changed, 30 insertions, 6 deletions
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 22e194eb8536..e422f4c269a0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -79,6 +79,7 @@ struct arm_pmu {
 	void		(*write_counter)(int idx, u32 val);
 	void		(*start)(void);
 	void		(*stop)(void);
+	void		(*reset)(void *);
 	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
 				    [PERF_COUNT_HW_CACHE_OP_MAX]
 				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -624,6 +625,19 @@ static struct pmu pmu = {
 #include "perf_event_v6.c"
 #include "perf_event_v7.c"
 
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+armpmu_reset(void)
+{
+	if (armpmu && armpmu->reset)
+		return on_each_cpu(armpmu->reset, NULL, 1);
+	return 0;
+}
+arch_initcall(armpmu_reset);
+
 static int __init
 init_hw_perf_events(void)
 {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index d6c9dcd1979f..c08d07a99fcc 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -849,6 +849,18 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 	}
 }
 
+static void armv7pmu_reset(void *info)
+{
+	u32 idx, nb_cnt = armpmu->num_events;
+
+	/* The counter and interrupt enable registers are unknown at reset. */
+	for (idx = 1; idx < nb_cnt; ++idx)
+		armv7pmu_disable_event(NULL, idx);
+
+	/* Initialize & Reset PMNC: C and P bits */
+	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 	.enable			= armv7pmu_enable_event,
@@ -858,17 +870,15 @@ static struct arm_pmu armv7pmu = {
 	.get_event_idx		= armv7pmu_get_event_idx,
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
+	.reset			= armv7pmu_reset,
 	.raw_event_mask		= 0xFF,
 	.max_period		= (1LLU << 32) - 1,
 };
 
-static u32 __init armv7_reset_read_pmnc(void)
+static u32 __init armv7_read_num_pmnc_events(void)
 {
 	u32 nb_cnt;
 
-	/* Initialize & Reset PMNC: C and P bits */
-	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
-
 	/* Read the nb of CNTx counters supported from PMNC */
 	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
 
@@ -882,7 +892,7 @@ static const struct arm_pmu *__init armv7_a8_pmu_init(void)
 	armv7pmu.name		= "ARMv7 Cortex-A8";
 	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
 	armv7pmu.event_map	= &armv7_a8_perf_map;
-	armv7pmu.num_events	= armv7_reset_read_pmnc();
+	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
@@ -892,7 +902,7 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
 	armv7pmu.name		= "ARMv7 Cortex-A9";
 	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
 	armv7pmu.event_map	= &armv7_a9_perf_map;
-	armv7pmu.num_events	= armv7_reset_read_pmnc();
+	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 #else