author     Paul Mackerras <paulus@samba.org>                   2009-09-08 21:26:03 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-09-10 21:27:58 -0400
commit     a6dbf93a2ad853585409e715eb96dca9177e3c39
tree       617b29b2ae800d71f459288d232045a6efc6dfd0
parent     757cbd46d11cfa7506b7dd5dd6657ae645bf6a17
powerpc: Fix bug where perf_counters breaks oprofile
Currently there is a bug where if you use oprofile on a pSeries
machine, then use perf_counters, then use oprofile again, oprofile
will not work correctly; it will lose the PMU configuration the next
time the hypervisor does a partition context switch, and thereafter
won't count anything.
Maynard Johnson identified the sequence causing the problem (modelled in
the user-space sketch after this list):
- oprofile setup calls ppc_enable_pmcs(), which calls
pseries_lpar_enable_pmcs, which tells the hypervisor that we want
to use the PMU, and sets the "PMU in use" flag in the lppaca.
This flag tells the hypervisor whether it needs to save and restore
the PMU config.
- The perf_counter code sets and clears the "PMU in use" flag directly
as it context-switches the PMU between tasks, and leaves it clear
when it finishes.
- oprofile setup, called for a new oprofile run, calls ppc_enable_pmcs,
which does nothing because it has already been called. In particular
it doesn't set the "PMU in use" flag.
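To make that interaction concrete, here is a minimal user-space model of the
sequence. This is not kernel code: the lppaca field, the per-cpu flag and the
hypervisor's save/restore decision are all simulated, and only the names
mirror the real ones.

/* bug_model.c -- simulated failure sequence; build with: cc -o bug_model bug_model.c */
#include <stdio.h>

static struct { int pmcregs_in_use; } lppaca;	/* stands in for get_lppaca() */
static int pmcs_enabled;			/* stands in for the per-cpu flag in sysfs.c */
static int pmu_config;				/* live PMU configuration */
static int hv_saved_pmu_config;			/* what the hypervisor preserves */

/* The hypervisor only saves/restores the PMU across a partition context
 * switch when pmcregs_in_use is set; otherwise the config is clobbered. */
static void partition_context_switch(void)
{
	hv_saved_pmu_config = lppaca.pmcregs_in_use ? pmu_config : 0;
	pmu_config = hv_saved_pmu_config;
}

/* Pre-patch ppc_enable_pmcs(): sets the flag only on the first call. */
static void ppc_enable_pmcs_old(void)
{
	if (pmcs_enabled)
		return;
	pmcs_enabled = 1;
	lppaca.pmcregs_in_use = 1;	/* done in pseries_lpar_enable_pmcs */
}

int main(void)
{
	ppc_enable_pmcs_old();		/* 1. first oprofile run */
	pmu_config = 0x1234;

	lppaca.pmcregs_in_use = 1;	/* 2. perf_counters starts counting... */
	lppaca.pmcregs_in_use = 0;	/*    ...and clears the flag when it finishes */

	ppc_enable_pmcs_old();		/* 3. second oprofile run: returns early, */
	pmu_config = 0x1234;		/*    so the flag stays clear */

	partition_context_switch();
	printf("PMU config after partition switch: %#x\n", pmu_config); /* 0 -- lost */
	return 0;
}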
This fixes the problem by arranging for ppc_enable_pmcs to always set
the "PMU in use" flag. It makes the perf_counter code call
ppc_enable_pmcs also rather than calling the lower-level function
directly, and removes the setting of the "PMU in use" flag from
pseries_lpar_enable_pmcs, since that is now done in its caller.
This also removes the declaration of pasemi_enable_pmcs because it
isn't defined anywhere.
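For reference, a user-space sketch of the behaviour after this patch,
condensed from the diff below rather than taken verbatim (lppaca and the
per-cpu flag are simulated as in the model above):

/* fix_model.c -- post-patch flow, mirroring asm/pmc.h and kernel/sysfs.c */
#include <stdio.h>

static struct { int pmcregs_in_use; } lppaca;	/* stands in for get_lppaca() */
static int pmcs_enabled;			/* stands in for the per-cpu flag */

/* mirrors the new ppc_set_pmu_inuse() inline in asm/pmc.h */
static void ppc_set_pmu_inuse(int inuse)
{
	lppaca.pmcregs_in_use = inuse;
}

/* mirrors ppc_enable_pmcs() in kernel/sysfs.c after the patch: the flag is
 * re-asserted on every call, even when the one-time enable is already done */
static void ppc_enable_pmcs(void)
{
	ppc_set_pmu_inuse(1);

	if (pmcs_enabled)		/* only need the platform enable once */
		return;
	pmcs_enabled = 1;
	/* ppc_md.enable_pmcs(), e.g. pseries_lpar_enable_pmcs(), runs here */
}

int main(void)
{
	ppc_enable_pmcs();		/* first oprofile run */
	ppc_set_pmu_inuse(0);		/* perf_counters finishes, clears the flag */
	ppc_enable_pmcs();		/* second oprofile run re-sets the flag */
	printf("pmcregs_in_use = %d\n", lppaca.pmcregs_in_use);	/* 1 -- kept */
	return 0;
}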
Reported-by: Maynard Johnson <mpjohn@us.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: <stable@kernel.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
 arch/powerpc/include/asm/pmc.h         | 16 ++++++++++++++--
 arch/powerpc/kernel/perf_counter.c     | 13 +++----------
 arch/powerpc/kernel/sysfs.c            |  3 +++
 arch/powerpc/platforms/pseries/setup.c |  4 ----
 4 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
index d6a616a1b3ea..ccc68b50d05d 100644
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -27,10 +27,22 @@ extern perf_irq_t perf_irq;
 
 int reserve_pmc_hardware(perf_irq_t new_perf_irq);
 void release_pmc_hardware(void);
+void ppc_enable_pmcs(void);
 
 #ifdef CONFIG_PPC64
-void power4_enable_pmcs(void);
-void pasemi_enable_pmcs(void);
+#include <asm/lppaca.h>
+
+static inline void ppc_set_pmu_inuse(int inuse)
+{
+	get_lppaca()->pmcregs_in_use = inuse;
+}
+
+extern void power4_enable_pmcs(void);
+
+#else /* CONFIG_PPC64 */
+
+static inline void ppc_set_pmu_inuse(int inuse) { }
+
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 70e1f57f7dd8..ccd6b2135642 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -62,7 +62,6 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
 {
 	return 0;
 }
-static inline void perf_set_pmu_inuse(int inuse) { }
 static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
 static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 {
@@ -93,11 +92,6 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
 	return 0;
 }
 
-static inline void perf_set_pmu_inuse(int inuse)
-{
-	get_lppaca()->pmcregs_in_use = inuse;
-}
-
 /*
  * The user wants a data address recorded.
  * If we're not doing instruction sampling, give them the SDAR
@@ -531,8 +525,7 @@ void hw_perf_disable(void)
 		 * Check if we ever enabled the PMU on this cpu.
 		 */
 		if (!cpuhw->pmcs_enabled) {
-			if (ppc_md.enable_pmcs)
-				ppc_md.enable_pmcs();
+			ppc_enable_pmcs();
 			cpuhw->pmcs_enabled = 1;
 		}
 
@@ -594,7 +587,7 @@ void hw_perf_enable(void)
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 		if (cpuhw->n_counters == 0)
-			perf_set_pmu_inuse(0);
+			ppc_set_pmu_inuse(0);
 		goto out_enable;
 	}
 
@@ -627,7 +620,7 @@ void hw_perf_enable(void)
 	 * bit set and set the hardware counters to their initial values.
 	 * Then unfreeze the counters.
 	 */
-	perf_set_pmu_inuse(1);
+	ppc_set_pmu_inuse(1);
 	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index f41aec85aa49..956ab33fd73f 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/smp.h>
+#include <asm/pmc.h>
 
 #include "cacheinfo.h"
 
@@ -123,6 +124,8 @@ static DEFINE_PER_CPU(char, pmcs_enabled);
 
 void ppc_enable_pmcs(void)
 {
+	ppc_set_pmu_inuse(1);
+
 	/* Only need to enable them once */
 	if (__get_cpu_var(pmcs_enabled))
 		return;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8d75ea21296f..ca5f2e10972c 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -223,10 +223,6 @@ static void pseries_lpar_enable_pmcs(void)
 	set = 1UL << 63;
 	reset = 0;
 	plpar_hcall_norets(H_PERFMON, set, reset);
-
-	/* instruct hypervisor to maintain PMCs */
-	if (firmware_has_feature(FW_FEATURE_SPLPAR))
-		get_lppaca()->pmcregs_in_use = 1;
 }
 
 static void __init pseries_discover_pic(void)