author	Paul Mackerras <paulus@samba.org>	2009-01-13 21:44:19 -0500
committer	Paul Mackerras <paulus@samba.org>	2009-01-13 21:44:19 -0500
commit	01d0287f068de2934109ba9b989d8807526cccc2 (patch)
tree	31e49140ecc61fd158dbd8d4e9f58358d7f84197
parent	dd0e6ba22ea21bcc2c420b385a170593c58f4c08 (diff)
powerpc/perf_counter: Make sure PMU gets enabled properly
This makes sure that we call the platform-specific ppc_md.enable_pmcs function on each CPU before we try to use the PMU on that CPU. If the CPU goes off-line and then on-line, we need to do the enable_pmcs call again, so we use the hw_perf_counter_setup hook to ensure that. It gets called as each CPU comes online, but it isn't called on the CPU that is coming up, so this adds the CPU number as an argument to it (there were no non-empty instances of hw_perf_counter_setup before).

This also arranges to set the pmcregs_in_use field of the lppaca (data structure shared with the hypervisor) on each CPU when we are using the PMU and clear it when we are not. This allows the hypervisor to optimize partition switches by not saving/restoring the PMU registers when we aren't using the PMU.

Signed-off-by: Paul Mackerras <paulus@samba.org>
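For context, the hook change relies on the kernel's __weak symbol convention: generic code supplies an empty weak default, and an architecture replaces it with a strong definition at link time. Below is a minimal userspace sketch of that pattern; the file split, the perf_counter_init_cpu stand-in, and the printf are illustrative only, not kernel code.

/* generic.c -- weak default, standing in for the empty stub in
 * kernel/perf_counter.c; it is used only if no strong definition
 * exists anywhere else in the final link. */
void __attribute__((weak)) hw_perf_counter_setup(int cpu)
{
	(void)cpu;	/* default: do nothing */
}

void perf_counter_init_cpu(int cpu)
{
	/* generic code calls the hook for each CPU coming online */
	hw_perf_counter_setup(cpu);
}

/* arch.c -- a separate translation unit; this strong definition
 * overrides the weak one at link time, standing in for the powerpc
 * implementation this patch adds. */
#include <stdio.h>

void hw_perf_counter_setup(int cpu)
{
	printf("resetting PMU state for cpu %d\n", cpu);
}

/* main.c -- drive the example */
void perf_counter_init_cpu(int cpu);

int main(void)
{
	perf_counter_init_cpu(0);
	return 0;
}

Linking all three files (cc generic.c arch.c main.c, on an ELF toolchain) prints the arch message; dropping arch.c from the link silently falls back to the weak no-op, which is how the stub in kernel/perf_counter.c behaves on architectures without their own implementation.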
 arch/powerpc/kernel/perf_counter.c | 22 ++++++++++++++++++++++
 kernel/perf_counter.c              |  4 ++--
 2 files changed, 24 insertions(+), 2 deletions(-)
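The second half of the change is the pmcregs_in_use handshake with the hypervisor. The following is a toy, self-contained model of that flag protocol; the struct and function names are invented for illustration, and only the set-on-use/clear-when-idle logic mirrors the patch.

/* pmcregs_model.c -- toy model of the pmcregs_in_use handshake.  The
 * real flag lives in the lppaca, a structure shared with the
 * hypervisor; a plain global stands in for it here. */
#include <stdio.h>

struct lppaca_model {
	unsigned char pmcregs_in_use;	/* read by the hypervisor */
};

static struct lppaca_model lppaca;	/* stands in for get_lppaca() */

/* Counters are being (re)started, as in hw_perf_restore. */
static void pmu_counters_active(void)
{
	lppaca.pmcregs_in_use = 1;	/* hypervisor must save/restore PMU */
}

/* The last counter has gone away. */
static void pmu_counters_idle(int n_counters)
{
	if (n_counters == 0)
		lppaca.pmcregs_in_use = 0;	/* partition switch may skip PMU state */
}

int main(void)
{
	pmu_counters_active();
	printf("pmcregs_in_use = %d\n", lppaca.pmcregs_in_use);
	pmu_counters_idle(0);
	printf("pmcregs_in_use = %d\n", lppaca.pmcregs_in_use);
	return 0;
}

Because the lppaca page is shared with the hypervisor, clearing the flag is all it takes for the hypervisor to skip saving and restoring the PMU registers on a partition switch.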
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index df3fe057dee9..85ad25923c2c 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -15,6 +15,7 @@
 #include <linux/hardirq.h>
 #include <asm/reg.h>
 #include <asm/pmc.h>
+#include <asm/machdep.h>
 
 struct cpu_hw_counters {
 	int n_counters;
@@ -24,6 +25,7 @@ struct cpu_hw_counters {
 	struct perf_counter *counter[MAX_HWCOUNTERS];
 	unsigned int events[MAX_HWCOUNTERS];
 	u64 mmcr[3];
+	u8 pmcs_enabled;
 };
 DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
 
@@ -262,6 +264,15 @@ u64 hw_perf_save_disable(void)
 		cpuhw->n_added = 0;
 
 		/*
+		 * Check if we ever enabled the PMU on this cpu.
+		 */
+		if (!cpuhw->pmcs_enabled) {
+			if (ppc_md.enable_pmcs)
+				ppc_md.enable_pmcs();
+			cpuhw->pmcs_enabled = 1;
+		}
+
+		/*
 		 * Set the 'freeze counters' bit.
 		 * The barrier is to make sure the mtspr has been
 		 * executed and the PMU has frozen the counters
@@ -305,6 +316,8 @@ void hw_perf_restore(u64 disable)
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 		mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
+		if (cpuhw->n_counters == 0)
+			get_lppaca()->pmcregs_in_use = 0;
 		goto out;
 	}
 
@@ -323,6 +336,7 @@ void hw_perf_restore(u64 disable)
 	 * bit set and set the hardware counters to their initial values.
 	 * Then unfreeze the counters.
 	 */
+	get_lppaca()->pmcregs_in_use = 1;
 	mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
 	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -741,6 +755,14 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	}
 }
 
+void hw_perf_counter_setup(int cpu)
+{
+	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+
+	memset(cpuhw, 0, sizeof(*cpuhw));
+	cpuhw->mmcr[0] = MMCR0_FC;
+}
+
 extern struct power_pmu ppc970_pmu;
 extern struct power_pmu power6_pmu;
 
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 3aef3062ff78..52f2f526248e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -46,7 +46,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 
 u64 __weak hw_perf_save_disable(void)		{ return 0; }
 void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
-void __weak hw_perf_counter_setup(void)		{ barrier(); }
+void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
 	       struct perf_cpu_context *cpuctx,
 	       struct perf_counter_context *ctx, int cpu)
@@ -1598,7 +1598,7 @@ static void __cpuinit perf_counter_init_cpu(int cpu)
 	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
 	mutex_unlock(&perf_resource_mutex);
 
-	hw_perf_counter_setup();
+	hw_perf_counter_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU