Commit metadata
author:    Paul Mackerras <paulus@samba.org>  2009-01-13 21:44:19 -0500
committer: Paul Mackerras <paulus@samba.org>  2009-01-13 21:44:19 -0500
commit:    01d0287f068de2934109ba9b989d8807526cccc2 (patch)
tree:      31e49140ecc61fd158dbd8d4e9f58358d7f84197 /kernel/perf_counter.c
parent:    dd0e6ba22ea21bcc2c420b385a170593c58f4c08 (diff)
powerpc/perf_counter: Make sure PMU gets enabled properly
This makes sure that we call the platform-specific ppc_md.enable_pmcs
function on each CPU before we try to use the PMU on that CPU. If the
CPU goes off-line and then on-line, we need to do the enable_pmcs call
again, so we use the hw_perf_counter_setup hook to ensure that. It gets
called as each CPU comes online, but it isn't called on the CPU that is
coming up, so this adds the CPU number as an argument to it (there were
no non-empty instances of hw_perf_counter_setup before).
This also arranges to set the pmcregs_in_use field of the lppaca (data
structure shared with the hypervisor) on each CPU when we are using the
PMU and clear it when we are not. This allows the hypervisor to optimize
partition switches by not saving/restoring the PMU registers when we
aren't using the PMU.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'kernel/perf_counter.c'):
 kernel/perf_counter.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 3aef3062ff78..52f2f526248e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -46,7 +46,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 
 u64 __weak hw_perf_save_disable(void)		{ return 0; }
 void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
-void __weak hw_perf_counter_setup(void)	{ barrier(); }
+void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
@@ -1598,7 +1598,7 @@ static void __cpuinit perf_counter_init_cpu(int cpu)
 	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
 	mutex_unlock(&perf_resource_mutex);
 
-	hw_perf_counter_setup();
+	hw_perf_counter_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU