 arch/powerpc/kernel/perf_counter.c | 22 ++++++++++++++++++++++
 kernel/perf_counter.c              |  4 ++--
 2 files changed, 24 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index df3fe057dee9..85ad25923c2c 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -15,6 +15,7 @@
 #include <linux/hardirq.h>
 #include <asm/reg.h>
 #include <asm/pmc.h>
+#include <asm/machdep.h>
 
 struct cpu_hw_counters {
 	int n_counters;
@@ -24,6 +25,7 @@ struct cpu_hw_counters {
 	struct perf_counter *counter[MAX_HWCOUNTERS];
 	unsigned int events[MAX_HWCOUNTERS];
 	u64 mmcr[3];
+	u8 pmcs_enabled;
 };
 DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
 
@@ -262,6 +264,15 @@ u64 hw_perf_save_disable(void)
 		cpuhw->n_added = 0;
 
 		/*
+		 * Check if we ever enabled the PMU on this cpu.
+		 */
+		if (!cpuhw->pmcs_enabled) {
+			if (ppc_md.enable_pmcs)
+				ppc_md.enable_pmcs();
+			cpuhw->pmcs_enabled = 1;
+		}
+
+		/*
 		 * Set the 'freeze counters' bit.
 		 * The barrier is to make sure the mtspr has been
 		 * executed and the PMU has frozen the counters
@@ -305,6 +316,8 @@ void hw_perf_restore(u64 disable)
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 		mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
+		if (cpuhw->n_counters == 0)
+			get_lppaca()->pmcregs_in_use = 0;
 		goto out;
 	}
 
@@ -323,6 +336,7 @@ void hw_perf_restore(u64 disable)
 	 * bit set and set the hardware counters to their initial values.
 	 * Then unfreeze the counters.
 	 */
+	get_lppaca()->pmcregs_in_use = 1;
 	mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
 	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -741,6 +755,14 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	}
 }
 
+void hw_perf_counter_setup(int cpu)
+{
+	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+
+	memset(cpuhw, 0, sizeof(*cpuhw));
+	cpuhw->mmcr[0] = MMCR0_FC;
+}
+
 extern struct power_pmu ppc970_pmu;
 extern struct power_pmu power6_pmu;
 
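The hw_perf_counter_setup() added above resets a CPU's counter bookkeeping and parks the PMU with the freeze bit set, so a freshly initialized CPU counts nothing until a counter is actually scheduled in. Below is a minimal userspace sketch of that per-CPU reset pattern, with per_cpu() modelled as a plain array and NR_CPUS/MMCR0_FC values assumed for illustration:

#include <string.h>

#define NR_CPUS		4
#define MMCR0_FC	0x80000000UL	/* freeze-counters bit (assumed value) */

struct cpu_hw_counters {
	int n_counters;
	unsigned long mmcr[3];
	unsigned char pmcs_enabled;
};

/* Stand-in for DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters):
 * one private slot per CPU instead of real per-CPU storage. */
static struct cpu_hw_counters cpu_hw_counters[NR_CPUS];

/* Mirrors the function added by the patch: zero this CPU's state,
 * then leave the PMU frozen until counters are enabled again. */
static void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &cpu_hw_counters[cpu];

	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

Note that the memset() also clears pmcs_enabled, which is what arms the one-shot ppc_md.enable_pmcs() call in hw_perf_save_disable() to run again the next time counters are used on that CPU.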
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 3aef3062ff78..52f2f526248e 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -46,7 +46,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 
 u64 __weak hw_perf_save_disable(void) { return 0; }
 void __weak hw_perf_restore(u64 ctrl) { barrier(); }
-void __weak hw_perf_counter_setup(void) { barrier(); }
+void __weak hw_perf_counter_setup(int cpu) { barrier(); }
 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
 	       struct perf_cpu_context *cpuctx,
 	       struct perf_counter_context *ctx, int cpu)
@@ -1598,7 +1598,7 @@ static void __cpuinit perf_counter_init_cpu(int cpu)
 	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
 	mutex_unlock(&perf_resource_mutex);
 
-	hw_perf_counter_setup();
+	hw_perf_counter_setup(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
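The signature change is safe because hw_perf_counter_setup() in kernel/perf_counter.c is only a weak default: any architecture that supplies a strong definition, as arch/powerpc/kernel/perf_counter.c now does, replaces it at link time. A standalone two-file sketch of that GCC/ELF weak-symbol pattern (hypothetical example, not kernel code):

/* file: weak_default.c */
#include <stdio.h>

/* Weak default, analogous to the __weak stub in kernel/perf_counter.c:
 * used only if no strong definition appears anywhere in the link. */
void __attribute__((weak)) hw_perf_counter_setup(int cpu)
{
	printf("generic no-op setup for cpu %d\n", cpu);
}

int main(void)
{
	hw_perf_counter_setup(0);
	return 0;
}

/* file: arch_override.c -- linking this object in replaces the weak
 * default, just as the powerpc definition overrides the core stub. */
#include <stdio.h>

void hw_perf_counter_setup(int cpu)
{
	printf("arch-specific setup for cpu %d\n", cpu);
}

Building weak_default.c alone prints the generic message; building both files together ("cc weak_default.c arch_override.c") prints the arch-specific one, with no #ifdef needed in the caller.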