author     Paul Mackerras <paulus@samba.org>  2009-08-07 02:59:45 -0400
committer  Ingo Molnar <mingo@elte.hu>        2009-08-09 06:54:37 -0400
commit     f36a1a133a947973efb8e6a1fbdcc23e4a011437 (patch)
tree       c635e0a3a8329aac348c2e861f91347abe59792f /arch/powerpc
parent     b26bc5a7f81474937e427b0c855eabee5ad56f89 (diff)
perf_counter/powerpc: Fix oops on cpus without perf_counter hardware support
If we have the powerpc perf_counter backend compiled in, but the cpu we are running on is one where we don't support the PMU, we currently oops in hw_perf_group_sched_in if we try to use any counters, because ppmu is NULL in that case, and we unconditionally dereference ppmu.

This fixes the problem by adding a check if ppmu is NULL at the beginning of hw_perf_group_sched_in, and also at the beginning of the other functions that get called from the perf_counter core, i.e. hw_perf_disable, hw_perf_enable, and hw_perf_counter_setup.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: benh@kernel.crashing.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
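The change itself is small: each of these entry points now bails out before touching ppmu. A condensed sketch of the pattern (the real hunks follow in the diff below); note that hw_perf_group_sched_in returns the number of counters scheduled, so it returns 0 rather than just returning:

	/* At the top of hw_perf_disable(), hw_perf_enable() and
	 * hw_perf_counter_setup(): nothing to do without a PMU backend. */
	if (!ppmu)
		return;

	/* At the top of hw_perf_group_sched_in(): with no PMU backend
	 * no counters can be scheduled, so report 0. */
	if (!ppmu)
		return 0;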
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/perf_counter.c | 8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 809fdf94b95f..70e1f57f7dd8 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -518,6 +518,8 @@ void hw_perf_disable(void)
 	struct cpu_hw_counters *cpuhw;
 	unsigned long flags;
 
+	if (!ppmu)
+		return;
 	local_irq_save(flags);
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 
@@ -572,6 +574,8 @@ void hw_perf_enable(void)
 	int n_lim;
 	int idx;
 
+	if (!ppmu)
+		return;
 	local_irq_save(flags);
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	if (!cpuhw->disabled) {
@@ -737,6 +741,8 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
 	long i, n, n0;
 	struct perf_counter *sub;
 
+	if (!ppmu)
+		return 0;
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	n0 = cpuhw->n_counters;
 	n = collect_events(group_leader, ppmu->n_counter - n0,
@@ -1281,6 +1287,8 @@ void hw_perf_counter_setup(int cpu)
 {
 	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
 
+	if (!ppmu)
+		return;
 	memset(cpuhw, 0, sizeof(*cpuhw));
 	cpuhw->mmcr[0] = MMCR0_FC;
 }