author		Paul Mackerras <paulus@samba.org>	2009-04-09 00:42:56 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-09 01:56:08 -0400
commit		ca8f2d7f019a8547f39ddb9ed0144932f12807f2 (patch)
tree		6bf7693de5751a305e4233dba288e603de83ff47 /arch/powerpc/kernel/perf_counter.c
parent		6c0b324435ff49fb3c68fe808a93853d81c7fb97 (diff)
perf_counter: powerpc: add nmi_enter/nmi_exit calls
Impact: fix potential deadlocks on powerpc
Now that the core is using in_nmi() (added in e30e08f6, "perf_counter:
fix NMI race in task clock"), we need the powerpc perf_counter_interrupt
to call nmi_enter() and nmi_exit() when the interrupt arrives while
interrupts are soft-disabled.
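
In outline, the entry/exit pattern this patch adds looks like the
sketch below. It condenses the diff that follows rather than
reproducing the kernel source verbatim; regs->softe is the powerpc
soft-enable flag.

/*
 * Condensed sketch of the handler shape after this patch (not the
 * verbatim kernel source; names are taken from the diff below).
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	/* soft-disabled at interrupt time => must behave as an NMI */
	int nmi = !regs->softe;

	if (nmi)
		nmi_enter();		/* lets core code see in_nmi() */
	else
		irq_enter();

	/* ... check each counter, record overflows, restart the PMU ... */

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}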
If interrupts were soft-enabled, we can treat it as a regular interrupt
and do irq_enter/irq_exit around the whole routine. This lets us get rid
of the test_perf_counter_pending() call at the end of
perf_counter_interrupt, thus simplifying things a little.
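
Threading the nmi flag through record_and_restart() into
perf_counter_overflow() matters because the generic code must not wake
up waiters directly from NMI context. The following is a rough
illustration of that deferral only; set_perf_counter_pending() and
perf_counter_wakeup() stand in for the actual generic mechanism, which
differs in detail.

/*
 * Simplified illustration: in NMI context the generic perf code
 * defers the wakeup instead of waking tasks directly.
 */
static void overflow_wakeup_sketch(struct perf_counter *counter, int nmi)
{
	if (nmi)
		set_perf_counter_pending();	/* run later, at a safe point */
	else
		perf_counter_wakeup(counter);	/* safe outside NMI context */
}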
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <18909.31952.873098.336615@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	31
1 files changed, 17 insertions, 14 deletions
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index c9d019f19074..bd76d0fa2c35 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -714,7 +714,7 @@ hw_perf_counter_init(struct perf_counter *counter)
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_counter *counter, long val,
-			       struct pt_regs *regs)
+			       struct pt_regs *regs, int nmi)
 {
 	s64 prev, delta, left;
 	int record = 0;
@@ -749,7 +749,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 * Finally record data if requested.
 	 */
 	if (record)
-		perf_counter_overflow(counter, 1, regs, 0);
+		perf_counter_overflow(counter, nmi, regs, 0);
 }
 
 /*
@@ -762,6 +762,17 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	struct perf_counter *counter;
 	long val;
 	int found = 0;
+	int nmi;
+
+	/*
+	 * If interrupts were soft-disabled when this PMU interrupt
+	 * occurred, treat it as an NMI.
+	 */
+	nmi = !regs->softe;
+	if (nmi)
+		nmi_enter();
+	else
+		irq_enter();
 
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
@@ -769,7 +780,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 		if ((int)val < 0) {
 			/* counter has overflowed */
 			found = 1;
-			record_and_restart(counter, val, regs);
+			record_and_restart(counter, val, regs, nmi);
 		}
 	}
 
@@ -796,18 +807,10 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	 */
 	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
 
-	/*
-	 * If we need a wakeup, check whether interrupts were soft-enabled
-	 * when we took the interrupt.  If they were, we can wake stuff up
-	 * immediately; otherwise we'll have do the wakeup when interrupts
-	 * get soft-enabled.
-	 */
-	if (test_perf_counter_pending() && regs->softe) {
-		irq_enter();
-		clear_perf_counter_pending();
-		perf_counter_do_pending();
+	if (nmi)
+		nmi_exit();
+	else
 		irq_exit();
-	}
 }
 
 void hw_perf_counter_setup(int cpu)