about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  arch/powerpc/kernel/perf_counter.c | 31
1 file changed, 17 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index c9d019f19074..bd76d0fa2c35 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -714,7 +714,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 	 * here so there is no possibility of being interrupted.
 	 */
 static void record_and_restart(struct perf_counter *counter, long val,
-			       struct pt_regs *regs)
+			       struct pt_regs *regs, int nmi)
 {
 	s64 prev, delta, left;
 	int record = 0;
@@ -749,7 +749,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 * Finally record data if requested.
 	 */
 	if (record)
-		perf_counter_overflow(counter, 1, regs, 0);
+		perf_counter_overflow(counter, nmi, regs, 0);
 }
 
 /*
@@ -762,6 +762,17 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	struct perf_counter *counter;
 	long val;
 	int found = 0;
+	int nmi;
+
+	/*
+	 * If interrupts were soft-disabled when this PMU interrupt
+	 * occurred, treat it as an NMI.
+	 */
+	nmi = !regs->softe;
+	if (nmi)
+		nmi_enter();
+	else
+		irq_enter();
 
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
@@ -769,7 +780,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 		if ((int)val < 0) {
 			/* counter has overflowed */
 			found = 1;
-			record_and_restart(counter, val, regs);
+			record_and_restart(counter, val, regs, nmi);
 		}
 	}
 
@@ -796,18 +807,10 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	 */
 	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
 
-	/*
-	 * If we need a wakeup, check whether interrupts were soft-enabled
-	 * when we took the interrupt. If they were, we can wake stuff up
-	 * immediately; otherwise we'll have do the wakeup when interrupts
-	 * get soft-enabled.
-	 */
-	if (test_perf_counter_pending() && regs->softe) {
-		irq_enter();
-		clear_perf_counter_pending();
-		perf_counter_do_pending();
-		irq_exit();
-	}
+	if (nmi)
+		nmi_exit();
+	else
+		irq_exit();
 }
 
 void hw_perf_counter_setup(int cpu)