author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-09-02 15:07:49 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-03 02:05:18 -0400
commit	de725dec9de7a7541996176d59cf8542365b8b0e (patch)
tree	ca58fb67f6e6086733fb32b818fe80a111abac1f /arch/x86/kernel
parent	2e556b5b320838fde98480a1f6cf220a5af200fc (diff)
perf, x86: Fix handle_irq return values
Now that we rely on the number of handled overflows, ensure all
handle_irq implementations actually return the right number.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Don Zickus <dzickus@redhat.com>
Cc: peterz@infradead.org
Cc: robert.richter@amd.com
Cc: gorcunov@gmail.com
Cc: fweisbec@gmail.com
Cc: ying.huang@intel.com
Cc: ming.m.lin@intel.com
Cc: eranian@google.com
LKML-Reference: <1283454469-1909-4-git-send-email-dzickus@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
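The patch only touches the handle_irq implementations; the point of returning a
real count is that the caller can tell one serviced overflow from several that
collapsed into a single NMI. As a rough sketch of that caller side (the name
example_perf_nmi_notify is invented here for illustration; the in-tree consumer,
perf_event_nmi_handler, differs in detail):

/*
 * Illustrative sketch only, not the actual kernel code: a caller that
 * relies on the number of handled overflows rather than a yes/no answer.
 */
static int example_perf_nmi_notify(struct pt_regs *regs)
{
	int handled;

	handled = x86_pmu.handle_irq(regs);	/* overflows actually serviced */
	if (!handled)
		return NOTIFY_DONE;		/* not our NMI, pass it on */

	/*
	 * handled > 1 means several counters overflowed under one NMI, so a
	 * second, now-redundant NMI may already be latched; the caller can
	 * use the count to decide whether to swallow it.
	 */
	return NOTIFY_STOP;
}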
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	9
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p4.c	2
2 files changed, 8 insertions, 3 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 1297bf15cb8..ee05c90012d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -713,6 +713,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	struct cpu_hw_events *cpuc;
 	int bit, loops;
 	u64 status;
+	int handled = 0;
 
 	perf_sample_data_init(&data, 0);
 
@@ -743,12 +744,16 @@ again:
 	/*
 	 * PEBS overflow sets bit 62 in the global status register
 	 */
-	if (__test_and_clear_bit(62, (unsigned long *)&status))
+	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+		handled++;
 		x86_pmu.drain_pebs(regs);
+	}
 
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
+		handled++;
+
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
@@ -770,7 +775,7 @@ again:
 
 done:
 	intel_pmu_enable_all(0);
-	return 1;
+	return handled;
 }
 
 static struct event_constraint *
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7e578e9cc58..b560db3305b 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		inc_irq_stat(apic_perf_irqs);
 	}
 
-	return handled > 0;
+	return handled;
 }
 
 /*