author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-03-08 11:51:33 -0500
committer Ingo Molnar <mingo@elte.hu>              2010-03-10 07:22:31 -0500
commit    71e2d2828046133ed985696a02e2e1499ca0bfb8 (patch)
tree      0c180d1da4be85254e59001420ff9c558b38e205 /arch/x86/kernel
parent    c08053e627d23490a03431285b78b7a5b617fbad (diff)
perf, x86: Avoid double disable on throttle vs ioctl(PERF_IOC_DISABLE)
Calling ioctl(PERF_EVENT_IOC_DISABLE) on a throttled counter would
result in a double disable. Cure this by using x86_pmu_{start,stop}
for throttle/unthrottle and teaching x86_pmu_stop() to check
->active_mask.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
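
For illustration only (an editor's sketch, not part of the commit): a minimal
user-space C model of the idempotent-stop pattern the patch adopts. Here
active_mask and pmu_stop() are simplified stand-ins for cpuc->active_mask and
x86_pmu_stop(), and the non-atomic test_and_clear_bit() stands in for the
kernel's __test_and_clear_bit(). The first stop claims the bit and performs
the hardware disable; a racing second stop (throttle vs
ioctl(PERF_EVENT_IOC_DISABLE)) finds the bit already clear and returns, so
the disable runs exactly once.

/*
 * Illustrative sketch only -- not kernel code. Simplified stand-ins:
 * active_mask models cpuc->active_mask, pmu_stop() models x86_pmu_stop().
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long active_mask;

/* Non-atomic model of __test_and_clear_bit(): returns the old bit value. */
static bool test_and_clear_bit(int nr, unsigned long *addr)
{
	bool old = (*addr >> nr) & 1;

	*addr &= ~(1UL << nr);
	return old;
}

static void pmu_stop(int idx)
{
	/* First caller claims the bit; any later caller bails out early. */
	if (!test_and_clear_bit(idx, &active_mask))
		return;

	printf("counter %d: hardware disable\n", idx);
}

int main(void)
{
	active_mask = 1UL << 3;	/* counter 3 is active */

	pmu_stop(3);		/* throttle path: disables the counter */
	pmu_stop(3);		/* ioctl disable path: now a harmless no-op */
	return 0;
}

This is the guard the second hunk below adds to x86_pmu_stop(); once both the
throttle path and the overflow path go through x86_pmu_stop(), a racing second
disable becomes a no-op instead of a double disable.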
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c        | 20 ++++++--------------
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  |  2 +-
2 files changed, 7 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9757b96f15f5..b68c4fb7a944 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event)
 
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
-				cpuc->events[hwc->idx] != event))
-		return;
-
-	x86_pmu.enable(event);
+	int ret = x86_pmu_start(event);
+	WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
-	/*
-	 * Must be done before we disable, otherwise the nmi handler
-	 * could reenable again:
-	 */
-	__clear_bit(idx, cpuc->active_mask);
+	if (!__test_and_clear_bit(idx, cpuc->active_mask))
+		return;
+
 	x86_pmu.disable(event);
 
 	/*
@@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu.disable(event);
+			x86_pmu_stop(event);
 	}
 
 	if (handled)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d87421c3f55b..84bfde64a337 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -774,7 +774,7 @@ again:
 		data.period = event->hw.last_period;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			intel_pmu_disable_event(event);
+			x86_pmu_stop(event);
 	}
 
 	intel_pmu_ack_status(ack);