author     Stephane Eranian <eranian@google.com>    2012-02-07 08:39:57 -0500
committer  Ingo Molnar <mingo@elte.hu>              2012-02-07 10:58:56 -0500
commit     f39d47ff819ed52a2afbdbecbe35f23f7755f58d (patch)
tree       6fcd6c2b9f6e7416da43065d866bd40d68bb9bc4 /kernel
parent     136e0b8eabb2913b589fc7fbd418f4d6805dbb56 (diff)
perf: Fix double start/stop in x86_pmu_start()
This patch fixes a bug introduced by the following commit:
e050e3f0a71b ("perf: Fix broken interrupt rate throttling")
That commit caused the following warning to pop up, depending on
the sampling frequency adjustments:
------------[ cut here ]------------
WARNING: at arch/x86/kernel/cpu/perf_event.c:995 x86_pmu_start+0x79/0xd4()
It was caused by the following call sequence:
perf_adjust_freq_unthr_context.part() {
    stop()
    if (delta > 0) {
        perf_adjust_period() {
            if (period > 8*...) {
                stop()
                ...
                start()
            }
        }
    }
    start()
}
This sequence caused a double start and a double stop, thus
triggering the assertion in x86_pmu_start().
The patch fixes the problem by avoiding the double calls. We
pass a new argument to perf_adjust_period() to indicate whether
or not the event is already stopped. We can't just remove the
start/stop from that function because it's called from
__perf_event_overflow(), where the event needs to be reloaded via a
back-to-back stop/start call.
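As an illustration only (not part of the kernel change), a minimal
user-space model of the new calling convention is sketched below; the
toy_pmu struct, the toy_* helpers and main() are invented, but the
handling of the disable flag mirrors perf_adjust_period() after this
patch:

/*
 * Toy user-space model, NOT kernel code: toy_pmu, toy_start(),
 * toy_stop() and main() are made up for illustration.  The point is
 * the `disable` flag: callers that have already stopped the event
 * pass false, so the helper does not stop/start it a second time.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_pmu {
	bool started;
};

static void toy_start(struct toy_pmu *pmu)
{
	/* Models the assertion in x86_pmu_start(). */
	assert(!pmu->started && "double start");
	pmu->started = true;
}

static void toy_stop(struct toy_pmu *pmu)
{
	assert(pmu->started && "double stop");
	pmu->started = false;
}

static void toy_adjust_period(struct toy_pmu *pmu, bool disable)
{
	if (disable)
		toy_stop(pmu);

	/* ... recompute the sampling period here ... */

	if (disable)
		toy_start(pmu);
}

int main(void)
{
	struct toy_pmu pmu = { .started = true };

	/* Unthrottling path: the caller already stopped the event. */
	toy_stop(&pmu);
	toy_adjust_period(&pmu, false);	/* must not touch the PMU again */
	toy_start(&pmu);

	/* Overflow path: event still running, helper does stop/start. */
	toy_adjust_period(&pmu, true);

	puts("no double start/stop");
	return 0;
}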
The patch reintroduces the assertion in x86_pmu_start(), which was
removed by commit:
84f2b9b ("perf: Remove deprecated WARN_ON_ONCE()")
In this second version, we've also added calls to disable/enable the
PMU around unthrottling and frequency adjustment, based on a bug
report from Eric Dumazet about spurious NMI interrupts.
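Condensed from the diff below, the shape of the unthrottling path
after this change is:

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		/* stop, unthrottle/re-period and restart each active event */
	}

	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);

Keeping the PMU disabled across the whole loop prevents a counter from
overflowing and raising an NMI while the periods are being rewritten.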
Reported-and-tested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: markus@trippelsdorf.de
Cc: paulus@samba.org
Link: http://lkml.kernel.org/r/20120207133956.GA4932@quad
[ Minor edits to the changelog and to the code ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/events/core.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ba36013cfb21..1b5c081d8b9f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2303,7 +2303,7 @@ do { \
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		return;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		/*
 		 * restart the event
 		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
 		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -4562,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*