diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-02-10 12:05:07 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-02-10 12:05:07 -0500 |
commit | ce2814f227d3adae8456f7cbd0bd5f922fd284f0 (patch) | |
tree | 351a35a80fb62307ea48bc1b74f687d309dbb31a /kernel | |
parent | 1282ab3f8f7bca1d3a7fb701fbe4f0f772e72ea0 (diff) | |
parent | f39d47ff819ed52a2afbdbecbe35f23f7755f58d (diff) |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf: Fix double start/stop in x86_pmu_start()
perf evsel: Fix an issue where perf report fails to show the proper percentage
perf tools: Fix prefix matching for kernel maps
perf tools: Fix perf stack to non executable on x86_64
perf: Remove deprecated WARN_ON_ONCE()
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/events/core.c | 19 |
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ba36013cfb21..1b5c081d8b9f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2303,7 +2303,7 @@ do { \ | |||
2303 | static DEFINE_PER_CPU(int, perf_throttled_count); | 2303 | static DEFINE_PER_CPU(int, perf_throttled_count); |
2304 | static DEFINE_PER_CPU(u64, perf_throttled_seq); | 2304 | static DEFINE_PER_CPU(u64, perf_throttled_seq); |
2305 | 2305 | ||
2306 | static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) | 2306 | static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) |
2307 | { | 2307 | { |
2308 | struct hw_perf_event *hwc = &event->hw; | 2308 | struct hw_perf_event *hwc = &event->hw; |
2309 | s64 period, sample_period; | 2309 | s64 period, sample_period; |
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) | |||
2322 | hwc->sample_period = sample_period; | 2322 | hwc->sample_period = sample_period; |
2323 | 2323 | ||
2324 | if (local64_read(&hwc->period_left) > 8*sample_period) { | 2324 | if (local64_read(&hwc->period_left) > 8*sample_period) { |
2325 | event->pmu->stop(event, PERF_EF_UPDATE); | 2325 | if (disable) |
2326 | event->pmu->stop(event, PERF_EF_UPDATE); | ||
2327 | |||
2326 | local64_set(&hwc->period_left, 0); | 2328 | local64_set(&hwc->period_left, 0); |
2327 | event->pmu->start(event, PERF_EF_RELOAD); | 2329 | |
2330 | if (disable) | ||
2331 | event->pmu->start(event, PERF_EF_RELOAD); | ||
2328 | } | 2332 | } |
2329 | } | 2333 | } |
2330 | 2334 | ||
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
2350 | return; | 2354 | return; |
2351 | 2355 | ||
2352 | raw_spin_lock(&ctx->lock); | 2356 | raw_spin_lock(&ctx->lock); |
2357 | perf_pmu_disable(ctx->pmu); | ||
2353 | 2358 | ||
2354 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 2359 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
2355 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 2360 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
2381 | /* | 2386 | /* |
2382 | * restart the event | 2387 | * restart the event |
2383 | * reload only if value has changed | 2388 | * reload only if value has changed |
2389 | * we have stopped the event so tell that | ||
2390 | * to perf_adjust_period() to avoid stopping it | ||
2391 | * twice. | ||
2384 | */ | 2392 | */ |
2385 | if (delta > 0) | 2393 | if (delta > 0) |
2386 | perf_adjust_period(event, period, delta); | 2394 | perf_adjust_period(event, period, delta, false); |
2387 | 2395 | ||
2388 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); | 2396 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); |
2389 | } | 2397 | } |
2390 | 2398 | ||
2399 | perf_pmu_enable(ctx->pmu); | ||
2391 | raw_spin_unlock(&ctx->lock); | 2400 | raw_spin_unlock(&ctx->lock); |
2392 | } | 2401 | } |
2393 | 2402 | ||
@@ -4562,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event, | |||
4562 | hwc->freq_time_stamp = now; | 4571 | hwc->freq_time_stamp = now; |
4563 | 4572 | ||
4564 | if (delta > 0 && delta < 2*TICK_NSEC) | 4573 | if (delta > 0 && delta < 2*TICK_NSEC) |
4565 | perf_adjust_period(event, delta, hwc->last_period); | 4574 | perf_adjust_period(event, delta, hwc->last_period, true); |
4566 | } | 4575 | } |
4567 | 4576 | ||
4568 | /* | 4577 | /* |