author		Ingo Molnar <mingo@elte.hu>	2012-03-05 03:20:08 -0500
committer	Ingo Molnar <mingo@elte.hu>	2012-03-05 03:20:08 -0500
commit		737f24bda723fdf89ecaacb99fa2bf5683c32799 (patch)
tree		35495fff3e9956679cb5468e74e6814c8e44ee66 /kernel/events
parent		8eedce996556d7d06522cd3a0e6069141c8dffe0 (diff)
parent		b7c924274c456499264d1cfa3d44063bb11eb5db (diff)
Merge branch 'perf/urgent' into perf/core
Conflicts:
tools/perf/builtin-record.c
tools/perf/builtin-top.c
tools/perf/perf.h
tools/perf/util/top.h
Merge reason: resolve these cherry-picking conflicts.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c		19
-rw-r--r--	kernel/events/hw_breakpoint.c	4
2 files changed, 16 insertions, 7 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5e0f8bb89b2..e8b32ac75ce 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2303,7 +2303,7 @@ do { \
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
         struct hw_perf_event *hwc = &event->hw;
         s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
         hwc->sample_period = sample_period;
 
         if (local64_read(&hwc->period_left) > 8*sample_period) {
-                event->pmu->stop(event, PERF_EF_UPDATE);
+                if (disable)
+                        event->pmu->stop(event, PERF_EF_UPDATE);
+
                 local64_set(&hwc->period_left, 0);
-                event->pmu->start(event, PERF_EF_RELOAD);
+
+                if (disable)
+                        event->pmu->start(event, PERF_EF_RELOAD);
         }
 }
 
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                 return;
 
         raw_spin_lock(&ctx->lock);
+        perf_pmu_disable(ctx->pmu);
 
         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                 if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                 /*
                  * restart the event
                  * reload only if value has changed
+                 * we have stopped the event so tell that
+                 * to perf_adjust_period() to avoid stopping it
+                 * twice.
                  */
                 if (delta > 0)
-                        perf_adjust_period(event, period, delta);
+                        perf_adjust_period(event, period, delta, false);
 
                 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
         }
 
+        perf_pmu_enable(ctx->pmu);
         raw_spin_unlock(&ctx->lock);
 }
 
@@ -4567,7 +4576,7 @@ static int __perf_event_overflow(struct perf_event *event,
                         hwc->freq_time_stamp = now;
 
                         if (delta > 0 && delta < 2*TICK_NSEC)
-                                perf_adjust_period(event, delta, hwc->last_period);
+                                perf_adjust_period(event, delta, hwc->last_period, true);
                 }
 
                 /*
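Taken together, the core.c hunks batch the PMU programming: perf_adjust_freq_unthr_context() now runs its whole event walk between perf_pmu_disable() and perf_pmu_enable(), and because that path has already stopped each event itself, it passes the new disable argument as false so perf_adjust_period() skips its own pmu->stop()/pmu->start() pair; the interrupt path in __perf_event_overflow() still passes true. A minimal user-space sketch of this calling convention follows; the struct and helper names are illustrative stand-ins, not the kernel API, and only the control flow mirrors the patch.

        /* sketch.c - the 'disable' flag convention from the patch above. */
        #include <stdbool.h>
        #include <stdio.h>

        struct fake_event {
                long sample_period;
                long period_left;
        };

        static void pmu_stop(struct fake_event *e)  { (void)e; puts("pmu->stop");  }
        static void pmu_start(struct fake_event *e) { (void)e; puts("pmu->start"); }

        /* Mirrors perf_adjust_period() after the patch: only stop/start
         * the event itself when the caller has not already done so. */
        static void adjust_period(struct fake_event *e, long new_period, bool disable)
        {
                e->sample_period = new_period;

                if (e->period_left > 8 * new_period) {
                        if (disable)
                                pmu_stop(e);

                        e->period_left = 0;

                        if (disable)
                                pmu_start(e);
                }
        }

        int main(void)
        {
                struct fake_event ev = { .sample_period = 10, .period_left = 1000 };

                /* Tick path (perf_adjust_freq_unthr_context): the caller
                 * stopped the event already, so false avoids a double stop. */
                pmu_stop(&ev);
                adjust_period(&ev, 100, false);
                pmu_start(&ev);

                /* Interrupt path (__perf_event_overflow): nothing stopped
                 * yet, so true lets adjust_period() do the stop/start. */
                ev.period_left = 1000;
                adjust_period(&ev, 100, true);
                return 0;
        }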
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b0309f76d77..3330022a7ac 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -658,10 +658,10 @@ int __init init_hw_breakpoint(void)
 
  err_alloc:
         for_each_possible_cpu(err_cpu) {
-                if (err_cpu == cpu)
-                        break;
                 for (i = 0; i < TYPE_MAX; i++)
                         kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+                if (err_cpu == cpu)
+                        break;
         }
 
         return -ENOMEM;
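The hw_breakpoint.c hunk reorders the error-path cleanup in init_hw_breakpoint(): the err_cpu == cpu check now comes after the kfree pass rather than before it, so the CPU whose allocation failed is also cleaned up instead of being skipped by the early break; kfree(NULL) is a no-op, so per-type slots that were never allocated are harmless to free. Below is a self-contained sketch of the same unwind ordering, with plain zero-initialized arrays and free() standing in for the kernel's per-cpu accessors and kfree(); all names here are invented for the example.

        /* sketch_unwind.c - error-path ordering from the hw_breakpoint fix. */
        #include <stdlib.h>

        #define NR_CPUS_DEMO  4
        #define TYPE_MAX_DEMO 2

        /* Static, hence zero-initialized: free(NULL) is safe for slots
         * the allocation loop never reached. */
        static int *nr_task_bp_pinned[NR_CPUS_DEMO][TYPE_MAX_DEMO];

        static int init_all(void)
        {
                int cpu, i, err_cpu;

                for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
                        for (i = 0; i < TYPE_MAX_DEMO; i++) {
                                nr_task_bp_pinned[cpu][i] = calloc(16, sizeof(int));
                                if (!nr_task_bp_pinned[cpu][i])
                                        goto err_alloc;
                        }
                }
                return 0;

         err_alloc:
                for (err_cpu = 0; err_cpu < NR_CPUS_DEMO; err_cpu++) {
                        /* Free first, then test: the failing cpu's partial
                         * allocations are released before the loop stops. */
                        for (i = 0; i < TYPE_MAX_DEMO; i++)
                                free(nr_task_bp_pinned[err_cpu][i]);
                        if (err_cpu == cpu)
                                break;
                }
                return -1;
        }

        int main(void)
        {
                return init_all() ? 1 : 0;
        }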