author | Alexander Shishkin <alexander.shishkin@linux.intel.com> | 2013-12-16 07:17:36 -0500
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2013-12-17 09:04:00 -0500
commit | 443772776c69ac9293d66b4d69fd9af16299cc2a (patch) |
tree | 2de33c59380501f7bb34ed9c39897bd30d942c04 /kernel/events |
parent | cf30d52e2d11523c42048ab89ed4215b5021526a (diff) |
perf: Disable all pmus on unthrottling and rescheduling
Currently, only one PMU in a context gets disabled during unthrottling
and event_sched_{out,in}(); however, events in one context may belong to
different PMUs, which results in PMUs being reprogrammed while they are
still enabled.

This means that mixed PMU use (which is rare in itself) can produce
completely unreliable results: corrupted events, bogus values, and so on.

This patch temporarily disables the PMU corresponding to each event in
the context while that event is being modified.
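In other words, every place that reprograms an individual event now brackets
that work with a disable/enable pair on the event's own PMU. Schematically
(a minimal sketch of the pattern, not the literal kernel code;
modify_event() is a hypothetical placeholder for the per-event state updates
done in event_sched_{out,in}() and the unthrottling path):

```c
/*
 * Sketch of the bracketing pattern this patch applies.
 * modify_event() is a hypothetical stand-in for the per-event
 * reprogramming done in event_sched_out()/event_sched_in() and
 * perf_adjust_freq_unthr_context().
 */
static void update_one_event(struct perf_event *event)
{
        perf_pmu_disable(event->pmu);   /* quiesce this event's PMU ... */
        modify_event(event);            /* ... reprogram it while stopped ... */
        perf_pmu_enable(event->pmu);    /* ... then let it run again */
}
```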
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Link: http://lkml.kernel.org/r/1387196256-8030-1-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r-- | kernel/events/core.c | 21
1 file changed, 18 insertions(+), 3 deletions(-)
```diff
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 72348dc192c1..f5744010a8d2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return;
 
+	perf_pmu_disable(event->pmu);
+
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	if (event->pending_disable) {
 		event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
 		ctx->nr_freq--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
+
+	perf_pmu_enable(event->pmu);
 }
 
 static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
 		 struct perf_event_context *ctx)
 {
 	u64 tstamp = perf_event_time(event);
+	int ret = 0;
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
 	 */
 	smp_wmb();
 
+	perf_pmu_disable(event->pmu);
+
 	if (event->pmu->add(event, PERF_EF_START)) {
 		event->state = PERF_EVENT_STATE_INACTIVE;
 		event->oncpu = -1;
-		return -EAGAIN;
+		ret = -EAGAIN;
+		goto out;
 	}
 
 	event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
 	if (event->attr.exclusive)
 		cpuctx->exclusive = 1;
 
-	return 0;
+out:
+	perf_pmu_enable(event->pmu);
+
+	return ret;
 }
 
 static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		if (!event_filter_match(event))
 			continue;
 
+		perf_pmu_disable(event->pmu);
+
 		hwc = &event->hw;
 
 		if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
-			continue;
+			goto next;
 
 		/*
 		 * stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
+	next:
+		perf_pmu_enable(event->pmu);
 	}
 
 	perf_pmu_enable(ctx->pmu);
```
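Note that perf_adjust_freq_unthr_context() already wraps the whole loop in a
perf_pmu_disable(ctx->pmu) / perf_pmu_enable(ctx->pmu) pair (the last context
line above shows the enable side), so the new per-event pair nests inside an
outer one. The nesting is safe because the helpers are reference-counted per
CPU; roughly, their behavior in this kernel version (a sketch from
kernel/events/core.c, not part of this patch) is:

```c
void perf_pmu_disable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);

        if (!(*count)++)                /* only the first disable stops the hardware */
                pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);

        if (!--(*count))                /* only the matching last enable restarts it */
                pmu->pmu_enable(pmu);
}
```

When event->pmu is the same PMU as ctx->pmu, the inner pair merely bumps the
per-CPU count; when the event belongs to a different PMU, the inner pair is
what actually disables and re-enables that second PMU, which is exactly the
mixed-PMU case this patch fixes.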