diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-12-19 12:10:46 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-12-19 12:10:46 -0500 |
| commit | 58cac3faefe26794dacda190dbd7bf374e3516d2 (patch) | |
| tree | e86765ea91c5ef5c2bba93af048bc0be74e3a61f /kernel | |
| parent | 9b1be0f976a158590b924ac8789afe69b06dd302 (diff) | |
| parent | 189b84fb54490ae24111124346a8e63f8e019385 (diff) | |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"An ABI documentation fix, and a mixed-PMU perf-info-corruption fix"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf: Document the new transaction sample type
perf: Disable all pmus on unthrottling and rescheduling
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/events/core.c | 21 |
1 file changed, 18 insertions, 3 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index 72348dc192c1..f5744010a8d2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event, | |||
| 1396 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 1396 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 1397 | return; | 1397 | return; |
| 1398 | 1398 | ||
| 1399 | perf_pmu_disable(event->pmu); | ||
| 1400 | |||
| 1399 | event->state = PERF_EVENT_STATE_INACTIVE; | 1401 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 1400 | if (event->pending_disable) { | 1402 | if (event->pending_disable) { |
| 1401 | event->pending_disable = 0; | 1403 | event->pending_disable = 0; |
| @@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event, | |||
| 1412 | ctx->nr_freq--; | 1414 | ctx->nr_freq--; |
| 1413 | if (event->attr.exclusive || !cpuctx->active_oncpu) | 1415 | if (event->attr.exclusive || !cpuctx->active_oncpu) |
| 1414 | cpuctx->exclusive = 0; | 1416 | cpuctx->exclusive = 0; |
| 1417 | |||
| 1418 | perf_pmu_enable(event->pmu); | ||
| 1415 | } | 1419 | } |
| 1416 | 1420 | ||
| 1417 | static void | 1421 | static void |
| @@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event, | |||
| 1652 | struct perf_event_context *ctx) | 1656 | struct perf_event_context *ctx) |
| 1653 | { | 1657 | { |
| 1654 | u64 tstamp = perf_event_time(event); | 1658 | u64 tstamp = perf_event_time(event); |
| 1659 | int ret = 0; | ||
| 1655 | 1660 | ||
| 1656 | if (event->state <= PERF_EVENT_STATE_OFF) | 1661 | if (event->state <= PERF_EVENT_STATE_OFF) |
| 1657 | return 0; | 1662 | return 0; |
| @@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event, | |||
| 1674 | */ | 1679 | */ |
| 1675 | smp_wmb(); | 1680 | smp_wmb(); |
| 1676 | 1681 | ||
| 1682 | perf_pmu_disable(event->pmu); | ||
| 1683 | |||
| 1677 | if (event->pmu->add(event, PERF_EF_START)) { | 1684 | if (event->pmu->add(event, PERF_EF_START)) { |
| 1678 | event->state = PERF_EVENT_STATE_INACTIVE; | 1685 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 1679 | event->oncpu = -1; | 1686 | event->oncpu = -1; |
| 1680 | return -EAGAIN; | 1687 | ret = -EAGAIN; |
| 1688 | goto out; | ||
| 1681 | } | 1689 | } |
| 1682 | 1690 | ||
| 1683 | event->tstamp_running += tstamp - event->tstamp_stopped; | 1691 | event->tstamp_running += tstamp - event->tstamp_stopped; |
| @@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event, | |||
| 1693 | if (event->attr.exclusive) | 1701 | if (event->attr.exclusive) |
| 1694 | cpuctx->exclusive = 1; | 1702 | cpuctx->exclusive = 1; |
| 1695 | 1703 | ||
| 1696 | return 0; | 1704 | out: |
| 1705 | perf_pmu_enable(event->pmu); | ||
| 1706 | |||
| 1707 | return ret; | ||
| 1697 | } | 1708 | } |
| 1698 | 1709 | ||
| 1699 | static int | 1710 | static int |
| @@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
| 2743 | if (!event_filter_match(event)) | 2754 | if (!event_filter_match(event)) |
| 2744 | continue; | 2755 | continue; |
| 2745 | 2756 | ||
| 2757 | perf_pmu_disable(event->pmu); | ||
| 2758 | |||
| 2746 | hwc = &event->hw; | 2759 | hwc = &event->hw; |
| 2747 | 2760 | ||
| 2748 | if (hwc->interrupts == MAX_INTERRUPTS) { | 2761 | if (hwc->interrupts == MAX_INTERRUPTS) { |
| @@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
| 2752 | } | 2765 | } |
| 2753 | 2766 | ||
| 2754 | if (!event->attr.freq || !event->attr.sample_freq) | 2767 | if (!event->attr.freq || !event->attr.sample_freq) |
| 2755 | continue; | 2768 | goto next; |
| 2756 | 2769 | ||
| 2757 | /* | 2770 | /* |
| 2758 | * stop the event and update event->count | 2771 | * stop the event and update event->count |
| @@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, | |||
| 2774 | perf_adjust_period(event, period, delta, false); | 2787 | perf_adjust_period(event, period, delta, false); |
| 2775 | 2788 | ||
| 2776 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); | 2789 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); |
| 2790 | next: | ||
| 2791 | perf_pmu_enable(event->pmu); | ||
| 2777 | } | 2792 | } |
| 2778 | 2793 | ||
| 2779 | perf_pmu_enable(ctx->pmu); | 2794 | perf_pmu_enable(ctx->pmu); |
