about summary refs log tree commit diff stats
path: root/kernel/events
diff options
context:
space:
mode:
author	Ingo Molnar <mingo@kernel.org>	2014-01-16 03:33:30 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-01-16 03:33:30 -0500
commit	860fc2f2640ec348b9520ca4649b1bfd23d91bc2 (patch)
tree	73d90d6ef86893c89bb70e78a2b63295d531f371 /kernel/events
parent	197749981e539c1eb5863f417de6dd4e2c02b76c (diff)
parent	bee09ed91cacdbffdbcd3b05de8409c77ec9fcd6 (diff)
Merge branch 'perf/urgent' into perf/core
Pick up the latest fixes, refresh the development tree. Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5c8726473006..56003c6edfd3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1397,6 +1397,8 @@ event_sched_out(struct perf_event *event,
1397 if (event->state != PERF_EVENT_STATE_ACTIVE) 1397 if (event->state != PERF_EVENT_STATE_ACTIVE)
1398 return; 1398 return;
1399 1399
1400 perf_pmu_disable(event->pmu);
1401
1400 event->state = PERF_EVENT_STATE_INACTIVE; 1402 event->state = PERF_EVENT_STATE_INACTIVE;
1401 if (event->pending_disable) { 1403 if (event->pending_disable) {
1402 event->pending_disable = 0; 1404 event->pending_disable = 0;
@@ -1413,6 +1415,8 @@ event_sched_out(struct perf_event *event,
1413 ctx->nr_freq--; 1415 ctx->nr_freq--;
1414 if (event->attr.exclusive || !cpuctx->active_oncpu) 1416 if (event->attr.exclusive || !cpuctx->active_oncpu)
1415 cpuctx->exclusive = 0; 1417 cpuctx->exclusive = 0;
1418
1419 perf_pmu_enable(event->pmu);
1416} 1420}
1417 1421
1418static void 1422static void
@@ -1653,6 +1657,7 @@ event_sched_in(struct perf_event *event,
1653 struct perf_event_context *ctx) 1657 struct perf_event_context *ctx)
1654{ 1658{
1655 u64 tstamp = perf_event_time(event); 1659 u64 tstamp = perf_event_time(event);
1660 int ret = 0;
1656 1661
1657 if (event->state <= PERF_EVENT_STATE_OFF) 1662 if (event->state <= PERF_EVENT_STATE_OFF)
1658 return 0; 1663 return 0;
@@ -1675,10 +1680,13 @@ event_sched_in(struct perf_event *event,
1675 */ 1680 */
1676 smp_wmb(); 1681 smp_wmb();
1677 1682
1683 perf_pmu_disable(event->pmu);
1684
1678 if (event->pmu->add(event, PERF_EF_START)) { 1685 if (event->pmu->add(event, PERF_EF_START)) {
1679 event->state = PERF_EVENT_STATE_INACTIVE; 1686 event->state = PERF_EVENT_STATE_INACTIVE;
1680 event->oncpu = -1; 1687 event->oncpu = -1;
1681 return -EAGAIN; 1688 ret = -EAGAIN;
1689 goto out;
1682 } 1690 }
1683 1691
1684 event->tstamp_running += tstamp - event->tstamp_stopped; 1692 event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1694,7 +1702,10 @@ event_sched_in(struct perf_event *event,
1694 if (event->attr.exclusive) 1702 if (event->attr.exclusive)
1695 cpuctx->exclusive = 1; 1703 cpuctx->exclusive = 1;
1696 1704
1697 return 0; 1705out:
1706 perf_pmu_enable(event->pmu);
1707
1708 return ret;
1698} 1709}
1699 1710
1700static int 1711static int
@@ -2744,6 +2755,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2744 if (!event_filter_match(event)) 2755 if (!event_filter_match(event))
2745 continue; 2756 continue;
2746 2757
2758 perf_pmu_disable(event->pmu);
2759
2747 hwc = &event->hw; 2760 hwc = &event->hw;
2748 2761
2749 if (hwc->interrupts == MAX_INTERRUPTS) { 2762 if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2753,7 +2766,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2753 } 2766 }
2754 2767
2755 if (!event->attr.freq || !event->attr.sample_freq) 2768 if (!event->attr.freq || !event->attr.sample_freq)
2756 continue; 2769 goto next;
2757 2770
2758 /* 2771 /*
2759 * stop the event and update event->count 2772 * stop the event and update event->count
@@ -2775,6 +2788,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2775 perf_adjust_period(event, period, delta, false); 2788 perf_adjust_period(event, period, delta, false);
2776 2789
2777 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 2790 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2791 next:
2792 perf_pmu_enable(event->pmu);
2778 } 2793 }
2779 2794
2780 perf_pmu_enable(ctx->pmu); 2795 perf_pmu_enable(ctx->pmu);