Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 6 ++++++
 1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 357ee8d5e8ae..9819a69a61a1 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1065,6 +1065,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	struct perf_event *event;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 	ctx->is_active = 0;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1083,6 +1084,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 			group_sched_out(event, cpuctx, ctx);
 	}
 out:
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -1400,6 +1402,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
 	if (cpuctx->task_ctx == ctx)
 		return;
 
+	perf_pmu_disable(ctx->pmu);
 	/*
 	 * We want to keep the following priority order:
 	 * cpu pinned (that don't need to move), task pinned,
@@ -1418,6 +1421,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
 	 * cpu-context we got scheduled on is actually rotating.
 	 */
 	perf_pmu_rotate_start(ctx->pmu);
+	perf_pmu_enable(ctx->pmu);
 }
 
 /*
@@ -1629,6 +1633,7 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
 		rotate = 1;
 	}
 
+	perf_pmu_disable(cpuctx->ctx.pmu);
 	perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx, cpuctx->timer_interval);
@@ -1649,6 +1654,7 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
 		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
 
 done:
+	perf_pmu_enable(cpuctx->ctx.pmu);
 	hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval));
 
 	return restart;
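
For context (not part of the diff above): the perf_pmu_disable()/perf_pmu_enable() pairs added here appear to nest via a per-cpu count, so only the outermost pair actually stops and restarts the PMU hardware; wrapping whole scheduling paths in such a pair batches all event reprogramming into a single stop/start. Below is a minimal, self-contained user-space sketch of that refcounted disable/enable pattern. The names fake_pmu, hw_stop, hw_start, pmu_disable and pmu_enable are illustrative only and are not the kernel's APIs.

#include <stdio.h>

/* Stand-in for struct pmu; the kernel keeps the count per-cpu. */
struct fake_pmu {
	int disable_count;
};

static void hw_stop(void)  { printf("hardware PMU stopped\n"); }
static void hw_start(void) { printf("hardware PMU started\n"); }

static void pmu_disable(struct fake_pmu *pmu)
{
	/* Only the first (outermost) caller touches the hardware. */
	if (!pmu->disable_count++)
		hw_stop();
}

static void pmu_enable(struct fake_pmu *pmu)
{
	/* Only the last (outermost) caller restarts the hardware. */
	if (!--pmu->disable_count)
		hw_start();
}

int main(void)
{
	struct fake_pmu pmu = { 0 };

	pmu_disable(&pmu);	/* outer pair, e.g. around ctx_sched_out() */
	pmu_disable(&pmu);	/* nested pair deeper in the call chain */
	pmu_enable(&pmu);	/* no hardware access yet */
	pmu_enable(&pmu);	/* hardware restarted exactly once, here */
	return 0;
}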