 kernel/events/core.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 935aefd16354..89b47050a2e8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2329,6 +2329,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	lockdep_assert_held(&ctx->lock);
 
 	ctx->is_active &= ~event_type;
+	if (ctx->task) {
+		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+		if (!ctx->is_active)
+			cpuctx->task_ctx = NULL;
+	}
+
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -2629,7 +2635,6 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 		return;
 
 	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-	cpuctx->task_ctx = NULL;
 }
 
 /*
@@ -2712,6 +2717,13 @@ ctx_sched_in(struct perf_event_context *ctx,
 	lockdep_assert_held(&ctx->lock);
 
 	ctx->is_active |= event_type;
+	if (ctx->task) {
+		if (!is_active)
+			cpuctx->task_ctx = ctx;
+		else
+			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+	}
+
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -2756,12 +2768,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 * cpu flexible, task flexible.
 	 */
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-
-	if (ctx->nr_events)
-		cpuctx->task_ctx = ctx;
-
-	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
-
+	perf_event_sched_in(cpuctx, ctx, task);
 	perf_pmu_enable(ctx->pmu);
 	perf_ctx_unlock(cpuctx, ctx);
 }
