-rw-r--r--	kernel/events/core.c	42
1 file changed, 30 insertions(+), 12 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index de14b67a3b0b..75bde93eb76f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -314,6 +314,7 @@ again:
 enum event_type_t {
 	EVENT_FLEXIBLE = 0x1,
 	EVENT_PINNED = 0x2,
+	EVENT_TIME = 0x4,
 	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
 };
 
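The patch introduces EVENT_TIME as a new bit in event_type_t while deliberately leaving it out of EVENT_ALL, so masking ctx->is_active with EVENT_ALL tests only the schedulable-work bits (pinned/flexible) and the time bit can be tracked on its own. A minimal standalone sketch of that bit arithmetic (illustrative only, not part of the patch):

/* Illustrative sketch (not part of the patch): EVENT_ALL deliberately
 * excludes EVENT_TIME, so "is_active & EVENT_ALL" tests only the
 * schedulable-work bits while the time bit is checked independently. */
#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

int main(void)
{
	int is_active = EVENT_PINNED | EVENT_TIME;

	printf("work bits: %#x\n", is_active & EVENT_ALL);  /* 0x2 */
	printf("time bit:  %#x\n", is_active & EVENT_TIME); /* 0x4 */
	return 0;
}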
@@ -1294,16 +1295,18 @@ static u64 perf_event_time(struct perf_event *event)
 
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
- * The caller of this function needs to hold the ctx->lock.
  */
 static void update_event_times(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 	u64 run_end;
 
+	lockdep_assert_held(&ctx->lock);
+
 	if (event->state < PERF_EVENT_STATE_INACTIVE ||
 	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
 		return;
+
 	/*
 	 * in cgroup mode, time_enabled represents
 	 * the time the event was enabled AND active
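Here the locking rule that was previously only documented in the comment becomes an executable assertion: lockdep_assert_held() complains at runtime (under CONFIG_LOCKDEP) if update_event_times() is ever reached without ctx->lock held. A rough userspace analogue of the idea, using a plain assert and a hypothetical lock_held flag in place of real lockdep state:

/* Userspace analogue (illustrative, not kernel code): the documented
 * "caller must hold ctx->lock" rule becomes a check that fires in
 * debug builds, which is what lockdep_assert_held() does in-kernel. */
#include <assert.h>
#include <stdbool.h>

struct ctx {
	bool lock_held; /* hypothetical stand-in for lockdep's ownership state */
};

static void update_event_times(struct ctx *ctx)
{
	assert(ctx->lock_held); /* ~ lockdep_assert_held(&ctx->lock) */
	/* ... safe to update total_time_enabled / total_time_running ... */
}

int main(void)
{
	struct ctx c = { .lock_held = true };
	update_event_times(&c); /* precondition satisfied, assert passes */
	return 0;
}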
@@ -2349,24 +2352,33 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	}
 
 	ctx->is_active &= ~event_type;
+	if (!(ctx->is_active & EVENT_ALL))
+		ctx->is_active = 0;
+
 	if (ctx->task) {
 		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
 		if (!ctx->is_active)
 			cpuctx->task_ctx = NULL;
 	}
 
-	update_context_time(ctx);
-	update_cgrp_time_from_cpuctx(cpuctx);
-	if (!ctx->nr_active)
+	is_active ^= ctx->is_active; /* changed bits */
+
+	if (is_active & EVENT_TIME) {
+		/* update (and stop) ctx time */
+		update_context_time(ctx);
+		update_cgrp_time_from_cpuctx(cpuctx);
+	}
+
+	if (!ctx->nr_active || !(is_active & EVENT_ALL))
 		return;
 
 	perf_pmu_disable(ctx->pmu);
-	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
+	if (is_active & EVENT_PINNED) {
 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
 
-	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
+	if (is_active & EVENT_FLEXIBLE) {
 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
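After clearing bits from ctx->is_active (and dropping EVENT_TIME once no real work bits remain), the XOR of the old and new values yields exactly the bits that changed, so only the event classes that actually went away are scheduled out, and the context clock is stopped only when EVENT_TIME itself transitioned. A small sketch of that changed-bits idiom with made-up values:

/* Sketch of the changed-bits idiom used in ctx_sched_out() above,
 * with illustrative values only. */
#include <stdio.h>

enum {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

int main(void)
{
	int is_active = EVENT_FLEXIBLE | EVENT_PINNED | EVENT_TIME; /* old state */
	int event_type = EVENT_FLEXIBLE;  /* what the caller scheds out */
	int new_active = is_active & ~event_type;

	/* once no real work bits remain, drop EVENT_TIME too */
	if (!(new_active & EVENT_ALL))
		new_active = 0;

	is_active ^= new_active; /* old XOR new = exactly the changed bits */

	printf("changed bits: %#x\n", is_active); /* 0x1: only FLEXIBLE went away */
	/* EVENT_TIME did not change, so the context clock keeps running;
	 * scheduling out EVENT_ALL instead would make this 0x7 and the
	 * clock would be stopped as well. */
	return 0;
}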
@@ -2740,7 +2752,7 @@ ctx_sched_in(struct perf_event_context *ctx,
 	if (likely(!ctx->nr_events))
 		return;
 
-	ctx->is_active |= event_type;
+	ctx->is_active |= (event_type | EVENT_TIME);
 	if (ctx->task) {
 		if (!is_active)
 			cpuctx->task_ctx = ctx;
@@ -2748,18 +2760,24 @@ ctx_sched_in(struct perf_event_context *ctx,
 		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
 	}
 
-	now = perf_clock();
-	ctx->timestamp = now;
-	perf_cgroup_set_timestamp(task, ctx);
+	is_active ^= ctx->is_active; /* changed bits */
+
+	if (is_active & EVENT_TIME) {
+		/* start ctx time */
+		now = perf_clock();
+		ctx->timestamp = now;
+		perf_cgroup_set_timestamp(task, ctx);
+	}
+
 	/*
 	 * First go through the list and put on any pinned groups
 	 * in order to give them the best chance of going on.
 	 */
-	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
+	if (is_active & EVENT_PINNED)
 		ctx_pinned_sched_in(ctx, cpuctx);
 
 	/* Then walk through the lower prio flexible groups */
-	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
+	if (is_active & EVENT_FLEXIBLE)
 		ctx_flexible_sched_in(ctx, cpuctx);
 }
 
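ctx_sched_in() mirrors the sched-out side: EVENT_TIME is OR-ed in unconditionally, so the context clock restarts whenever anything is scheduled in, and the same XOR trick ensures pinned/flexible groups are only walked when they were newly activated. The two sides together bracket the interval during which the context time is allowed to advance. A companion sketch of the sched-in path, again with illustrative values:

/* Companion sketch of the ctx_sched_in() side (illustrative only). */
#include <stdio.h>

enum {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
};

static void sched_in(int *is_active, int event_type)
{
	int new_active = *is_active | event_type | EVENT_TIME;
	int changed = *is_active ^ new_active; /* newly-set bits only */

	*is_active = new_active;
	if (changed & EVENT_TIME)
		printf("start ctx time\n"); /* clock starts exactly once */
	if (changed & EVENT_PINNED)
		printf("sched in pinned groups\n");
	if (changed & EVENT_FLEXIBLE)
		printf("sched in flexible groups\n");
}

int main(void)
{
	int is_active = 0;

	sched_in(&is_active, EVENT_PINNED);   /* starts time, pinned in */
	sched_in(&is_active, EVENT_FLEXIBLE); /* flexible only: time and
	                                       * pinned are already active */
	return 0;
}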