author     Stephane Eranian <eranian@google.com>    2011-01-03 11:20:01 -0500
committer  Ingo Molnar <mingo@elte.hu>              2011-01-07 09:08:51 -0500
commit     4158755d3136f4cb05c1a8a260e9c06f93baeb48 (patch)
tree       4ca2b7b020582a4b353af84d5684a91d4bcf995d /kernel
parent     5632ab12e9e1fcd7e94058567e181d8f35e83798 (diff)
perf_events: Add perf_event_time()
Add perf_event_time() to centralize access to event timing, in particular
ctx->time. This prepares for cgroup support.

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4d22059c.122ae30a.5e0e.ffff8b8b@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
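The helper itself is deliberately trivial: it just returns ctx->time. Its
value is that every consumer of event time now goes through one choke point,
so a per-cgroup clock can later be substituted in a single place, and each
caller reads the time once per scheduling step. The sketch below illustrates
that caller pattern; it is a standalone mock with simplified stand-in types,
not the kernel's definitions.

/*
 * Standalone illustration of the pattern this patch introduces: callers
 * snapshot the event time once into a local, so every timestamp written
 * in one scheduling step agrees, and the definition of "event time" can
 * later be changed (e.g. to a per-cgroup clock) in exactly one function.
 *
 * The types here are simplified stand-ins, not the kernel's definitions.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct perf_event_context {
	u64 time;	/* context clock, advanced by update_context_time() */
};

struct perf_event {
	struct perf_event_context *ctx;
	u64 tstamp_enabled;
	u64 tstamp_running;
	u64 tstamp_stopped;
};

/* The new helper: the single point that defines "time" for an event. */
static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	return ctx ? ctx->time : 0;
}

/* Caller pattern used throughout the patch: read the time once, reuse it. */
static void add_event_to_ctx_sketch(struct perf_event *event)
{
	u64 tstamp = perf_event_time(event);

	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

int main(void)
{
	struct perf_event_context ctx = { .time = 1000 };
	struct perf_event event = { .ctx = &ctx };

	add_event_to_ctx_sketch(&event);
	printf("enabled=%llu running=%llu stopped=%llu\n",
	       event.tstamp_enabled, event.tstamp_running,
	       event.tstamp_stopped);
	return 0;
}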
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c  38
1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index dcdb19ed83a6..b782b7a79f00 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -268,6 +268,12 @@ static void update_context_time(struct perf_event_context *ctx)
 	ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
  */
@@ -281,7 +287,7 @@ static void update_event_times(struct perf_event *event)
 		return;
 
 	if (ctx->is_active)
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 	else
 		run_end = event->tstamp_stopped;
 
@@ -290,7 +296,7 @@ static void update_event_times(struct perf_event *event)
 	if (event->state == PERF_EVENT_STATE_INACTIVE)
 		run_end = event->tstamp_stopped;
 	else
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 
 	event->total_time_running = run_end - event->tstamp_running;
 }
@@ -546,6 +552,7 @@ event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
 	u64 delta;
 	/*
 	 * An event which could not be activated because of
@@ -557,7 +564,7 @@ event_sched_out(struct perf_event *event,
 	    && !event_filter_match(event)) {
 		delta = ctx->time - event->tstamp_stopped;
 		event->tstamp_running += delta;
-		event->tstamp_stopped = ctx->time;
+		event->tstamp_stopped = tstamp;
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -568,7 +575,7 @@ event_sched_out(struct perf_event *event,
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_stopped = tstamp;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -780,6 +787,8 @@ event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -796,9 +805,9 @@ event_sched_in(struct perf_event *event,
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	event->tstamp_running += tstamp - event->tstamp_stopped;
 
-	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+	event->shadow_ctx_time = tstamp - ctx->timestamp;
 
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
@@ -910,11 +919,13 @@ static int group_can_go_on(struct perf_event *event,
 static void add_event_to_ctx(struct perf_event *event,
 			       struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	list_add_event(event, ctx);
 	perf_group_attach(event);
-	event->tstamp_enabled = ctx->time;
-	event->tstamp_running = ctx->time;
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_enabled = tstamp;
+	event->tstamp_running = tstamp;
+	event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -1054,14 +1065,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 			    struct perf_event_context *ctx)
 {
 	struct perf_event *sub;
+	u64 tstamp = perf_event_time(event);
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
-	event->tstamp_enabled = ctx->time - event->total_time_enabled;
+	event->tstamp_enabled = tstamp - event->total_time_enabled;
 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
-		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-			sub->tstamp_enabled =
-				ctx->time - sub->total_time_enabled;
-		}
+		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
 	}
 }
 
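Read as a whole, the patch is effectively behavior-neutral: each hunk replaces
a direct ctx->time read with one snapshot taken through the new helper, which
also guarantees that the several timestamps written in a single scheduling
step all carry the same value. The tstamp fields implement stopwatch-style
accounting, and that arithmetic can be checked in isolation; the standalone
sketch below mirrors the kernel field names but is purely illustrative.

/*
 * Self-contained check of the stopwatch arithmetic the hunks above
 * maintain; illustration only, not kernel code.
 */
#include <assert.h>

typedef unsigned long long u64;

int main(void)
{
	u64 tstamp_running, tstamp_stopped, total_time_running;

	/* add_event_to_ctx() at ctx->time == 100 */
	tstamp_running = tstamp_stopped = 100;

	/* event_sched_out() at ctx->time == 130 */
	tstamp_stopped = 130;

	/*
	 * event_sched_in() at ctx->time == 150: push the running baseline
	 * forward by the 20 units the event spent scheduled out.
	 */
	tstamp_running += 150 - tstamp_stopped;

	/* update_event_times() reading at ctx->time == 160 */
	total_time_running = 160 - tstamp_running;

	/* on-PMU intervals were 100..130 and 150..160: 30 + 10 == 40 */
	assert(total_time_running == 40);
	return 0;
}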