From 0b3fcf178deefd7b64154c2c0760a2c63df0b74f Mon Sep 17 00:00:00 2001
From: Stephane Eranian
Date: Mon, 3 Jan 2011 18:20:01 +0200
Subject: perf_events: Move code around to prepare for cgroup

In particular, this patch moves perf_event_exit_task() before
cgroup_exit() to allow for cgroup support. The cgroup_exit()
function detaches the cgroups attached to a task.

Other movements include hoisting some definitions and inlines
to the top of perf_event.c.

Signed-off-by: Stephane Eranian
Signed-off-by: Peter Zijlstra
LKML-Reference: <4d22058b.cdace30a.4657.ffff95b1@mx.google.com>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf1e8cc..2c14e3afdf0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,6 +38,12 @@
 
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void)	{ }
 
 extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@ extern __weak const char *perf_pmu_name(void)
 	return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+	return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 	put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-	return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -1193,12 +1205,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
 	return 0;
 }
 
-enum event_type_t {
-	EVENT_FLEXIBLE = 0x1,
-	EVENT_PINNED = 0x2,
-	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
--
cgit v1.2.2
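
Why hoist these? The event_type_t enum and the cpu_ctx_sched_*() forward
declarations have to be visible near the top of the file because that is
where the upcoming cgroup code will live, and it needs to schedule events
out and back in when a task migrates between cgroups. A minimal sketch of
the kind of caller this enables; example_cgroup_switch() is a hypothetical
name for illustration, not part of this series:

	static void example_cgroup_switch(struct perf_cpu_context *cpuctx)
	{
		/*
		 * Callable this early in the file only because of the
		 * forward declarations hoisted above.
		 */
		cpu_ctx_sched_out(cpuctx, EVENT_ALL);

		/* ...update per-cgroup state here... */

		cpu_ctx_sched_in(cpuctx, EVENT_ALL);
	}
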
From 5632ab12e9e1fcd7e94058567e181d8f35e83798 Mon Sep 17 00:00:00 2001
From: Stephane Eranian
Date: Mon, 3 Jan 2011 18:20:01 +0200
Subject: perf_events: Generalize use of event_filter_match()

Replace all occurrences of:

	event->cpu != -1 && event->cpu != smp_processor_id()

by a call to:

	!event_filter_match(event)

This makes the code more consistent and will make the cgroup
patch smaller.

Signed-off-by: Stephane Eranian
Signed-off-by: Peter Zijlstra
LKML-Reference: <4d220593.2308e30a.48c5.ffff8ae9@mx.google.com>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2c14e3afdf0d..dcdb19ed83a6 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -949,7 +949,7 @@ static void __perf_install_in_context(void *info)
 
 	add_event_to_ctx(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1094,7 +1094,7 @@ static void __perf_event_enable(void *info)
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1441,7 +1441,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
@@ -1473,7 +1473,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1700,7 +1700,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		hwc = &event->hw;
@@ -3899,7 +3899,7 @@ static int perf_event_task_match(struct perf_event *event)
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm || event->attr.mmap ||
@@ -4036,7 +4036,7 @@ static int perf_event_comm_match(struct perf_event *event)
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm)
@@ -4184,7 +4184,7 @@ static int perf_event_mmap_match(struct perf_event *event,
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if ((!executable && event->attr.mmap_data) ||
--
cgit v1.2.2
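
For reference, event_filter_match() already exists in kernel/perf_event.c
at this point in the series; from the pre-patch source it reads
approximately:

	static inline int event_filter_match(struct perf_event *event)
	{
		return event->cpu == -1 || event->cpu == smp_processor_id();
	}

so by De Morgan's law, !event_filter_match(event) is exactly the
open-coded event->cpu != -1 && event->cpu != smp_processor_id() test that
each hunk above removes; the conversion is purely mechanical.
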
From 4158755d3136f4cb05c1a8a260e9c06f93baeb48 Mon Sep 17 00:00:00 2001
From: Stephane Eranian
Date: Mon, 3 Jan 2011 18:20:01 +0200
Subject: perf_events: Add perf_event_time()

Adds perf_event_time() to try and centralize access to event
timing and in particular ctx->time. Prepares for cgroup support.

Signed-off-by: Stephane Eranian
Signed-off-by: Peter Zijlstra
LKML-Reference: <4d22059c.122ae30a.5e0e.ffff8b8b@mx.google.com>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 38 ++++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index dcdb19ed83a6..b782b7a79f00 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -268,6 +268,12 @@ static void update_context_time(struct perf_event_context *ctx)
 	ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
  */
@@ -281,7 +287,7 @@ static void update_event_times(struct perf_event *event)
 		return;
 
 	if (ctx->is_active)
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 	else
 		run_end = event->tstamp_stopped;
 
@@ -290,7 +296,7 @@ static void update_event_times(struct perf_event *event)
 	if (event->state == PERF_EVENT_STATE_INACTIVE)
 		run_end = event->tstamp_stopped;
 	else
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 
 	event->total_time_running = run_end - event->tstamp_running;
 }
@@ -546,6 +552,7 @@ event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
 	u64 delta;
 	/*
 	 * An event which could not be activated because of
@@ -557,7 +564,7 @@ event_sched_out(struct perf_event *event,
 	    && !event_filter_match(event)) {
 		delta = ctx->time - event->tstamp_stopped;
 		event->tstamp_running += delta;
-		event->tstamp_stopped = ctx->time;
+		event->tstamp_stopped = tstamp;
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -568,7 +575,7 @@ event_sched_out(struct perf_event *event,
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_stopped = tstamp;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -780,6 +787,8 @@ event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -796,9 +805,9 @@ event_sched_in(struct perf_event *event,
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	event->tstamp_running += tstamp - event->tstamp_stopped;
 
-	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+	event->shadow_ctx_time = tstamp - ctx->timestamp;
 
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
@@ -910,11 +919,13 @@ static int group_can_go_on(struct perf_event *event,
 static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	list_add_event(event, ctx);
 	perf_group_attach(event);
-	event->tstamp_enabled = ctx->time;
-	event->tstamp_running = ctx->time;
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_enabled = tstamp;
+	event->tstamp_running = tstamp;
+	event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -1054,14 +1065,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
 {
 	struct perf_event *sub;
+	u64 tstamp = perf_event_time(event);
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
-	event->tstamp_enabled = ctx->time - event->total_time_enabled;
+	event->tstamp_enabled = tstamp - event->total_time_enabled;
 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
-		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-			sub->tstamp_enabled =
-				ctx->time - sub->total_time_enabled;
-		}
+		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
 	}
 }
 
--
cgit v1.2.2
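
perf_event_time() looks like a trivial wrapper here, but it gives the
cgroup code a single place to hook per-cgroup time accounting: a cgroup
event must accumulate time only while its cgroup is active on the CPU,
not for the whole lifetime of the context. Roughly the shape the helper
takes in the later cgroup patch; is_cgroup_event() and
perf_cgroup_event_time() belong to that follow-up, not to this patch:

	static u64 perf_event_time(struct perf_event *event)
	{
		struct perf_event_context *ctx = event->ctx;

		/* cgroup events measure time against per-cgroup clocks */
		if (is_cgroup_event(event))
			return perf_cgroup_event_time(event);

		return ctx ? ctx->time : 0;
	}

Because every tstamp_* update above now funnels through the helper, that
later change lands in one place instead of a dozen call sites.
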