author		Stephane Eranian <eranian@google.com>	2011-01-03 11:20:01 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-01-07 09:08:50 -0500
commit		0b3fcf178deefd7b64154c2c0760a2c63df0b74f (patch)
tree		b84509c9ac5935ac41f6fc9c444bc406cd2c54d8 /kernel
parent		23a2f3ab46596d9fd0b0e592d2101bea90970594 (diff)
perf_events: Move code around to prepare for cgroup
In particular, this patch moves perf_event_exit_task() before
cgroup_exit() to allow for cgroup support. The cgroup_exit()
function detaches the cgroups attached to a task.

Other movements include hoisting some definitions and inlines
to the top of perf_event.c.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4d22058b.cdace30a.4657.ffff95b1@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/exit.c	14
-rw-r--r--	kernel/perf_event.c	28
2 files changed, 26 insertions, 16 deletions
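
The ordering constraint stated in the commit message can be made concrete with a small userspace sketch. This is an illustration only, not kernel code: all toy_* names are hypothetical stand-ins for perf_event_exit_task(), cgroup_exit() and the task/cgroup structures. The point is simply that the flush has to run while the task is still attached to its cgroups, because the detach removes the information that per-cgroup teardown would need.

#include <stdio.h>

/* Toy model of the ordering constraint; not kernel code. */
struct toy_cgroup {
	const char *name;
};

struct toy_task {
	const char *comm;
	struct toy_cgroup *cgroup;	/* cleared by toy_cgroup_exit() */
};

/* Stands in for perf_event_exit_task(): wants tsk->cgroup still valid. */
static void toy_perf_event_exit_task(struct toy_task *tsk)
{
	const char *grp = tsk->cgroup ? tsk->cgroup->name : "(already detached!)";
	printf("flushing inherited counters of %s, cgroup=%s\n", tsk->comm, grp);
}

/* Stands in for cgroup_exit(): detaches the task from its cgroups. */
static void toy_cgroup_exit(struct toy_task *tsk)
{
	tsk->cgroup = NULL;
}

int main(void)
{
	struct toy_cgroup cg = { "mon" };
	struct toy_task tsk = { "worker", &cg };

	/* Order after this patch: flush first, then detach. */
	toy_perf_event_exit_task(&tsk);
	toy_cgroup_exit(&tsk);
	return 0;
}

Swapping the two calls in main() would print "(already detached!)", which is exactly the situation the reordering in kernel/exit.c below avoids.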
diff --git a/kernel/exit.c b/kernel/exit.c
index 676149a4ac5..8cb89045ecf 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -994,6 +994,15 @@ NORET_TYPE void do_exit(long code)
 	exit_fs(tsk);
 	check_stack_usage();
 	exit_thread();
+
+	/*
+	 * Flush inherited counters to the parent - before the parent
+	 * gets woken up by child-exit notifications.
+	 *
+	 * because of cgroup mode, must be called before cgroup_exit()
+	 */
+	perf_event_exit_task(tsk);
+
 	cgroup_exit(tsk, 1);
 
 	if (group_dead)
@@ -1007,11 +1016,6 @@ NORET_TYPE void do_exit(long code)
 	 * FIXME: do that only when needed, using sched_exit tracepoint
 	 */
 	flush_ptrace_hw_breakpoint(tsk);
-	/*
-	 * Flush inherited counters to the parent - before the parent
-	 * gets woken up by child-exit notifications.
-	 */
-	perf_event_exit_task(tsk);
 
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf1e8c..2c14e3afdf0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,6 +38,12 @@
 
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void)	{ }
 
 extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@ extern __weak const char *perf_pmu_name(void)
 	return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+	return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 	put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-	return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -1193,12 +1205,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
 	return 0;
 }
 
-enum event_type_t {
-	EVENT_FLEXIBLE = 0x1,
-	EVENT_PINNED = 0x2,
-	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
 			  struct perf_cpu_context *cpuctx,
 			  enum event_type_t event_type)
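
As for the hoisting in kernel/perf_event.c, moving enum event_type_t, the cpu_ctx_sched_out()/cpu_ctx_sched_in() prototypes and the perf_clock() inline to the top of the file lets code added earlier in the translation unit (the cgroup support this commit prepares for) reference them before their definitions. A minimal, self-contained C illustration of that rule follows; the function names below are hypothetical, not the kernel's.

#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED   = 0x2,
};

/* Forward declaration hoisted above its first user. */
static void sched_out(enum event_type_t type);

/* Code placed near the top of the file can now call sched_out(). */
static void early_caller(void)
{
	sched_out(EVENT_PINNED);
}

/* The definition stays further down, unchanged. */
static void sched_out(enum event_type_t type)
{
	printf("sched_out(%d)\n", (int)type);
}

int main(void)
{
	early_caller();
	return 0;
}

Without the hoisted prototype, the earlier call would not compile under modern C, which is why the patch moves declarations up rather than duplicating definitions.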