author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-09-07 09:55:13 -0400
committer  Ingo Molnar <mingo@elte.hu>                2010-09-09 14:46:33 -0400
commit     eb184479874238393ac186c4e054d24311c34aaa
tree       ac7206becfb3e0d18600252d8f5aa15478c32390 /kernel
parent     97dee4f3206622f31396dede2b5ddb8670458f56
perf: Clean up perf_event_context allocation
Unify the two perf_event_context allocation sites.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_event.c  41
1 files changed, 26 insertions, 15 deletions
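
For orientation before the raw hunks: the patch replaces the two open-coded kzalloc() + __perf_event_init_context() sequences with a single helper, alloc_perf_context(). The sketch below is assembled from the added lines in the diff that follows, not an independent implementation; the per-CPU context in perf_pmu_register() still calls __perf_event_init_context() directly and does not go through the helper.

```c
/*
 * Shape of the consolidated allocator introduced by this patch
 * (reproduced from the '+' lines below).
 */
static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	if (task) {
		/* Only task contexts take a reference on the task. */
		ctx->task = task;
		get_task_struct(task);
	}
	ctx->pmu = pmu;

	return ctx;
}
```

Because the task reference now moves into the helper, the cmpxchg() race path below drops it with put_task_struct() before kfree()ing the losing context, and inherit_task_group() no longer needs its own get_task_struct(child).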
```diff
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index dae0e2f30293..13d98d756347 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1959,9 +1959,7 @@ exit_put:
 /*
  * Initialize the perf_event context in a task_struct:
  */
-static void
-__perf_event_init_context(struct perf_event_context *ctx,
-			  struct task_struct *task)
+static void __perf_event_init_context(struct perf_event_context *ctx)
 {
 	raw_spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
@@ -1969,7 +1967,25 @@ __perf_event_init_context(struct perf_event_context *ctx,
 	INIT_LIST_HEAD(&ctx->flexible_groups);
 	INIT_LIST_HEAD(&ctx->event_list);
 	atomic_set(&ctx->refcount, 1);
-	ctx->task = task;
+}
+
+static struct perf_event_context *
+alloc_perf_context(struct pmu *pmu, struct task_struct *task)
+{
+	struct perf_event_context *ctx;
+
+	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+
+	__perf_event_init_context(ctx);
+	if (task) {
+		ctx->task = task;
+		get_task_struct(task);
+	}
+	ctx->pmu = pmu;
+
+	return ctx;
 }
 
 static struct perf_event_context *
@@ -2036,22 +2052,22 @@ retry:
 	}
 
 	if (!ctx) {
-		ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+		ctx = alloc_perf_context(pmu, task);
 		err = -ENOMEM;
 		if (!ctx)
 			goto errout;
-		__perf_event_init_context(ctx, task);
-		ctx->pmu = pmu;
+
 		get_ctx(ctx);
+
 		if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
 			/*
 			 * We raced with some other task; use
 			 * the context they set.
 			 */
+			put_task_struct(task);
 			kfree(ctx);
 			goto retry;
 		}
-		get_task_struct(task);
 	}
 
 	put_task_struct(task);
@@ -5044,7 +5060,7 @@ int perf_pmu_register(struct pmu *pmu)
 		struct perf_cpu_context *cpuctx;
 
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
-		__perf_event_init_context(&cpuctx->ctx, NULL);
+		__perf_event_init_context(&cpuctx->ctx);
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->timer_interval = TICK_NSEC;
 		hrtimer_init(&cpuctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -5866,15 +5882,11 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		 * child.
 		 */
 
-		child_ctx = kzalloc(sizeof(struct perf_event_context),
-				    GFP_KERNEL);
+		child_ctx = alloc_perf_context(event->pmu, child);
 		if (!child_ctx)
 			return -ENOMEM;
 
-		__perf_event_init_context(child_ctx, child);
-		child_ctx->pmu = event->pmu;
 		child->perf_event_ctxp = child_ctx;
-		get_task_struct(child);
 	}
 
 	ret = inherit_group(event, parent, parent_ctx,
@@ -5886,7 +5898,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 	return ret;
 }
 
-
 /*
  * Initialize the perf_event context in task_struct
  */
```