| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-08-07 13:49:01 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-08-09 06:54:46 -0400 |
| commit | 3a80b4a3539696f4b0574876326860323035a302 (patch) | |
| tree | 9d2901259a2b25b181997f89495452fd7f06827b /kernel | |
| parent | 3a43ce68ae1758fa6a839386025ef45acb6baa22 (diff) | |
perf_counter: Fix a race on perf_counter_ctx
While extending perfcounters with BTS hw-tracing, Markus
Metzger managed to trigger this warning:
[ 995.557128] WARNING: at kernel/perf_counter.c:1191 __perf_counter_task_sched_out+0x48/0x6b()
It triggers because commit
9f498cc5be7e013d8d6e4c616980ed0ffc8680d2 (perf_counter: Full
task tracing) moved the clearing of tsk->perf_counter_ctxp out
from under ctx->lock, which introduced a race against
perf_lock_task_context().
Move it back and deal with the exit notification by explicitly
passing along the former task context.
Reported-by: Markus T Metzger <markus.t.metzger@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1249667341.17467.5.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
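For context, the locking pattern the fix restores can be sketched roughly as below. This is a simplified, illustrative sketch rather than the actual kernel/perf_counter.c code: the type and function names (struct ctx, struct task, lock_task_ctx(), unpublish_task_ctx()) are invented for the example, and it only shows the shape of the race described above, assuming the lookup side takes ctx->lock and then re-checks the task's context pointer (as the commit message says perf_lock_task_context does), so the exit path must clear that pointer while holding the same lock.

```c
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/*
 * Illustrative sketch only -- not the real perf_counter code.
 * Names below are invented for the example.
 */
struct ctx {
	spinlock_t		lock;
	/* counters, refcount, ... */
};

struct task {
	struct ctx __rcu	*ctxp;	/* stands in for tsk->perf_counter_ctxp */
};

/* Lookup side: take the context's lock, then re-check the pointer. */
static struct ctx *lock_task_ctx(struct task *t)
{
	struct ctx *c;

retry:
	rcu_read_lock();
	c = rcu_dereference(t->ctxp);
	if (c) {
		spin_lock(&c->lock);
		/*
		 * Re-check under the lock: if the pointer changed while we
		 * were acquiring c->lock, drop everything and retry.
		 */
		if (c != rcu_dereference(t->ctxp)) {
			spin_unlock(&c->lock);
			rcu_read_unlock();
			goto retry;
		}
	}
	rcu_read_unlock();
	return c;	/* returned with c->lock held, or NULL */
}

/*
 * Exit side: the pointer must be cleared while holding c->lock.
 * Clearing it outside the lock lets the re-check above succeed on a
 * context that is already being torn down -- the race this commit fixes.
 */
static void unpublish_task_ctx(struct task *t, struct ctx *c)
{
	spin_lock(&c->lock);
	rcu_assign_pointer(t->ctxp, NULL);
	spin_unlock(&c->lock);
}
```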
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/perf_counter.c | 30 |
1 file changed, 15 insertions, 15 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 002310540417..546e62d62941 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2850,7 +2850,8 @@ perf_counter_read_event(struct perf_counter *counter,
  */
 
 struct perf_task_event {
 	struct task_struct		*task;
+	struct perf_counter_context	*task_ctx;
 
 	struct {
 		struct perf_event_header	header;
@@ -2910,24 +2911,23 @@ static void perf_counter_task_ctx(struct perf_counter_context *ctx,
 static void perf_counter_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
-	struct perf_counter_context *ctx;
+	struct perf_counter_context *ctx = task_event->task_ctx;
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_counter_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
-	ctx = rcu_dereference(current->perf_counter_ctxp);
+	if (!ctx)
+		ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
 	if (ctx)
 		perf_counter_task_ctx(ctx, task_event);
 	rcu_read_unlock();
 }
 
-static void perf_counter_task(struct task_struct *task, int new)
+static void perf_counter_task(struct task_struct *task,
+			      struct perf_counter_context *task_ctx,
+			      int new)
 {
 	struct perf_task_event task_event;
 
@@ -2937,8 +2937,9 @@ static void perf_counter_task(struct task_struct *task, int new)
 		return;
 
 	task_event = (struct perf_task_event){
-		.task  = task,
-		.event  = {
+		.task     = task,
+		.task_ctx = task_ctx,
+		.event    = {
 			.header = {
 				.type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
 				.misc = 0,
@@ -2956,7 +2957,7 @@ static void perf_counter_task(struct task_struct *task, int new)
 
 void perf_counter_fork(struct task_struct *task)
 {
-	perf_counter_task(task, 1);
+	perf_counter_task(task, NULL, 1);
 }
 
 /*
@@ -4310,7 +4311,7 @@ void perf_counter_exit_task(struct task_struct *child)
 	unsigned long flags;
 
 	if (likely(!child->perf_counter_ctxp)) {
-		perf_counter_task(child, 0);
+		perf_counter_task(child, NULL, 0);
 		return;
 	}
 
@@ -4330,6 +4331,7 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	spin_lock(&child_ctx->lock);
+	child->perf_counter_ctxp = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
 	 * swapped to another process while we're removing all
@@ -4343,9 +4345,7 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * won't get any samples after PERF_EVENT_EXIT. We can however still
 	 * get a few PERF_EVENT_READ events.
 	 */
-	perf_counter_task(child, 0);
-
-	child->perf_counter_ctxp = NULL;
+	perf_counter_task(child, child_ctx, 0);
 
 	/*
 	 * We can recurse on the same lock type through: