author		Ingo Molnar <mingo@elte.hu>	2009-05-29 05:25:09 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-29 08:28:36 -0400
commit		3f4dee227348daac32f36daad9a91059efd0723e (patch)
tree		9aa0311f0c4f961a2c2cab1d2fbb0994cff1b6d9
parent		ad3a37de81c45f6c20d410ece86004b98f7b6d84 (diff)
perf_counter: Fix cpuctx->task_ctx races
Peter noticed that we are sometimes reading cpuctx->task_ctx with
interrupts enabled.

Noticed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
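The same fix pattern is applied in all four smp-call handlers touched below: interrupts are disabled before cpuctx->task_ctx is read, every early-return path re-enables them itself, and the later spin_lock_irqsave() is downgraded to a plain spin_lock() because interrupts are already off at that point. A minimal sketch of the pattern, assuming a hypothetical handler name and a spin_unlock_irqrestore() unlock path (the unlock sites are not part of this diff):

	static void example_smp_call_handler(void *info)	/* hypothetical name */
	{
		struct perf_counter *counter = info;
		struct perf_counter_context *ctx = counter->ctx;
		struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
		unsigned long flags;

		local_irq_save(flags);	/* close the race window before the check */

		/* With IRQs off, this cpu cannot switch task contexts under us. */
		if (ctx->task && cpuctx->task_ctx != ctx) {
			local_irq_restore(flags);	/* early exit must re-enable IRQs */
			return;
		}

		spin_lock(&ctx->lock);	/* IRQs already disabled; plain lock suffices */
		/* ... operate on the counter context ... */
		spin_unlock_irqrestore(&ctx->lock, flags);	/* assumed unlock path */
	}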
 kernel/perf_counter.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index db843f812a60..eb346048f00f 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -234,15 +234,18 @@ static void __perf_counter_remove_from_context(void *info)
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a task context, we need to check whether it is
 	 * the current task context of this cpu. If not it has been
 	 * scheduled out before the smp call arrived.
 	 */
-	if (ctx->task && cpuctx->task_ctx != ctx)
+	if (ctx->task && cpuctx->task_ctx != ctx) {
+		local_irq_restore(flags);
 		return;
+	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 	/*
 	 * Protect the list operation against NMI by disabling the
 	 * counters on a global level.
@@ -382,14 +385,17 @@ static void __perf_counter_disable(void *info)
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a per-task counter, need to check whether this
 	 * counter's task is the current task on this cpu.
 	 */
-	if (ctx->task && cpuctx->task_ctx != ctx)
+	if (ctx->task && cpuctx->task_ctx != ctx) {
+		local_irq_restore(flags);
 		return;
+	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 
 	/*
 	 * If the counter is on, turn it off.
@@ -615,6 +621,7 @@ static void __perf_install_in_context(void *info)
 	unsigned long flags;
 	int err;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a task context, we need to check whether it is
 	 * the current task context of this cpu. If not it has been
@@ -623,12 +630,14 @@ static void __perf_install_in_context(void *info)
 	 * on this cpu because it had no counters.
 	 */
 	if (ctx->task && cpuctx->task_ctx != ctx) {
-		if (cpuctx->task_ctx || ctx->task != current)
+		if (cpuctx->task_ctx || ctx->task != current) {
+			local_irq_restore(flags);
 			return;
+		}
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
@@ -745,17 +754,20 @@ static void __perf_counter_enable(void *info)
 	unsigned long flags;
 	int err;
 
+	local_irq_save(flags);
 	/*
 	 * If this is a per-task counter, need to check whether this
 	 * counter's task is the current task on this cpu.
 	 */
 	if (ctx->task && cpuctx->task_ctx != ctx) {
-		if (cpuctx->task_ctx || ctx->task != current)
+		if (cpuctx->task_ctx || ctx->task != current) {
+			local_irq_restore(flags);
 			return;
+		}
 		cpuctx->task_ctx = ctx;
 	}
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
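Note: the corresponding unlock sites are not shown in this diff; assuming they remain spin_unlock_irqrestore(&ctx->lock, flags), the flags value now captured by local_irq_save() at function entry still re-enables interrupts exactly once on the normal exit path, which is why only the lock side needed to change.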