author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-19 07:22:51 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-19 07:43:12 -0400
commit	e5289d4a181fb6c0b7a7607649af2ffdc491335c (patch)
tree	6c9e9ab46a29c538e4c505d3d3cd135265db08a2 /kernel
parent	f5970550d5ccf90453cbd7d260370ea99d1f6513 (diff)
perf_counter: Simplify and fix task migration counting
The task migrations counter was causing rare and hard to decipher memory
corruptions under load. After a day of debugging and bisection we found
that the problem was introduced with:

  3f731ca: perf_counter: Fix cpu migration counter

Turning them off fixes the crashes. Incidentally, the whole
perf_counter_task_migration() logic can be done simpler as well, by
injecting a proper sw-counter event.

This cleanup also fixed the crashes. The precise failure mode is not
completely clear yet, but we are clearly not unhappy about having a fix ;-)

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
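For reference, the whole replacement boils down to the single call visible in the kernel/sched.c hunk below; reading from that call site, the arguments appear to be the software event id, the count, an nmi flag, a pt_regs pointer, and an address:

	perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);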
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_counter.c	23
-rw-r--r--	kernel/sched.c	3
2 files changed, 3 insertions(+), 23 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 7e9108efd305..8d4f0dd41c22 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -124,7 +124,7 @@ void perf_enable(void)
 
 static void get_ctx(struct perf_counter_context *ctx)
 {
-	atomic_inc(&ctx->refcount);
+	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
 }
 
 static void free_ctx(struct rcu_head *head)
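The get_ctx() change above is the debugging aid that makes refcount misuse visible: atomic_inc_not_zero() only takes a reference while the count is still nonzero, so reviving a context that has already dropped to zero (a use-after-free in the making) now trips the WARN_ON instead of silently corrupting memory. A minimal userspace sketch of the idiom, assuming nothing beyond C11 atomics (the helper name mirrors the kernel's; everything else here is illustrative):

	#include <stdio.h>
	#include <stdbool.h>
	#include <stdatomic.h>

	/* Like the kernel primitive: take a reference only if the count
	 * is still nonzero, i.e. refuse to "resurrect" an object that
	 * another path has already begun freeing. */
	static bool atomic_inc_not_zero(atomic_int *v)
	{
		int old = atomic_load(v);

		while (old != 0) {
			/* On failure, old is reloaded and we retry. */
			if (atomic_compare_exchange_weak(v, &old, old + 1))
				return true;	/* reference taken */
		}
		return false;			/* object is already dying */
	}

	int main(void)
	{
		atomic_int refcount = 1;

		if (atomic_inc_not_zero(&refcount))	/* ok: 1 -> 2 */
			printf("got reference, count=%d\n", atomic_load(&refcount));

		atomic_store(&refcount, 0);		/* last reference dropped */

		if (!atomic_inc_not_zero(&refcount))	/* would be use-after-free */
			printf("refused to revive a dead object\n");

		return 0;
	}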
@@ -3467,27 +3467,6 @@ static const struct pmu perf_ops_task_clock = {
 	.read = task_clock_perf_counter_read,
 };
 
-/*
- * Software counter: cpu migrations
- */
-void perf_counter_task_migration(struct task_struct *task, int cpu)
-{
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-	struct perf_counter_context *ctx;
-
-	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
-				 PERF_COUNT_SW_CPU_MIGRATIONS,
-				 1, 1, NULL, 0);
-
-	ctx = perf_pin_task_context(task);
-	if (ctx) {
-		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
-					 PERF_COUNT_SW_CPU_MIGRATIONS,
-					 1, 1, NULL, 0);
-		perf_unpin_context(ctx);
-	}
-}
-
 #ifdef CONFIG_EVENT_PROFILE
 void perf_tpcounter_event(int event_id)
 {
diff --git a/kernel/sched.c b/kernel/sched.c
index 8fb88a906aaa..f46540b359c0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1978,7 +1978,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-		perf_counter_task_migration(p, new_cpu);
+		perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+				     1, 1, NULL, 0);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 			  new_cfsrq->min_vruntime;
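Why does the one-liner suffice where the deleted helper pinned the task context by hand? As far as the perf_counter code of this vintage goes, perf_swcounter_event() already delivers the event both to the current CPU's context and, under RCU, to the current task's context. A rough sketch of that delivery path, reconstructed from the surrounding code rather than quoted from it (treat the names and details as assumptions):

	/*
	 * Hedged reconstruction, not the verbatim kernel function: the
	 * generic software-event path hands one event to the per-CPU
	 * context and then, under RCU, to the current task's context if
	 * it has one -- which is why callers no longer need the
	 * perf_pin_task_context()/perf_unpin_context() dance.
	 */
	static void do_perf_swcounter_event(u32 event, u64 nr, int nmi,
					    struct pt_regs *regs, u64 addr)
	{
		struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
		struct perf_counter_context *ctx;

		perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
					 event, nr, nmi, regs, addr);

		rcu_read_lock();
		ctx = rcu_dereference(current->perf_counter_ctxp);
		if (ctx)
			perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
						 event, nr, nmi, regs, addr);
		rcu_read_unlock();

		put_cpu_var(perf_cpu_context);
	}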