author    Paul Mackerras <paulus@samba.org>    2009-06-01 03:52:30 -0400
committer Ingo Molnar <mingo@elte.hu>          2009-06-02 07:10:54 -0400
commit    3f731ca60afc29f5bcdb5fd2a04391466313a9ac (patch)
tree      ca9953e902e5043f62f56db31a0e990eed755e78 /kernel/sched.c
parent    f38b082081bf69a06fffb8b32a175999e2320c5b (diff)
perf_counter: Fix cpu migration counter
This fixes the cpu migration software counter to count correctly even
when contexts get swapped from one task to another.  Previously the
cpu migration counts reported by perf stat were bogus, ranging from
negative to several thousand for a single "lat_ctx 2 8 32" run.  With
this patch the cpu migration count reported for "lat_ctx 2 8 32" is
almost always between 35 and 44.

This fixes the problem by adding a call into the perf_counter code
from set_task_cpu when tasks are migrated.  This enables us to use the
generic swcounter code (with some modifications) for the cpu migration
counter.

This modifies the swcounter code to allow a NULL regs pointer to be
passed in to perf_swcounter_ctx_event() etc.  The cpu migration
counter does this because there isn't necessarily a pt_regs struct for
the task available.  In this case, the counter will not have interrupt
capability - but the migration counter didn't have interrupt
capability before, so this is no loss.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18979.35006.819769.416327@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
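As context for the NULL-regs change described above, the following is a
minimal, illustrative sketch (not the verbatim patch) of how a software
counter's add path can skip the overflow/interrupt step when no pt_regs
is available; the field and helper names are assumptions based on the
perf_counter code of that era.

/*
 * Illustrative sketch only, not the exact patch.  The idea from the
 * commit message: when the caller has no pt_regs (e.g. the cpu
 * migration counter), still account the event, but skip the overflow
 * path, which needs register state to record a sample.
 */
static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs, u64 addr)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);

	/* Only attempt overflow handling when register state exists. */
	if (counter->hw.irq_period && !neg && regs)
		perf_swcounter_overflow(counter, nmi, regs, addr);
}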
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  1
1 file changed, 1 insertion, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3226cc132e9f..8d43347a0c0d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1977,6 +1977,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
+		perf_counter_task_migration(p, new_cpu);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
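Because the diffstat above is limited to kernel/sched.c, only the new
hook call is visible here; the counterpart described in the commit
message lives on the perf_counter side.  The following is a hedged
sketch of what such a hook can look like, assuming the per-cpu context
and swcounter helper names of the perf_counter code at the time (treat
the exact names and signatures as assumptions, not the verbatim patch).

/*
 * Sketch of the perf_counter-side hook the commit message describes:
 * called from set_task_cpu() when a task migrates, it bumps the
 * software cpu-migration counter in both the per-cpu context and the
 * task's context, passing a NULL pt_regs since none is available.
 * Names and signatures are assumptions, not the verbatim patch.
 */
void perf_counter_task_migration(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx;

	/* Count the migration against counters bound to the target cpu. */
	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
				 PERF_COUNT_CPU_MIGRATIONS,
				 1, 1, NULL, 0);

	/* And against counters attached to the migrating task, if any. */
	ctx = perf_pin_task_context(task);
	if (ctx) {
		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
					 PERF_COUNT_CPU_MIGRATIONS,
					 1, 1, NULL, 0);
		perf_unpin_task_context(ctx);
	}
}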