author     Paul Mackerras <paulus@samba.org>   2009-06-01 03:52:30 -0400
committer  Ingo Molnar <mingo@elte.hu>         2009-06-02 07:10:54 -0400
commit     3f731ca60afc29f5bcdb5fd2a04391466313a9ac
tree       ca9953e902e5043f62f56db31a0e990eed755e78 /include/linux/perf_counter.h
parent     f38b082081bf69a06fffb8b32a175999e2320c5b
perf_counter: Fix cpu migration counter
This fixes the cpu migration software counter to count
correctly even when contexts get swapped from one task to
another. Previously the cpu migration counts reported by perf
stat were bogus, ranging from negative to several thousand for
a single "lat_ctx 2 8 32" run. With this patch the cpu
migration count reported for "lat_ctx 2 8 32" is almost always
between 35 and 44.
This fixes the problem by adding a call into the perf_counter
code from set_task_cpu when tasks are migrated. This enables
us to use the generic swcounter code (with some modifications)
for the cpu migration counter.
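For context, the hunks below only touch the header; the hook itself lands in
kernel/sched.c and kernel/perf_counter.c. The following is a minimal sketch of
the approach, not the exact code from those files: the argument order of
perf_swcounter_ctx_event() and the PERF_COUNT_CPU_MIGRATIONS event id are
assumptions based on the swcounter code of that era.

    /* kernel/sched.c: set_task_cpu(), sketch of the new call site */
    if (old_cpu != new_cpu) {
            p->se.nr_migrations++;
            /* charge one migration event to the software counters */
            perf_counter_task_migration(p, new_cpu);
    }

    /* kernel/perf_counter.c: count the event against both the per-cpu
     * context and the task's context, passing a NULL regs pointer.
     * (The real code would also have to pin the task context against
     * concurrent removal.) */
    void perf_counter_task_migration(struct task_struct *task, int cpu)
    {
            struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
            struct perf_counter_context *ctx = task->perf_counter_ctxp;

            perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
                                     PERF_COUNT_CPU_MIGRATIONS,
                                     1, 1, NULL, 0);
            if (ctx)
                    perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
                                             PERF_COUNT_CPU_MIGRATIONS,
                                             1, 1, NULL, 0);
    }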
This modifies the swcounter code to allow a NULL regs pointer
to be passed in to perf_swcounter_ctx_event() etc. The cpu
migration counter does this because there isn't necessarily a
pt_regs struct for the task available. In this case, the
counter will not have interrupt capability - but the migration
counter didn't have interrupt capability before, so this is no
loss.
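The NULL-regs handling amounts to skipping the parts of the swcounter path
that need register state. Roughly, and again only as a sketch (field names
such as exclude_user, exclude_kernel and irq_period are assumed from the
perf_counter code of that time):

    /* kernel/perf_counter.c: tolerate a NULL regs pointer, sketch */
    static int perf_swcounter_match(struct perf_counter *counter,
                                    enum perf_event_types type,
                                    u32 event, struct pt_regs *regs)
    {
            /* ... type/event checks unchanged ... */
            if (regs) {
                    /* user/kernel filtering needs register state */
                    if (counter->hw_event.exclude_user && user_mode(regs))
                            return 0;
                    if (counter->hw_event.exclude_kernel && !user_mode(regs))
                            return 0;
            }
            return 1;
    }

    static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
                                   int nmi, struct pt_regs *regs, u64 addr)
    {
            int neg = atomic64_add_negative(nr, &counter->hw.count);

            /* with no regs there is nothing to sample, so skip the
             * overflow/interrupt path entirely */
            if (counter->hw.irq_period && !neg && regs)
                    perf_swcounter_overflow(counter, nmi, regs, addr);
    }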
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18979.35006.819769.416327@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/perf_counter.h')
 include/linux/perf_counter.h | 4 ++++
 1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 0e57d8cc5a3d..deb9acf9ad2a 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -615,6 +615,8 @@ extern void perf_counter_munmap(unsigned long addr, unsigned long len,
 
 extern void perf_counter_comm(struct task_struct *tsk);
 
+extern void perf_counter_task_migration(struct task_struct *task, int cpu);
+
 #define MAX_STACK_DEPTH	255
 
 struct perf_callchain_entry {
@@ -668,6 +670,8 @@ perf_counter_munmap(unsigned long addr, unsigned long len,
 
 static inline void perf_counter_comm(struct task_struct *tsk) { }
 static inline void perf_counter_init(void) { }
+static inline void perf_counter_task_migration(struct task_struct *task,
+					       int cpu) { }
 #endif
 
 #endif /* __KERNEL__ */