author		Paul Mackerras <paulus@samba.org>	2009-05-11 01:46:10 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-11 06:10:53 -0400
commit		a08b159fc243dbfe415250466d24cfc5010deee5 (patch)
tree		39bb59aaf183021e6d9b02ed26dc8a4930d00f0b /kernel/perf_counter.c
parent		6751b71ea2c7ab8c0d65f01973a3fc8ea16992f4 (diff)
perf_counter: don't count scheduler ticks as context switches
The context-switch software counter gives inflated values at present
because each scheduler tick and each process-wide counter
enable/disable prctl gets counted as a context switch.
This happens because perf_counter_task_tick, perf_counter_task_disable
and perf_counter_task_enable all call perf_counter_task_sched_out,
which calls perf_swcounter_event to record a context switch event.
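For reference, the pre-patch sched-out path looked roughly like this. This is a sketch reconstructed from the diff below rather than the verbatim source at this commit, so incidental details (e.g. context-time updates) are elided; the perf_swcounter_event() call is what fires for every caller, whether or not a real context switch happened:

	void perf_counter_task_sched_out(struct task_struct *task, int cpu)
	{
		struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
		struct perf_counter_context *ctx = &task->perf_counter_ctx;
		struct pt_regs *regs;

		/* no task context currently scheduled in, nothing to do */
		if (likely(!cpuctx->task_ctx))
			return;

		regs = task_pt_regs(task);
		/* counts a context switch unconditionally - this is what
		 * inflates the count when we get here from the scheduler
		 * tick or the enable/disable prctl paths */
		perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);

		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}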
This fixes it by introducing __perf_counter_task_sched_out, a variant of
perf_counter_task_sched_out for internal use within the perf_counter
code, and making perf_counter_task_{tick,disable,enable} call it
instead. This variant doesn't record a context-switch event, and takes
a struct perf_counter_context * rather than a task and CPU. A new
variant is added, rather than changing the behaviour or interface of
perf_counter_task_sched_out, because the latter is also called from
other code.
[ Impact: fix inflated context-switch event counts ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <18951.48034.485580.498953@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a5bdc93ac477..7373b96bc36c 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -837,6 +837,14 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
 	cpuctx->task_ctx = NULL;
 }
 
+static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+	__perf_counter_sched_out(ctx, cpuctx);
+	cpuctx->task_ctx = NULL;
+}
+
 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
 {
 	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
@@ -943,15 +951,13 @@ int perf_counter_task_disable(void)
 	struct perf_counter *counter;
 	unsigned long flags;
 	u64 perf_flags;
-	int cpu;
 
 	if (likely(!ctx->nr_counters))
 		return 0;
 
 	local_irq_save(flags);
-	cpu = smp_processor_id();
 
-	perf_counter_task_sched_out(curr, cpu);
+	__perf_counter_task_sched_out(ctx);
 
 	spin_lock(&ctx->lock);
 
@@ -989,7 +995,7 @@ int perf_counter_task_enable(void)
 	local_irq_save(flags);
 	cpu = smp_processor_id();
 
-	perf_counter_task_sched_out(curr, cpu);
+	__perf_counter_task_sched_out(ctx);
 
 	spin_lock(&ctx->lock);
 
@@ -1054,7 +1060,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 	ctx = &curr->perf_counter_ctx;
 
 	perf_counter_cpu_sched_out(cpuctx);
-	perf_counter_task_sched_out(curr, cpu);
+	__perf_counter_task_sched_out(ctx);
 
 	rotate_ctx(&cpuctx->ctx);
 	rotate_ctx(ctx);