aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/perf_counter.c
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2009-06-01 03:49:14 -0400
committerIngo Molnar <mingo@elte.hu>2009-06-01 04:04:06 -0400
commit880ca15adf2392770a68047e7a98e076ff4d21da (patch)
treea1707991a13651bcefb5d92a2ddee9b3470552d9 /kernel/perf_counter.c
parent25346b93ca079080c9cb23331db5c4f6404e8530 (diff)
perf_counter: Allow software counters to count while task is not running
This changes perf_swcounter_match() so that per-task software counters can count events that occur while their associated task is not running. This will allow us to use the generic software counter code for counting task migrations, which can occur while the task is not scheduled in. To do this, we have to distinguish between the situations where the counter is inactive because its task has been scheduled out, and those where the counter is inactive because it is part of a group that was not able to go on the PMU. In the former case we want the counter to count, but not in the latter case. If the context is active, we have the latter case. If the context is inactive then we need to know whether the counter was counting when the context was last active, which we can determine by comparing its ->tstamp_stopped timestamp with the context's timestamp. This also folds three checks in perf_swcounter_match, checking perf_event_raw(), perf_event_type() and perf_event_id() individually, into a single 64-bit comparison on counter->hw_event.config, as an optimization. Signed-off-by: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Mike Galbraith <efault@gmx.de> Cc: Paul Mackerras <paulus@samba.org> Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: John Kacur <jkacur@redhat.com> LKML-Reference: <18979.34810.259718.955621@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--kernel/perf_counter.c48
1 file changed, 42 insertions, 6 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index da8dfef4b472..ff8b4636f845 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2867,20 +2867,56 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
2867 2867
2868} 2868}
2869 2869
2870static int perf_swcounter_is_counting(struct perf_counter *counter)
2871{
2872 struct perf_counter_context *ctx;
2873 unsigned long flags;
2874 int count;
2875
2876 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
2877 return 1;
2878
2879 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
2880 return 0;
2881
2882 /*
2883 * If the counter is inactive, it could be just because
2884 * its task is scheduled out, or because it's in a group
2885 * which could not go on the PMU. We want to count in
2886 * the first case but not the second. If the context is
2887 * currently active then an inactive software counter must
2888 * be the second case. If it's not currently active then
2889 * we need to know whether the counter was active when the
2890 * context was last active, which we can determine by
2891 * comparing counter->tstamp_stopped with ctx->time.
2892 *
2893 * We are within an RCU read-side critical section,
2894 * which protects the existence of *ctx.
2895 */
2896 ctx = counter->ctx;
2897 spin_lock_irqsave(&ctx->lock, flags);
2898 count = 1;
2899 /* Re-check state now we have the lock */
2900 if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
2901 counter->ctx->is_active ||
2902 counter->tstamp_stopped < ctx->time)
2903 count = 0;
2904 spin_unlock_irqrestore(&ctx->lock, flags);
2905 return count;
2906}
2907
2870static int perf_swcounter_match(struct perf_counter *counter, 2908static int perf_swcounter_match(struct perf_counter *counter,
2871 enum perf_event_types type, 2909 enum perf_event_types type,
2872 u32 event, struct pt_regs *regs) 2910 u32 event, struct pt_regs *regs)
2873{ 2911{
2874 if (counter->state != PERF_COUNTER_STATE_ACTIVE) 2912 u64 event_config;
2875 return 0;
2876 2913
2877 if (perf_event_raw(&counter->hw_event)) 2914 event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;
2878 return 0;
2879 2915
2880 if (perf_event_type(&counter->hw_event) != type) 2916 if (!perf_swcounter_is_counting(counter))
2881 return 0; 2917 return 0;
2882 2918
2883 if (perf_event_id(&counter->hw_event) != event) 2919 if (counter->hw_event.config != event_config)
2884 return 0; 2920 return 0;
2885 2921
2886 if (counter->hw_event.exclude_user && user_mode(regs)) 2922 if (counter->hw_event.exclude_user && user_mode(regs))