Diffstat (limited to 'kernel')
 kernel/perf_counter.c | 48 ++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 42 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index da8dfef4b472..ff8b4636f845 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2867,20 +2867,56 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 
 }
 
+static int perf_swcounter_is_counting(struct perf_counter *counter)
+{
+	struct perf_counter_context *ctx;
+	unsigned long flags;
+	int count;
+
+	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+		return 1;
+
+	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
+		return 0;
+
+	/*
+	 * If the counter is inactive, it could be just because
+	 * its task is scheduled out, or because it's in a group
+	 * which could not go on the PMU. We want to count in
+	 * the first case but not the second. If the context is
+	 * currently active then an inactive software counter must
+	 * be the second case. If it's not currently active then
+	 * we need to know whether the counter was active when the
+	 * context was last active, which we can determine by
+	 * comparing counter->tstamp_stopped with ctx->time.
+	 *
+	 * We are within an RCU read-side critical section,
+	 * which protects the existence of *ctx.
+	 */
+	ctx = counter->ctx;
+	spin_lock_irqsave(&ctx->lock, flags);
+	count = 1;
+	/* Re-check state now we have the lock */
+	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
+	    counter->ctx->is_active ||
+	    counter->tstamp_stopped < ctx->time)
+		count = 0;
+	spin_unlock_irqrestore(&ctx->lock, flags);
+	return count;
+}
+
 static int perf_swcounter_match(struct perf_counter *counter,
 				enum perf_event_types type,
 				u32 event, struct pt_regs *regs)
 {
-	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
-		return 0;
+	u64 event_config;
 
-	if (perf_event_raw(&counter->hw_event))
-		return 0;
+	event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;
 
-	if (perf_event_type(&counter->hw_event) != type)
+	if (!perf_swcounter_is_counting(counter))
 		return 0;
 
-	if (perf_event_id(&counter->hw_event) != event)
+	if (counter->hw_event.config != event_config)
 		return 0;
 
 	if (counter->hw_event.exclude_user && user_mode(regs))
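Note (not part of the patch above): a minimal, self-contained C sketch of the (type, event) -> event_config packing that the reworked perf_swcounter_match() compares against counter->hw_event.config. The shift value, helper name, and sample type/id values below are assumptions for illustration only; the real PERF_COUNTER_TYPE_SHIFT comes from the perf_counter ABI headers of this era.

/*
 * Sketch only: mirrors the event_config construction used in the patch,
 * ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event.  EXAMPLE_TYPE_SHIFT
 * is an assumed stand-in for PERF_COUNTER_TYPE_SHIFT.
 */
#include <assert.h>
#include <stdint.h>

#define EXAMPLE_TYPE_SHIFT	32	/* assumed: type in high bits, event id in low bits */

static uint64_t pack_event_config(uint32_t type, uint32_t event)
{
	return ((uint64_t) type << EXAMPLE_TYPE_SHIFT) | event;
}

int main(void)
{
	/* Hypothetical type/id values, just to exercise the packing. */
	uint64_t config = pack_event_config(1, 3);

	/*
	 * With the patch, matching reduces to one 64-bit comparison of
	 * counter->hw_event.config against the packed pair, replacing
	 * the separate raw/type/id checks that were removed.
	 */
	assert(config == pack_event_config(1, 3));
	assert(config != pack_event_config(1, 4));
	return 0;
}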