author		Paul Mackerras <paulus@samba.org>	2009-01-14 05:00:30 -0500
committer	Paul Mackerras <paulus@samba.org>	2009-01-14 05:00:30 -0500
commit		3b6f9e5cb21964b7ce12bf81076f830885563ec8 (patch)
tree		e9d5ecffafa66cc3aeb259ade15a2611ad795327 /include/linux/perf_counter.h
parent		01d0287f068de2934109ba9b989d8807526cccc2 (diff)
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features

A pinned counter group is one that the user wants to have on the CPU whenever possible, i.e. whenever the associated task is running, for a per-task group, or always for a per-cpu group. If the system cannot satisfy that, it puts the group into an error state where it is not scheduled any more and reads from it return EOF (i.e. 0 bytes read). The group can be released from error state and made readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we have finer-grained enable/disable controls on counters we'll be able to reset the error state on individual groups.

An exclusive group is one that the user wants to be the only group using the CPU performance monitor hardware whenever it is on. The counter group scheduler will not schedule an exclusive group if there are already other groups on the CPU and will not schedule other groups onto the CPU if there is an exclusive group scheduled (that statement does not apply to groups containing only software counters, which can always go on and which do not prevent an exclusive group from going on). With an exclusive group, we will be able to let users program PMU registers at a low level without the concern that those settings will perturb other measurements.

Along the way this reorganizes things a little:

- is_software_counter() is moved to perf_counter.h.

- cpuctx->active_oncpu now records the number of hardware counters on the CPU, i.e. it now excludes software counters. Nothing was reading cpuctx->active_oncpu before, so this change is harmless.

- A new cpuctx->exclusive field records whether we currently have an exclusive group on the CPU.

- counter_sched_out moves higher up in perf_counter.c and gets called from __perf_counter_remove_from_context and __perf_counter_exit_task, where we used to have essentially the same code.

- __perf_counter_sched_in now goes through the counter list twice, doing the pinned counters in the first loop and the non-pinned counters in the second loop, in order to give the pinned counters the best chance to be scheduled in.

Note that only a group leader can be exclusive or pinned, and that attribute applies to the whole group. This avoids some awkwardness in some corner cases (e.g. where a group leader is closed and the other group members get added to the context list). If we want to relax that restriction later, we can, and it is easier to relax a restriction than to apply a new one.

This doesn't yet handle the case where a pinned counter is inherited and goes into error state in the child - the error state is not propagated up to the parent when the child exits, and arguably it should.

Signed-off-by: Paul Mackerras <paulus@samba.org>
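For illustration only (not part of this commit): a minimal userspace sketch, assuming the perf_counter ABI of this era, of how a pinned group leader might be requested and how the EOF-on-error behaviour described above could be handled. The counter fd is assumed to come from the perf counter open syscall (not shown); the helper names and the PR_TASK_PERF_COUNTERS_ENABLE fallback value are assumptions.

/*
 * Illustrative sketch only -- not taken from the patch.  Assumes the
 * perf_counter ABI of this era (<linux/perf_counter.h>).  The fallback
 * prctl value below is an assumption, matching the later
 * PR_TASK_PERF_EVENTS_ENABLE definition.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/perf_counter.h>

#ifndef PR_TASK_PERF_COUNTERS_ENABLE
#define PR_TASK_PERF_COUNTERS_ENABLE	32
#endif

/* Request a group leader that must stay on the PMU whenever possible. */
static void init_pinned_cycles_event(struct perf_counter_hw_event *hw_event)
{
	memset(hw_event, 0, sizeof(*hw_event));
	hw_event->type	 = PERF_COUNT_CPU_CYCLES;	/* example hardware event */
	hw_event->pinned = 1;
}

/*
 * Read a pinned counter.  A 0-byte read means the group was put into
 * error state because it could not be kept on the PMU; re-enable the
 * task's counters via prctl() and tell the caller the value is invalid.
 */
static int read_pinned_counter(int fd, uint64_t *value)
{
	ssize_t n = read(fd, value, sizeof(*value));

	if (n == 0) {
		prctl(PR_TASK_PERF_COUNTERS_ENABLE);
		return -1;
	}
	return n == (ssize_t)sizeof(*value) ? 0 : -1;
}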
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--	include/linux/perf_counter.h	15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index b21d1ea4c054..7ab8e5f96f5b 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -86,7 +86,10 @@ struct perf_counter_hw_event {
 			nmi            :  1, /* NMI sampling            */
 			raw            :  1, /* raw event type          */
 			inherit        :  1, /* children inherit it     */
-			__reserved_1   : 28;
+			pinned         :  1, /* must always be on PMU   */
+			exclusive      :  1, /* only counter on PMU     */
+
+			__reserved_1   : 26;
 
 	u64			__reserved_2;
 };
@@ -141,6 +144,7 @@ struct hw_perf_counter_ops {
  * enum perf_counter_active_state - the states of a counter
  */
 enum perf_counter_active_state {
+	PERF_COUNTER_STATE_ERROR	= -2,
 	PERF_COUNTER_STATE_OFF		= -1,
 	PERF_COUNTER_STATE_INACTIVE	=  0,
 	PERF_COUNTER_STATE_ACTIVE	=  1,
@@ -214,6 +218,7 @@ struct perf_cpu_context {
 	struct perf_counter_context	*task_ctx;
 	int				active_oncpu;
 	int				max_pertask;
+	int				exclusive;
 };
 
 /*
@@ -240,6 +245,14 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 			       struct perf_cpu_context *cpuctx,
 			       struct perf_counter_context *ctx, int cpu);
 
+/*
+ * Return 1 for a software counter, 0 for a hardware counter
+ */
+static inline int is_software_counter(struct perf_counter *counter)
+{
+	return !counter->hw_event.raw && counter->hw_event.type < 0;
+}
+
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
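The diff above covers only the header. As a reading aid, here is a minimal sketch of the admission rule the group scheduler applies, reconstructed from the rules in the commit message using the new cpuctx->exclusive field, active_oncpu (now hardware counters only) and is_software_counter(). It is not the actual kernel/perf_counter.c code: the function name is invented, and checking only the group leader is a simplification of the "groups containing only software counters" rule.

/*
 * Reading aid only -- reconstructed from the commit message, not taken
 * from kernel/perf_counter.c.  The name group_may_go_on() is invented
 * and the software-counter test is simplified to the group leader.
 */
static int group_may_go_on(struct perf_counter *leader,
			   struct perf_cpu_context *cpuctx)
{
	/* Groups consisting only of software counters can always go on. */
	if (is_software_counter(leader))
		return 1;

	/* An exclusive group already on the CPU keeps other groups off. */
	if (cpuctx->exclusive)
		return 0;

	/*
	 * An exclusive group cannot go on while other hardware counters
	 * are scheduled; active_oncpu now counts hardware counters only.
	 */
	if (leader->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;

	return 1;
}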