author	Paul Mackerras <paulus@samba.org>	2009-01-17 02:10:22 -0500
committer	Paul Mackerras <paulus@samba.org>	2009-01-17 02:10:22 -0500
commit	d859e29fe34cb833071b20aef860ee94fbad9bb2 (patch)
tree	6359fe345851db2b7e8379fa65b7ed6a873d3ee3 /include/linux/perf_counter.h
parent	3b6f9e5cb21964b7ce12bf81076f830885563ec8 (diff)
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features

This primarily adds a way for perf_counter users to enable and disable counters and groups. Enabling or disabling a counter or group also enables or disables all of the child counters that have been cloned from it to monitor children of the task monitored by the top-level counter. The userspace interface for enabling/disabling counters is an ioctl on the counter file descriptor.

Along the way this extends the code that handles child counters to handle child counter groups properly. A group with multiple counters will be cloned to child tasks if and only if the group leader has the hw_event.inherit bit set; if it is set, the whole group is cloned as a group in the child task.

In order to be able to enable or disable all child counters of a given top-level counter, we need a way to find them all. Hence I have added a child_list field to struct perf_counter, which is the head of the list of children for a top-level counter, or the link in that list for a child counter. That list is protected by the perf_counter.mutex field.

This also adds a mutex to the perf_counter_context struct. Previously the list of counters was protected just by the lock field in the context, which meant that perf_counter_init_task had to take that lock and then take whatever lock/mutex protects the top-level counter's child_list. But the counter enable/disable functions need to take that lock in order to traverse the list, then for each counter take the lock in that counter's context in order to change the counter's state safely, which would lead to a deadlock.

To solve this, we now have both a mutex and a spinlock in the context: taking either is sufficient to ensure the list of counters can't change, and you have to take both before changing the list. Now perf_counter_init_task takes the mutex instead of the lock (which incidentally means that inherit_counter can use GFP_KERNEL instead of GFP_ATOMIC) and thus avoids the possible deadlock. Similarly, the new enable/disable functions can take the mutex while traversing the list of child counters without incurring a possible deadlock when the counter manipulation code locks the context for a child counter.

We also had a misfeature whereby the first counter added to a context would possibly not go on until the next sched-in, because we were using ctx->nr_active to detect whether the context was running on a CPU. But nr_active is the number of active counters, and if that was zero (because the context didn't have any counters yet) it would look like the context wasn't running on a CPU, and so the retry code in __perf_install_in_context wouldn't retry. So this adds an 'is_active' field that is set when the context is on a CPU, even if it has no counters. The is_active field is only used for task contexts, not for per-cpu contexts.

If we enable a subsidiary counter in a group that is active on a CPU, and the arch code can't enable the counter, then we have to pull the whole group off the CPU. We do this with group_sched_out, which gets moved up in the file so that it comes before all its callers. This also adds similar logic to __perf_install_in_context so that the "all on, or none" invariant of groups is preserved when adding a new counter to a group.

Signed-off-by: Paul Mackerras <paulus@samba.org>
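As an illustration of the new interface, here is a minimal userspace sketch. Only the two ioctl names and numbers come from this patch; the counter file descriptor is assumed to have been obtained from the perf counter syscall beforehand (that step is elided), and measure_region is an invented helper name.

#include <stdio.h>
#include <sys/ioctl.h>

/* Mirrors the definitions this patch adds to include/linux/perf_counter.h. */
#define PERF_COUNTER_IOC_ENABLE		_IO('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO('$', 1)

/*
 * fd is assumed to be a perf counter file descriptor obtained elsewhere.
 * Per the commit message, enabling/disabling it also enables/disables any
 * child counters that were cloned from it into child tasks.
 */
static int measure_region(int fd)
{
	if (ioctl(fd, PERF_COUNTER_IOC_ENABLE) < 0) {
		perror("PERF_COUNTER_IOC_ENABLE");
		return -1;
	}

	/* ... the code being measured runs here ... */

	if (ioctl(fd, PERF_COUNTER_IOC_DISABLE) < 0) {
		perror("PERF_COUNTER_IOC_DISABLE");
		return -1;
	}
	return 0;
}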
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--	include/linux/perf_counter.h | 21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 7ab8e5f96f5b..33ba9fe0a781 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -14,6 +14,7 @@
 #define _LINUX_PERF_COUNTER_H
 
 #include <asm/atomic.h>
+#include <asm/ioctl.h>
 
 #ifdef CONFIG_PERF_COUNTERS
 # include <asm/perf_counter.h>
@@ -95,6 +96,12 @@ struct perf_counter_hw_event {
 };
 
 /*
+ * Ioctls that can be done on a perf counter fd:
+ */
+#define PERF_COUNTER_IOC_ENABLE		_IO('$', 0)
+#define PERF_COUNTER_IOC_DISABLE	_IO('$', 1)
+
+/*
  * Kernel-internal data types:
  */
 
@@ -173,8 +180,10 @@ struct perf_counter {
 	struct file			*filp;
 
 	struct perf_counter		*parent;
+	struct list_head		child_list;
+
 	/*
-	 * Protect attach/detach:
+	 * Protect attach/detach and child_list:
 	 */
 	struct mutex			mutex;
 
@@ -199,13 +208,21 @@ struct perf_counter {
 struct perf_counter_context {
 #ifdef CONFIG_PERF_COUNTERS
 	/*
-	 * Protect the list of counters:
+	 * Protect the states of the counters in the list,
+	 * nr_active, and the list:
 	 */
 	spinlock_t		lock;
+	/*
+	 * Protect the list of counters.  Locking either mutex or lock
+	 * is sufficient to ensure the list doesn't change; to change
+	 * the list you need to lock both the mutex and the spinlock.
+	 */
+	struct mutex		mutex;
 
 	struct list_head	counter_list;
 	int			nr_counters;
 	int			nr_active;
+	int			is_active;
 	struct task_struct	*task;
 #endif
 };
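To make the new locking rule concrete, here is a sketch of how a reader and a writer of ctx->counter_list would use the mutex/spinlock pair described in the comment above. This is illustrative code, not part of the patch: the function names are invented, and the list member name (list_entry) is assumed from the surrounding header.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/perf_counter.h>

/*
 * Illustrative only.  Taking either ctx->mutex or ctx->lock pins the
 * list for reading; modifying the list requires holding both.
 */
static void visit_counters(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	mutex_lock(&ctx->mutex);	/* either lock alone keeps the list stable */
	list_for_each_entry(counter, &ctx->counter_list, list_entry)
		; /* inspect counter; may sleep, since only the mutex is held */
	mutex_unlock(&ctx->mutex);
}

static void add_counter(struct perf_counter_context *ctx,
			struct perf_counter *counter)
{
	mutex_lock(&ctx->mutex);	/* writers take both: mutex first... */
	spin_lock_irq(&ctx->lock);	/* ...then the spinlock */
	list_add_tail(&counter->list_entry, &ctx->counter_list);
	ctx->nr_counters++;
	spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);
}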