Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Makefile       |    1
-rw-r--r--   kernel/exit.c         |   13
-rw-r--r--   kernel/fork.c         |    1
-rw-r--r--   kernel/perf_counter.c | 2212
-rw-r--r--   kernel/sched.c        |   93
-rw-r--r--   kernel/sys.c          |    7
-rw-r--r--   kernel/sys_ni.c       |    3
7 files changed, 2323 insertions, 7 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index bab1dffe37e9..63c697529ca1 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
+obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/exit.c b/kernel/exit.c
index 6686ed1e4aa3..7a14a2b504f5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -158,6 +158,9 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 {
 	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
+#ifdef CONFIG_PERF_COUNTERS
+	WARN_ON_ONCE(!list_empty(&tsk->perf_counter_ctx.counter_list));
+#endif
 	trace_sched_process_free(tsk);
 	put_task_struct(tsk);
 }
@@ -980,10 +983,6 @@ NORET_TYPE void do_exit(long code)
 	tsk->mempolicy = NULL;
 #endif
 #ifdef CONFIG_FUTEX
-	/*
-	 * This must happen late, after the PID is not
-	 * hashed anymore:
-	 */
 	if (unlikely(!list_empty(&tsk->pi_state_list)))
 		exit_pi_state_list(tsk);
 	if (unlikely(current->pi_state_cache))
@@ -1250,6 +1249,12 @@ static int wait_task_zombie(struct task_struct *p, int options,
 	 */
 	read_unlock(&tasklist_lock);
 
+	/*
+	 * Flush inherited counters to the parent - before the parent
+	 * gets woken up by child-exit notifications.
+	 */
+	perf_counter_exit_task(p);
+
 	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
 	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
 		? p->signal->group_exit_code : p->exit_code;
diff --git a/kernel/fork.c b/kernel/fork.c
index 660c2b8765bc..381d7f9b70fb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -975,6 +975,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto fork_out;
 
 	rt_mutex_init_task(p);
+	perf_counter_init_task(p);
 
 #ifdef CONFIG_PROVE_LOCKING
 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
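
The perf_counter_init_task() hook added above (and the counter_list sanity check added to delayed_put_task_struct() in exit.c) operate on the per-task context embedded in task_struct. The initializer itself is not in the hunks shown here; a minimal sketch of what it has to set up, inferred from the fields the core code below uses (ctx->lock, ctx->mutex, ctx->counter_list, ctx->task), would look roughly like this. It is an illustration, not the patch's actual definition:

void perf_counter_init_task(struct task_struct *task)
{
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	/* start with an empty, unlocked context owned by this task */
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	ctx->task = task;
}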
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
new file mode 100644
index 000000000000..b2e838959f3e
--- /dev/null
+++ b/kernel/perf_counter.c
@@ -0,0 +1,2212 @@
1 | /* | ||
2 | * Performance counter core code | ||
3 | * | ||
4 | * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
5 | * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar | ||
6 | * | ||
7 | * For licensing details see kernel-base/COPYING | ||
8 | */ | ||
9 | |||
10 | #include <linux/fs.h> | ||
11 | #include <linux/cpu.h> | ||
12 | #include <linux/smp.h> | ||
13 | #include <linux/file.h> | ||
14 | #include <linux/poll.h> | ||
15 | #include <linux/sysfs.h> | ||
16 | #include <linux/ptrace.h> | ||
17 | #include <linux/percpu.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | #include <linux/syscalls.h> | ||
20 | #include <linux/anon_inodes.h> | ||
21 | #include <linux/kernel_stat.h> | ||
22 | #include <linux/perf_counter.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/vmstat.h> | ||
25 | |||
26 | /* | ||
27 | * Each CPU has a list of per CPU counters: | ||
28 | */ | ||
29 | DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); | ||
30 | |||
31 | int perf_max_counters __read_mostly = 1; | ||
32 | static int perf_reserved_percpu __read_mostly; | ||
33 | static int perf_overcommit __read_mostly = 1; | ||
34 | |||
35 | /* | ||
36 | * Mutex for (sysadmin-configurable) counter reservations: | ||
37 | */ | ||
38 | static DEFINE_MUTEX(perf_resource_mutex); | ||
39 | |||
40 | /* | ||
41 | * Architecture provided APIs - weak aliases: | ||
42 | */ | ||
43 | extern __weak const struct hw_perf_counter_ops * | ||
44 | hw_perf_counter_init(struct perf_counter *counter) | ||
45 | { | ||
46 | return NULL; | ||
47 | } | ||
48 | |||
49 | u64 __weak hw_perf_save_disable(void) { return 0; } | ||
50 | void __weak hw_perf_restore(u64 ctrl) { barrier(); } | ||
51 | void __weak hw_perf_counter_setup(int cpu) { barrier(); } | ||
52 | int __weak hw_perf_group_sched_in(struct perf_counter *group_leader, | ||
53 | struct perf_cpu_context *cpuctx, | ||
54 | struct perf_counter_context *ctx, int cpu) | ||
55 | { | ||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | void __weak perf_counter_print_debug(void) { } | ||
60 | |||
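The weak definitions above are the whole architecture interface: a PMU-aware architecture overrides them with strong definitions. A hedged sketch of the minimal override, using illustrative my_arch_* names that are not part of this patch:

static int my_arch_counter_enable(struct perf_counter *counter)
{
	/* program a hardware counter according to counter->hw_event */
	return 0;
}

static void my_arch_counter_disable(struct perf_counter *counter)
{
	/* stop the hardware counter */
}

static void my_arch_counter_read(struct perf_counter *counter)
{
	/* fold the current hardware value into counter->count */
}

static const struct hw_perf_counter_ops my_arch_counter_ops = {
	.enable		= my_arch_counter_enable,
	.disable	= my_arch_counter_disable,
	.read		= my_arch_counter_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	/* reject hw_event types this PMU cannot count, otherwise: */
	return &my_arch_counter_ops;
}

hw_perf_save_disable()/hw_perf_restore() and hw_perf_group_sched_in() can stay as the weak no-ops until the architecture needs NMI-safe global disable or optimized group scheduling.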
61 | static void | ||
62 | list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) | ||
63 | { | ||
64 | struct perf_counter *group_leader = counter->group_leader; | ||
65 | |||
66 | /* | ||
67 | * Depending on whether it is a standalone or sibling counter, | ||
68 | * add it straight to the context's counter list, or to the group | ||
69 | * leader's sibling list: | ||
70 | */ | ||
71 | if (counter->group_leader == counter) | ||
72 | list_add_tail(&counter->list_entry, &ctx->counter_list); | ||
73 | else | ||
74 | list_add_tail(&counter->list_entry, &group_leader->sibling_list); | ||
75 | } | ||
76 | |||
77 | static void | ||
78 | list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) | ||
79 | { | ||
80 | struct perf_counter *sibling, *tmp; | ||
81 | |||
82 | list_del_init(&counter->list_entry); | ||
83 | |||
84 | /* | ||
85 | * If this was a group counter with sibling counters then | ||
86 | * upgrade the siblings to singleton counters by adding them | ||
87 | * to the context list directly: | ||
88 | */ | ||
89 | list_for_each_entry_safe(sibling, tmp, | ||
90 | &counter->sibling_list, list_entry) { | ||
91 | |||
92 | list_del_init(&sibling->list_entry); | ||
93 | list_add_tail(&sibling->list_entry, &ctx->counter_list); | ||
94 | sibling->group_leader = sibling; | ||
95 | } | ||
96 | } | ||
97 | |||
98 | static void | ||
99 | counter_sched_out(struct perf_counter *counter, | ||
100 | struct perf_cpu_context *cpuctx, | ||
101 | struct perf_counter_context *ctx) | ||
102 | { | ||
103 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
104 | return; | ||
105 | |||
106 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
107 | counter->hw_ops->disable(counter); | ||
108 | counter->oncpu = -1; | ||
109 | |||
110 | if (!is_software_counter(counter)) | ||
111 | cpuctx->active_oncpu--; | ||
112 | ctx->nr_active--; | ||
113 | if (counter->hw_event.exclusive || !cpuctx->active_oncpu) | ||
114 | cpuctx->exclusive = 0; | ||
115 | } | ||
116 | |||
117 | static void | ||
118 | group_sched_out(struct perf_counter *group_counter, | ||
119 | struct perf_cpu_context *cpuctx, | ||
120 | struct perf_counter_context *ctx) | ||
121 | { | ||
122 | struct perf_counter *counter; | ||
123 | |||
124 | if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
125 | return; | ||
126 | |||
127 | counter_sched_out(group_counter, cpuctx, ctx); | ||
128 | |||
129 | /* | ||
130 | * Schedule out siblings (if any): | ||
131 | */ | ||
132 | list_for_each_entry(counter, &group_counter->sibling_list, list_entry) | ||
133 | counter_sched_out(counter, cpuctx, ctx); | ||
134 | |||
135 | if (group_counter->hw_event.exclusive) | ||
136 | cpuctx->exclusive = 0; | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * Cross CPU call to remove a performance counter | ||
141 | * | ||
142 | * We disable the counter on the hardware level first. After that we | ||
143 | * remove it from the context list. | ||
144 | */ | ||
145 | static void __perf_counter_remove_from_context(void *info) | ||
146 | { | ||
147 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
148 | struct perf_counter *counter = info; | ||
149 | struct perf_counter_context *ctx = counter->ctx; | ||
150 | unsigned long flags; | ||
151 | u64 perf_flags; | ||
152 | |||
153 | /* | ||
154 | * If this is a task context, we need to check whether it is | ||
155 | * the current task context of this cpu. If not it has been | ||
156 | * scheduled out before the smp call arrived. | ||
157 | */ | ||
158 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
159 | return; | ||
160 | |||
161 | curr_rq_lock_irq_save(&flags); | ||
162 | spin_lock(&ctx->lock); | ||
163 | |||
164 | counter_sched_out(counter, cpuctx, ctx); | ||
165 | |||
166 | counter->task = NULL; | ||
167 | ctx->nr_counters--; | ||
168 | |||
169 | /* | ||
170 | * Protect the list operation against NMI by disabling the | ||
171 | * counters on a global level. NOP for non-NMI based counters. | ||
172 | */ | ||
173 | perf_flags = hw_perf_save_disable(); | ||
174 | list_del_counter(counter, ctx); | ||
175 | hw_perf_restore(perf_flags); | ||
176 | |||
177 | if (!ctx->task) { | ||
178 | /* | ||
179 | * Allow more per task counters with respect to the | ||
180 | * reservation: | ||
181 | */ | ||
182 | cpuctx->max_pertask = | ||
183 | min(perf_max_counters - ctx->nr_counters, | ||
184 | perf_max_counters - perf_reserved_percpu); | ||
185 | } | ||
186 | |||
187 | spin_unlock(&ctx->lock); | ||
188 | curr_rq_unlock_irq_restore(&flags); | ||
189 | } | ||
190 | |||
191 | |||
192 | /* | ||
193 | * Remove the counter from a task's (or a CPU's) list of counters. | ||
194 | * | ||
195 | * Must be called with counter->mutex and ctx->mutex held. | ||
196 | * | ||
197 | * CPU counters are removed with a smp call. For task counters we only | ||
198 | * call when the task is on a CPU. | ||
199 | */ | ||
200 | static void perf_counter_remove_from_context(struct perf_counter *counter) | ||
201 | { | ||
202 | struct perf_counter_context *ctx = counter->ctx; | ||
203 | struct task_struct *task = ctx->task; | ||
204 | |||
205 | if (!task) { | ||
206 | /* | ||
207 | * Per cpu counters are removed via an smp call and | ||
208 | * the removal is always successful. | ||
209 | */ | ||
210 | smp_call_function_single(counter->cpu, | ||
211 | __perf_counter_remove_from_context, | ||
212 | counter, 1); | ||
213 | return; | ||
214 | } | ||
215 | |||
216 | retry: | ||
217 | task_oncpu_function_call(task, __perf_counter_remove_from_context, | ||
218 | counter); | ||
219 | |||
220 | spin_lock_irq(&ctx->lock); | ||
221 | /* | ||
222 | * If the context is active we need to retry the smp call. | ||
223 | */ | ||
224 | if (ctx->nr_active && !list_empty(&counter->list_entry)) { | ||
225 | spin_unlock_irq(&ctx->lock); | ||
226 | goto retry; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * The lock prevents this context from being scheduled in, so we | ||
231 | * can remove the counter safely if the call above did not | ||
232 | * succeed. | ||
233 | */ | ||
234 | if (!list_empty(&counter->list_entry)) { | ||
235 | ctx->nr_counters--; | ||
236 | list_del_counter(counter, ctx); | ||
237 | counter->task = NULL; | ||
238 | } | ||
239 | spin_unlock_irq(&ctx->lock); | ||
240 | } | ||
241 | |||
242 | /* | ||
243 | * Cross CPU call to disable a performance counter | ||
244 | */ | ||
245 | static void __perf_counter_disable(void *info) | ||
246 | { | ||
247 | struct perf_counter *counter = info; | ||
248 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
249 | struct perf_counter_context *ctx = counter->ctx; | ||
250 | unsigned long flags; | ||
251 | |||
252 | /* | ||
253 | * If this is a per-task counter, need to check whether this | ||
254 | * counter's task is the current task on this cpu. | ||
255 | */ | ||
256 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
257 | return; | ||
258 | |||
259 | curr_rq_lock_irq_save(&flags); | ||
260 | spin_lock(&ctx->lock); | ||
261 | |||
262 | /* | ||
263 | * If the counter is on, turn it off. | ||
264 | * If it is in error state, leave it in error state. | ||
265 | */ | ||
266 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { | ||
267 | if (counter == counter->group_leader) | ||
268 | group_sched_out(counter, cpuctx, ctx); | ||
269 | else | ||
270 | counter_sched_out(counter, cpuctx, ctx); | ||
271 | counter->state = PERF_COUNTER_STATE_OFF; | ||
272 | } | ||
273 | |||
274 | spin_unlock(&ctx->lock); | ||
275 | curr_rq_unlock_irq_restore(&flags); | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * Disable a counter. | ||
280 | */ | ||
281 | static void perf_counter_disable(struct perf_counter *counter) | ||
282 | { | ||
283 | struct perf_counter_context *ctx = counter->ctx; | ||
284 | struct task_struct *task = ctx->task; | ||
285 | |||
286 | if (!task) { | ||
287 | /* | ||
288 | * Disable the counter on the cpu that it's on | ||
289 | */ | ||
290 | smp_call_function_single(counter->cpu, __perf_counter_disable, | ||
291 | counter, 1); | ||
292 | return; | ||
293 | } | ||
294 | |||
295 | retry: | ||
296 | task_oncpu_function_call(task, __perf_counter_disable, counter); | ||
297 | |||
298 | spin_lock_irq(&ctx->lock); | ||
299 | /* | ||
300 | * If the counter is still active, we need to retry the cross-call. | ||
301 | */ | ||
302 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) { | ||
303 | spin_unlock_irq(&ctx->lock); | ||
304 | goto retry; | ||
305 | } | ||
306 | |||
307 | /* | ||
308 | * Since we have the lock this context can't be scheduled | ||
309 | * in, so we can change the state safely. | ||
310 | */ | ||
311 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) | ||
312 | counter->state = PERF_COUNTER_STATE_OFF; | ||
313 | |||
314 | spin_unlock_irq(&ctx->lock); | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * Disable a counter and all its children. | ||
319 | */ | ||
320 | static void perf_counter_disable_family(struct perf_counter *counter) | ||
321 | { | ||
322 | struct perf_counter *child; | ||
323 | |||
324 | perf_counter_disable(counter); | ||
325 | |||
326 | /* | ||
327 | * Lock the mutex to protect the list of children | ||
328 | */ | ||
329 | mutex_lock(&counter->mutex); | ||
330 | list_for_each_entry(child, &counter->child_list, child_list) | ||
331 | perf_counter_disable(child); | ||
332 | mutex_unlock(&counter->mutex); | ||
333 | } | ||
334 | |||
335 | static int | ||
336 | counter_sched_in(struct perf_counter *counter, | ||
337 | struct perf_cpu_context *cpuctx, | ||
338 | struct perf_counter_context *ctx, | ||
339 | int cpu) | ||
340 | { | ||
341 | if (counter->state <= PERF_COUNTER_STATE_OFF) | ||
342 | return 0; | ||
343 | |||
344 | counter->state = PERF_COUNTER_STATE_ACTIVE; | ||
345 | counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ | ||
346 | /* | ||
347 | * The new state must be visible before we turn it on in the hardware: | ||
348 | */ | ||
349 | smp_wmb(); | ||
350 | |||
351 | if (counter->hw_ops->enable(counter)) { | ||
352 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
353 | counter->oncpu = -1; | ||
354 | return -EAGAIN; | ||
355 | } | ||
356 | |||
357 | if (!is_software_counter(counter)) | ||
358 | cpuctx->active_oncpu++; | ||
359 | ctx->nr_active++; | ||
360 | |||
361 | if (counter->hw_event.exclusive) | ||
362 | cpuctx->exclusive = 1; | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | /* | ||
368 | * Return 1 for a group consisting entirely of software counters, | ||
369 | * 0 if the group contains any hardware counters. | ||
370 | */ | ||
371 | static int is_software_only_group(struct perf_counter *leader) | ||
372 | { | ||
373 | struct perf_counter *counter; | ||
374 | |||
375 | if (!is_software_counter(leader)) | ||
376 | return 0; | ||
377 | list_for_each_entry(counter, &leader->sibling_list, list_entry) | ||
378 | if (!is_software_counter(counter)) | ||
379 | return 0; | ||
380 | return 1; | ||
381 | } | ||
382 | |||
383 | /* | ||
384 | * Work out whether we can put this counter group on the CPU now. | ||
385 | */ | ||
386 | static int group_can_go_on(struct perf_counter *counter, | ||
387 | struct perf_cpu_context *cpuctx, | ||
388 | int can_add_hw) | ||
389 | { | ||
390 | /* | ||
391 | * Groups consisting entirely of software counters can always go on. | ||
392 | */ | ||
393 | if (is_software_only_group(counter)) | ||
394 | return 1; | ||
395 | /* | ||
396 | * If an exclusive group is already on, no other hardware | ||
397 | * counters can go on. | ||
398 | */ | ||
399 | if (cpuctx->exclusive) | ||
400 | return 0; | ||
401 | /* | ||
402 | * If this group is exclusive and there are already | ||
403 | * counters on the CPU, it can't go on. | ||
404 | */ | ||
405 | if (counter->hw_event.exclusive && cpuctx->active_oncpu) | ||
406 | return 0; | ||
407 | /* | ||
408 | * Otherwise, try to add it if all previous groups were able | ||
409 | * to go on. | ||
410 | */ | ||
411 | return can_add_hw; | ||
412 | } | ||
413 | |||
414 | /* | ||
415 | * Cross CPU call to install and enable a performance counter | ||
416 | */ | ||
417 | static void __perf_install_in_context(void *info) | ||
418 | { | ||
419 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
420 | struct perf_counter *counter = info; | ||
421 | struct perf_counter_context *ctx = counter->ctx; | ||
422 | struct perf_counter *leader = counter->group_leader; | ||
423 | int cpu = smp_processor_id(); | ||
424 | unsigned long flags; | ||
425 | u64 perf_flags; | ||
426 | int err; | ||
427 | |||
428 | /* | ||
429 | * If this is a task context, we need to check whether it is | ||
430 | * the current task context of this cpu. If not it has been | ||
431 | * scheduled out before the smp call arrived. | ||
432 | */ | ||
433 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
434 | return; | ||
435 | |||
436 | curr_rq_lock_irq_save(&flags); | ||
437 | spin_lock(&ctx->lock); | ||
438 | |||
439 | /* | ||
440 | * Protect the list operation against NMI by disabling the | ||
441 | * counters on a global level. NOP for non-NMI based counters. | ||
442 | */ | ||
443 | perf_flags = hw_perf_save_disable(); | ||
444 | |||
445 | list_add_counter(counter, ctx); | ||
446 | ctx->nr_counters++; | ||
447 | counter->prev_state = PERF_COUNTER_STATE_OFF; | ||
448 | |||
449 | /* | ||
450 | * Don't put the counter on if it is disabled or if | ||
451 | * it is in a group and the group isn't on. | ||
452 | */ | ||
453 | if (counter->state != PERF_COUNTER_STATE_INACTIVE || | ||
454 | (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)) | ||
455 | goto unlock; | ||
456 | |||
457 | /* | ||
458 | * An exclusive counter can't go on if there are already active | ||
459 | * hardware counters, and no hardware counter can go on if there | ||
460 | * is already an exclusive counter on. | ||
461 | */ | ||
462 | if (!group_can_go_on(counter, cpuctx, 1)) | ||
463 | err = -EEXIST; | ||
464 | else | ||
465 | err = counter_sched_in(counter, cpuctx, ctx, cpu); | ||
466 | |||
467 | if (err) { | ||
468 | /* | ||
469 | * This counter couldn't go on. If it is in a group | ||
470 | * then we have to pull the whole group off. | ||
471 | * If the counter group is pinned then put it in error state. | ||
472 | */ | ||
473 | if (leader != counter) | ||
474 | group_sched_out(leader, cpuctx, ctx); | ||
475 | if (leader->hw_event.pinned) | ||
476 | leader->state = PERF_COUNTER_STATE_ERROR; | ||
477 | } | ||
478 | |||
479 | if (!err && !ctx->task && cpuctx->max_pertask) | ||
480 | cpuctx->max_pertask--; | ||
481 | |||
482 | unlock: | ||
483 | hw_perf_restore(perf_flags); | ||
484 | |||
485 | spin_unlock(&ctx->lock); | ||
486 | curr_rq_unlock_irq_restore(&flags); | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * Attach a performance counter to a context | ||
491 | * | ||
492 | * First we add the counter to the list with the hardware enable bit | ||
493 | * in counter->hw_config cleared. | ||
494 | * | ||
495 | * If the counter is attached to a task which is on a CPU we use a smp | ||
496 | * call to enable it in the task context. The task might have been | ||
497 | * scheduled away, but we check this in the smp call again. | ||
498 | * | ||
499 | * Must be called with ctx->mutex held. | ||
500 | */ | ||
501 | static void | ||
502 | perf_install_in_context(struct perf_counter_context *ctx, | ||
503 | struct perf_counter *counter, | ||
504 | int cpu) | ||
505 | { | ||
506 | struct task_struct *task = ctx->task; | ||
507 | |||
508 | if (!task) { | ||
509 | /* | ||
510 | * Per cpu counters are installed via an smp call and | ||
511 | * the install is always successful. | ||
512 | */ | ||
513 | smp_call_function_single(cpu, __perf_install_in_context, | ||
514 | counter, 1); | ||
515 | return; | ||
516 | } | ||
517 | |||
518 | counter->task = task; | ||
519 | retry: | ||
520 | task_oncpu_function_call(task, __perf_install_in_context, | ||
521 | counter); | ||
522 | |||
523 | spin_lock_irq(&ctx->lock); | ||
524 | /* | ||
525 | * If the context is active we need to retry the smp call. | ||
526 | */ | ||
527 | if (ctx->is_active && list_empty(&counter->list_entry)) { | ||
528 | spin_unlock_irq(&ctx->lock); | ||
529 | goto retry; | ||
530 | } | ||
531 | |||
532 | /* | ||
533 | * The lock prevents this context from being scheduled in, so we | ||
534 | * can add the counter safely if the call above did not | ||
535 | * succeed. | ||
536 | */ | ||
537 | if (list_empty(&counter->list_entry)) { | ||
538 | list_add_counter(counter, ctx); | ||
539 | ctx->nr_counters++; | ||
540 | } | ||
541 | spin_unlock_irq(&ctx->lock); | ||
542 | } | ||
543 | |||
544 | /* | ||
545 | * Cross CPU call to enable a performance counter | ||
546 | */ | ||
547 | static void __perf_counter_enable(void *info) | ||
548 | { | ||
549 | struct perf_counter *counter = info; | ||
550 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
551 | struct perf_counter_context *ctx = counter->ctx; | ||
552 | struct perf_counter *leader = counter->group_leader; | ||
553 | unsigned long flags; | ||
554 | int err; | ||
555 | |||
556 | /* | ||
557 | * If this is a per-task counter, need to check whether this | ||
558 | * counter's task is the current task on this cpu. | ||
559 | */ | ||
560 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
561 | return; | ||
562 | |||
563 | curr_rq_lock_irq_save(&flags); | ||
564 | spin_lock(&ctx->lock); | ||
565 | |||
566 | counter->prev_state = counter->state; | ||
567 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
568 | goto unlock; | ||
569 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
570 | |||
571 | /* | ||
572 | * If the counter is in a group and isn't the group leader, | ||
573 | * then don't put it on unless the group is on. | ||
574 | */ | ||
575 | if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) | ||
576 | goto unlock; | ||
577 | |||
578 | if (!group_can_go_on(counter, cpuctx, 1)) | ||
579 | err = -EEXIST; | ||
580 | else | ||
581 | err = counter_sched_in(counter, cpuctx, ctx, | ||
582 | smp_processor_id()); | ||
583 | |||
584 | if (err) { | ||
585 | /* | ||
586 | * If this counter can't go on and it's part of a | ||
587 | * group, then the whole group has to come off. | ||
588 | */ | ||
589 | if (leader != counter) | ||
590 | group_sched_out(leader, cpuctx, ctx); | ||
591 | if (leader->hw_event.pinned) | ||
592 | leader->state = PERF_COUNTER_STATE_ERROR; | ||
593 | } | ||
594 | |||
595 | unlock: | ||
596 | spin_unlock(&ctx->lock); | ||
597 | curr_rq_unlock_irq_restore(&flags); | ||
598 | } | ||
599 | |||
600 | /* | ||
601 | * Enable a counter. | ||
602 | */ | ||
603 | static void perf_counter_enable(struct perf_counter *counter) | ||
604 | { | ||
605 | struct perf_counter_context *ctx = counter->ctx; | ||
606 | struct task_struct *task = ctx->task; | ||
607 | |||
608 | if (!task) { | ||
609 | /* | ||
610 | * Enable the counter on the cpu that it's on | ||
611 | */ | ||
612 | smp_call_function_single(counter->cpu, __perf_counter_enable, | ||
613 | counter, 1); | ||
614 | return; | ||
615 | } | ||
616 | |||
617 | spin_lock_irq(&ctx->lock); | ||
618 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
619 | goto out; | ||
620 | |||
621 | /* | ||
622 | * If the counter is in error state, clear that first. | ||
623 | * That way, if we see the counter in error state below, we | ||
624 | * know that it has gone back into error state, as distinct | ||
625 | * from the task having been scheduled away before the | ||
626 | * cross-call arrived. | ||
627 | */ | ||
628 | if (counter->state == PERF_COUNTER_STATE_ERROR) | ||
629 | counter->state = PERF_COUNTER_STATE_OFF; | ||
630 | |||
631 | retry: | ||
632 | spin_unlock_irq(&ctx->lock); | ||
633 | task_oncpu_function_call(task, __perf_counter_enable, counter); | ||
634 | |||
635 | spin_lock_irq(&ctx->lock); | ||
636 | |||
637 | /* | ||
638 | * If the context is active and the counter is still off, | ||
639 | * we need to retry the cross-call. | ||
640 | */ | ||
641 | if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF) | ||
642 | goto retry; | ||
643 | |||
644 | /* | ||
645 | * Since we have the lock this context can't be scheduled | ||
646 | * in, so we can change the state safely. | ||
647 | */ | ||
648 | if (counter->state == PERF_COUNTER_STATE_OFF) | ||
649 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
650 | out: | ||
651 | spin_unlock_irq(&ctx->lock); | ||
652 | } | ||
653 | |||
654 | /* | ||
655 | * Enable a counter and all its children. | ||
656 | */ | ||
657 | static void perf_counter_enable_family(struct perf_counter *counter) | ||
658 | { | ||
659 | struct perf_counter *child; | ||
660 | |||
661 | perf_counter_enable(counter); | ||
662 | |||
663 | /* | ||
664 | * Lock the mutex to protect the list of children | ||
665 | */ | ||
666 | mutex_lock(&counter->mutex); | ||
667 | list_for_each_entry(child, &counter->child_list, child_list) | ||
668 | perf_counter_enable(child); | ||
669 | mutex_unlock(&counter->mutex); | ||
670 | } | ||
671 | |||
672 | void __perf_counter_sched_out(struct perf_counter_context *ctx, | ||
673 | struct perf_cpu_context *cpuctx) | ||
674 | { | ||
675 | struct perf_counter *counter; | ||
676 | u64 flags; | ||
677 | |||
678 | spin_lock(&ctx->lock); | ||
679 | ctx->is_active = 0; | ||
680 | if (likely(!ctx->nr_counters)) | ||
681 | goto out; | ||
682 | |||
683 | flags = hw_perf_save_disable(); | ||
684 | if (ctx->nr_active) { | ||
685 | list_for_each_entry(counter, &ctx->counter_list, list_entry) | ||
686 | group_sched_out(counter, cpuctx, ctx); | ||
687 | } | ||
688 | hw_perf_restore(flags); | ||
689 | out: | ||
690 | spin_unlock(&ctx->lock); | ||
691 | } | ||
692 | |||
693 | /* | ||
694 | * Called from scheduler to remove the counters of the current task, | ||
695 | * with interrupts disabled. | ||
696 | * | ||
697 | * We stop each counter and update the counter value in counter->count. | ||
698 | * | ||
699 | * This does not protect us against NMI, but disable() | ||
700 | * sets the disabled bit in the control field of the counter _before_ | ||
701 | * accessing the counter control register. If an NMI hits, then it will | ||
702 | * not restart the counter. | ||
703 | */ | ||
704 | void perf_counter_task_sched_out(struct task_struct *task, int cpu) | ||
705 | { | ||
706 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
707 | struct perf_counter_context *ctx = &task->perf_counter_ctx; | ||
708 | |||
709 | if (likely(!cpuctx->task_ctx)) | ||
710 | return; | ||
711 | |||
712 | __perf_counter_sched_out(ctx, cpuctx); | ||
713 | |||
714 | cpuctx->task_ctx = NULL; | ||
715 | } | ||
716 | |||
717 | static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) | ||
718 | { | ||
719 | __perf_counter_sched_out(&cpuctx->ctx, cpuctx); | ||
720 | } | ||
721 | |||
722 | static int | ||
723 | group_sched_in(struct perf_counter *group_counter, | ||
724 | struct perf_cpu_context *cpuctx, | ||
725 | struct perf_counter_context *ctx, | ||
726 | int cpu) | ||
727 | { | ||
728 | struct perf_counter *counter, *partial_group; | ||
729 | int ret; | ||
730 | |||
731 | if (group_counter->state == PERF_COUNTER_STATE_OFF) | ||
732 | return 0; | ||
733 | |||
734 | ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); | ||
735 | if (ret) | ||
736 | return ret < 0 ? ret : 0; | ||
737 | |||
738 | group_counter->prev_state = group_counter->state; | ||
739 | if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) | ||
740 | return -EAGAIN; | ||
741 | |||
742 | /* | ||
743 | * Schedule in siblings as one group (if any): | ||
744 | */ | ||
745 | list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { | ||
746 | counter->prev_state = counter->state; | ||
747 | if (counter_sched_in(counter, cpuctx, ctx, cpu)) { | ||
748 | partial_group = counter; | ||
749 | goto group_error; | ||
750 | } | ||
751 | } | ||
752 | |||
753 | return 0; | ||
754 | |||
755 | group_error: | ||
756 | /* | ||
757 | * Groups can be scheduled in as one unit only, so undo any | ||
758 | * partial group before returning: | ||
759 | */ | ||
760 | list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { | ||
761 | if (counter == partial_group) | ||
762 | break; | ||
763 | counter_sched_out(counter, cpuctx, ctx); | ||
764 | } | ||
765 | counter_sched_out(group_counter, cpuctx, ctx); | ||
766 | |||
767 | return -EAGAIN; | ||
768 | } | ||
769 | |||
770 | static void | ||
771 | __perf_counter_sched_in(struct perf_counter_context *ctx, | ||
772 | struct perf_cpu_context *cpuctx, int cpu) | ||
773 | { | ||
774 | struct perf_counter *counter; | ||
775 | u64 flags; | ||
776 | int can_add_hw = 1; | ||
777 | |||
778 | spin_lock(&ctx->lock); | ||
779 | ctx->is_active = 1; | ||
780 | if (likely(!ctx->nr_counters)) | ||
781 | goto out; | ||
782 | |||
783 | flags = hw_perf_save_disable(); | ||
784 | |||
785 | /* | ||
786 | * First go through the list and put on any pinned groups | ||
787 | * in order to give them the best chance of going on. | ||
788 | */ | ||
789 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
790 | if (counter->state <= PERF_COUNTER_STATE_OFF || | ||
791 | !counter->hw_event.pinned) | ||
792 | continue; | ||
793 | if (counter->cpu != -1 && counter->cpu != cpu) | ||
794 | continue; | ||
795 | |||
796 | if (group_can_go_on(counter, cpuctx, 1)) | ||
797 | group_sched_in(counter, cpuctx, ctx, cpu); | ||
798 | |||
799 | /* | ||
800 | * If this pinned group hasn't been scheduled, | ||
801 | * put it in error state. | ||
802 | */ | ||
803 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) | ||
804 | counter->state = PERF_COUNTER_STATE_ERROR; | ||
805 | } | ||
806 | |||
807 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
808 | /* | ||
809 | * Ignore counters in OFF or ERROR state, and | ||
810 | * ignore pinned counters since we did them already. | ||
811 | */ | ||
812 | if (counter->state <= PERF_COUNTER_STATE_OFF || | ||
813 | counter->hw_event.pinned) | ||
814 | continue; | ||
815 | |||
816 | /* | ||
817 | * Listen to the 'cpu' scheduling filter constraint | ||
818 | * of counters: | ||
819 | */ | ||
820 | if (counter->cpu != -1 && counter->cpu != cpu) | ||
821 | continue; | ||
822 | |||
823 | if (group_can_go_on(counter, cpuctx, can_add_hw)) { | ||
824 | if (group_sched_in(counter, cpuctx, ctx, cpu)) | ||
825 | can_add_hw = 0; | ||
826 | } | ||
827 | } | ||
828 | hw_perf_restore(flags); | ||
829 | out: | ||
830 | spin_unlock(&ctx->lock); | ||
831 | } | ||
832 | |||
833 | /* | ||
834 | * Called from scheduler to add the counters of the current task | ||
835 | * with interrupts disabled. | ||
836 | * | ||
837 | * We restore the counter value and then enable it. | ||
838 | * | ||
839 | * This does not protect us against NMI, but enable() | ||
840 | * sets the enabled bit in the control field of the counter _before_ | ||
841 | * accessing the counter control register. If an NMI hits, then it will | ||
842 | * keep the counter running. | ||
843 | */ | ||
844 | void perf_counter_task_sched_in(struct task_struct *task, int cpu) | ||
845 | { | ||
846 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
847 | struct perf_counter_context *ctx = &task->perf_counter_ctx; | ||
848 | |||
849 | __perf_counter_sched_in(ctx, cpuctx, cpu); | ||
850 | cpuctx->task_ctx = ctx; | ||
851 | } | ||
852 | |||
853 | static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) | ||
854 | { | ||
855 | struct perf_counter_context *ctx = &cpuctx->ctx; | ||
856 | |||
857 | __perf_counter_sched_in(ctx, cpuctx, cpu); | ||
858 | } | ||
859 | |||
860 | int perf_counter_task_disable(void) | ||
861 | { | ||
862 | struct task_struct *curr = current; | ||
863 | struct perf_counter_context *ctx = &curr->perf_counter_ctx; | ||
864 | struct perf_counter *counter; | ||
865 | unsigned long flags; | ||
866 | u64 perf_flags; | ||
867 | int cpu; | ||
868 | |||
869 | if (likely(!ctx->nr_counters)) | ||
870 | return 0; | ||
871 | |||
872 | curr_rq_lock_irq_save(&flags); | ||
873 | cpu = smp_processor_id(); | ||
874 | |||
875 | /* force the update of the task clock: */ | ||
876 | __task_delta_exec(curr, 1); | ||
877 | |||
878 | perf_counter_task_sched_out(curr, cpu); | ||
879 | |||
880 | spin_lock(&ctx->lock); | ||
881 | |||
882 | /* | ||
883 | * Disable all the counters: | ||
884 | */ | ||
885 | perf_flags = hw_perf_save_disable(); | ||
886 | |||
887 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
888 | if (counter->state != PERF_COUNTER_STATE_ERROR) | ||
889 | counter->state = PERF_COUNTER_STATE_OFF; | ||
890 | } | ||
891 | |||
892 | hw_perf_restore(perf_flags); | ||
893 | |||
894 | spin_unlock(&ctx->lock); | ||
895 | |||
896 | curr_rq_unlock_irq_restore(&flags); | ||
897 | |||
898 | return 0; | ||
899 | } | ||
900 | |||
901 | int perf_counter_task_enable(void) | ||
902 | { | ||
903 | struct task_struct *curr = current; | ||
904 | struct perf_counter_context *ctx = &curr->perf_counter_ctx; | ||
905 | struct perf_counter *counter; | ||
906 | unsigned long flags; | ||
907 | u64 perf_flags; | ||
908 | int cpu; | ||
909 | |||
910 | if (likely(!ctx->nr_counters)) | ||
911 | return 0; | ||
912 | |||
913 | curr_rq_lock_irq_save(&flags); | ||
914 | cpu = smp_processor_id(); | ||
915 | |||
916 | /* force the update of the task clock: */ | ||
917 | __task_delta_exec(curr, 1); | ||
918 | |||
919 | perf_counter_task_sched_out(curr, cpu); | ||
920 | |||
921 | spin_lock(&ctx->lock); | ||
922 | |||
923 | /* | ||
924 | * Disable the hardware while we re-enable the counters: | ||
925 | */ | ||
926 | perf_flags = hw_perf_save_disable(); | ||
927 | |||
928 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
929 | if (counter->state > PERF_COUNTER_STATE_OFF) | ||
930 | continue; | ||
931 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
932 | counter->hw_event.disabled = 0; | ||
933 | } | ||
934 | hw_perf_restore(perf_flags); | ||
935 | |||
936 | spin_unlock(&ctx->lock); | ||
937 | |||
938 | perf_counter_task_sched_in(curr, cpu); | ||
939 | |||
940 | curr_rq_unlock_irq_restore(&flags); | ||
941 | |||
942 | return 0; | ||
943 | } | ||
944 | |||
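perf_counter_task_disable()/perf_counter_task_enable() have no caller in this file; judging by the kernel/sys.c entry in the diffstat they back a prctl() pair added elsewhere in this patch. A hedged userspace sketch, where the PR_TASK_PERF_COUNTERS_* names are an assumption based on that, not something shown in this section:

#include <sys/prctl.h>

/*
 * The PR_TASK_PERF_COUNTERS_* constants are assumed to come from the
 * patched <linux/prctl.h>; they are not defined anywhere in this file.
 */
static void run_uncounted(void (*fn)(void))
{
	prctl(PR_TASK_PERF_COUNTERS_DISABLE);	/* -> perf_counter_task_disable() */
	fn();					/* not counted by this task's counters */
	prctl(PR_TASK_PERF_COUNTERS_ENABLE);	/* -> perf_counter_task_enable() */
}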
945 | /* | ||
946 | * Round-robin a context's counters: | ||
947 | */ | ||
948 | static void rotate_ctx(struct perf_counter_context *ctx) | ||
949 | { | ||
950 | struct perf_counter *counter; | ||
951 | u64 perf_flags; | ||
952 | |||
953 | if (!ctx->nr_counters) | ||
954 | return; | ||
955 | |||
956 | spin_lock(&ctx->lock); | ||
957 | /* | ||
958 | * Rotate the first entry last (works just fine for group counters too): | ||
959 | */ | ||
960 | perf_flags = hw_perf_save_disable(); | ||
961 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
962 | list_del(&counter->list_entry); | ||
963 | list_add_tail(&counter->list_entry, &ctx->counter_list); | ||
964 | break; | ||
965 | } | ||
966 | hw_perf_restore(perf_flags); | ||
967 | |||
968 | spin_unlock(&ctx->lock); | ||
969 | } | ||
970 | |||
971 | void perf_counter_task_tick(struct task_struct *curr, int cpu) | ||
972 | { | ||
973 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
974 | struct perf_counter_context *ctx = &curr->perf_counter_ctx; | ||
975 | const int rotate_percpu = 0; | ||
976 | |||
977 | if (rotate_percpu) | ||
978 | perf_counter_cpu_sched_out(cpuctx); | ||
979 | perf_counter_task_sched_out(curr, cpu); | ||
980 | |||
981 | if (rotate_percpu) | ||
982 | rotate_ctx(&cpuctx->ctx); | ||
983 | rotate_ctx(ctx); | ||
984 | |||
985 | if (rotate_percpu) | ||
986 | perf_counter_cpu_sched_in(cpuctx, cpu); | ||
987 | perf_counter_task_sched_in(curr, cpu); | ||
988 | } | ||
989 | |||
990 | /* | ||
991 | * Cross CPU call to read the hardware counter | ||
992 | */ | ||
993 | static void __read(void *info) | ||
994 | { | ||
995 | struct perf_counter *counter = info; | ||
996 | unsigned long flags; | ||
997 | |||
998 | curr_rq_lock_irq_save(&flags); | ||
999 | counter->hw_ops->read(counter); | ||
1000 | curr_rq_unlock_irq_restore(&flags); | ||
1001 | } | ||
1002 | |||
1003 | static u64 perf_counter_read(struct perf_counter *counter) | ||
1004 | { | ||
1005 | /* | ||
1006 | * If counter is enabled and currently active on a CPU, update the | ||
1007 | * value in the counter structure: | ||
1008 | */ | ||
1009 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) { | ||
1010 | smp_call_function_single(counter->oncpu, | ||
1011 | __read, counter, 1); | ||
1012 | } | ||
1013 | |||
1014 | return atomic64_read(&counter->count); | ||
1015 | } | ||
1016 | |||
1017 | /* | ||
1018 | * Cross CPU call to switch performance data pointers | ||
1019 | */ | ||
1020 | static void __perf_switch_irq_data(void *info) | ||
1021 | { | ||
1022 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
1023 | struct perf_counter *counter = info; | ||
1024 | struct perf_counter_context *ctx = counter->ctx; | ||
1025 | struct perf_data *oldirqdata = counter->irqdata; | ||
1026 | |||
1027 | /* | ||
1028 | * If this is a task context, we need to check whether it is | ||
1029 | * the current task context of this cpu. If not it has been | ||
1030 | * scheduled out before the smp call arrived. | ||
1031 | */ | ||
1032 | if (ctx->task) { | ||
1033 | if (cpuctx->task_ctx != ctx) | ||
1034 | return; | ||
1035 | spin_lock(&ctx->lock); | ||
1036 | } | ||
1037 | |||
1038 | /* Change the pointer in an NMI-safe way */ | ||
1039 | atomic_long_set((atomic_long_t *)&counter->irqdata, | ||
1040 | (unsigned long) counter->usrdata); | ||
1041 | counter->usrdata = oldirqdata; | ||
1042 | |||
1043 | if (ctx->task) | ||
1044 | spin_unlock(&ctx->lock); | ||
1045 | } | ||
1046 | |||
1047 | static struct perf_data *perf_switch_irq_data(struct perf_counter *counter) | ||
1048 | { | ||
1049 | struct perf_counter_context *ctx = counter->ctx; | ||
1050 | struct perf_data *oldirqdata = counter->irqdata; | ||
1051 | struct task_struct *task = ctx->task; | ||
1052 | |||
1053 | if (!task) { | ||
1054 | smp_call_function_single(counter->cpu, | ||
1055 | __perf_switch_irq_data, | ||
1056 | counter, 1); | ||
1057 | return counter->usrdata; | ||
1058 | } | ||
1059 | |||
1060 | retry: | ||
1061 | spin_lock_irq(&ctx->lock); | ||
1062 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) { | ||
1063 | counter->irqdata = counter->usrdata; | ||
1064 | counter->usrdata = oldirqdata; | ||
1065 | spin_unlock_irq(&ctx->lock); | ||
1066 | return oldirqdata; | ||
1067 | } | ||
1068 | spin_unlock_irq(&ctx->lock); | ||
1069 | task_oncpu_function_call(task, __perf_switch_irq_data, counter); | ||
1070 | /* Might have failed, because task was scheduled out */ | ||
1071 | if (counter->irqdata == oldirqdata) | ||
1072 | goto retry; | ||
1073 | |||
1074 | return counter->usrdata; | ||
1075 | } | ||
1076 | |||
1077 | static void put_context(struct perf_counter_context *ctx) | ||
1078 | { | ||
1079 | if (ctx->task) | ||
1080 | put_task_struct(ctx->task); | ||
1081 | } | ||
1082 | |||
1083 | static struct perf_counter_context *find_get_context(pid_t pid, int cpu) | ||
1084 | { | ||
1085 | struct perf_cpu_context *cpuctx; | ||
1086 | struct perf_counter_context *ctx; | ||
1087 | struct task_struct *task; | ||
1088 | |||
1089 | /* | ||
1090 | * If cpu is not a wildcard then this is a percpu counter: | ||
1091 | */ | ||
1092 | if (cpu != -1) { | ||
1093 | /* Must be root to operate on a CPU counter: */ | ||
1094 | if (!capable(CAP_SYS_ADMIN)) | ||
1095 | return ERR_PTR(-EACCES); | ||
1096 | |||
1097 | if (cpu < 0 || cpu > num_possible_cpus()) | ||
1098 | return ERR_PTR(-EINVAL); | ||
1099 | |||
1100 | /* | ||
1101 | * We could be clever and allow attaching a counter to an | ||
1102 | * offline CPU and activate it when the CPU comes up, but | ||
1103 | * that's for later. | ||
1104 | */ | ||
1105 | if (!cpu_isset(cpu, cpu_online_map)) | ||
1106 | return ERR_PTR(-ENODEV); | ||
1107 | |||
1108 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1109 | ctx = &cpuctx->ctx; | ||
1110 | |||
1111 | return ctx; | ||
1112 | } | ||
1113 | |||
1114 | rcu_read_lock(); | ||
1115 | if (!pid) | ||
1116 | task = current; | ||
1117 | else | ||
1118 | task = find_task_by_vpid(pid); | ||
1119 | if (task) | ||
1120 | get_task_struct(task); | ||
1121 | rcu_read_unlock(); | ||
1122 | |||
1123 | if (!task) | ||
1124 | return ERR_PTR(-ESRCH); | ||
1125 | |||
1126 | ctx = &task->perf_counter_ctx; | ||
1127 | ctx->task = task; | ||
1128 | |||
1129 | /* Reuse ptrace permission checks for now. */ | ||
1130 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) { | ||
1131 | put_context(ctx); | ||
1132 | return ERR_PTR(-EACCES); | ||
1133 | } | ||
1134 | |||
1135 | return ctx; | ||
1136 | } | ||
1137 | |||
1138 | /* | ||
1139 | * Called when the last reference to the file is gone. | ||
1140 | */ | ||
1141 | static int perf_release(struct inode *inode, struct file *file) | ||
1142 | { | ||
1143 | struct perf_counter *counter = file->private_data; | ||
1144 | struct perf_counter_context *ctx = counter->ctx; | ||
1145 | |||
1146 | file->private_data = NULL; | ||
1147 | |||
1148 | mutex_lock(&ctx->mutex); | ||
1149 | mutex_lock(&counter->mutex); | ||
1150 | |||
1151 | perf_counter_remove_from_context(counter); | ||
1152 | |||
1153 | mutex_unlock(&counter->mutex); | ||
1154 | mutex_unlock(&ctx->mutex); | ||
1155 | |||
1156 | kfree(counter); | ||
1157 | put_context(ctx); | ||
1158 | |||
1159 | return 0; | ||
1160 | } | ||
1161 | |||
1162 | /* | ||
1163 | * Read the performance counter - simple non-blocking version for now | ||
1164 | */ | ||
1165 | static ssize_t | ||
1166 | perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) | ||
1167 | { | ||
1168 | u64 cntval; | ||
1169 | |||
1170 | if (count != sizeof(cntval)) | ||
1171 | return -EINVAL; | ||
1172 | |||
1173 | /* | ||
1174 | * Return end-of-file for a read on a counter that is in | ||
1175 | * error state (i.e. because it was pinned but it couldn't be | ||
1176 | * scheduled on to the CPU at some point). | ||
1177 | */ | ||
1178 | if (counter->state == PERF_COUNTER_STATE_ERROR) | ||
1179 | return 0; | ||
1180 | |||
1181 | mutex_lock(&counter->mutex); | ||
1182 | cntval = perf_counter_read(counter); | ||
1183 | mutex_unlock(&counter->mutex); | ||
1184 | |||
1185 | return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval); | ||
1186 | } | ||
1187 | |||
1188 | static ssize_t | ||
1189 | perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count) | ||
1190 | { | ||
1191 | if (!usrdata->len) | ||
1192 | return 0; | ||
1193 | |||
1194 | count = min(count, (size_t)usrdata->len); | ||
1195 | if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count)) | ||
1196 | return -EFAULT; | ||
1197 | |||
1198 | /* Adjust the counters */ | ||
1199 | usrdata->len -= count; | ||
1200 | if (!usrdata->len) | ||
1201 | usrdata->rd_idx = 0; | ||
1202 | else | ||
1203 | usrdata->rd_idx += count; | ||
1204 | |||
1205 | return count; | ||
1206 | } | ||
1207 | |||
1208 | static ssize_t | ||
1209 | perf_read_irq_data(struct perf_counter *counter, | ||
1210 | char __user *buf, | ||
1211 | size_t count, | ||
1212 | int nonblocking) | ||
1213 | { | ||
1214 | struct perf_data *irqdata, *usrdata; | ||
1215 | DECLARE_WAITQUEUE(wait, current); | ||
1216 | ssize_t res, res2; | ||
1217 | |||
1218 | irqdata = counter->irqdata; | ||
1219 | usrdata = counter->usrdata; | ||
1220 | |||
1221 | if (usrdata->len + irqdata->len >= count) | ||
1222 | goto read_pending; | ||
1223 | |||
1224 | if (nonblocking) | ||
1225 | return -EAGAIN; | ||
1226 | |||
1227 | spin_lock_irq(&counter->waitq.lock); | ||
1228 | __add_wait_queue(&counter->waitq, &wait); | ||
1229 | for (;;) { | ||
1230 | set_current_state(TASK_INTERRUPTIBLE); | ||
1231 | if (usrdata->len + irqdata->len >= count) | ||
1232 | break; | ||
1233 | |||
1234 | if (signal_pending(current)) | ||
1235 | break; | ||
1236 | |||
1237 | if (counter->state == PERF_COUNTER_STATE_ERROR) | ||
1238 | break; | ||
1239 | |||
1240 | spin_unlock_irq(&counter->waitq.lock); | ||
1241 | schedule(); | ||
1242 | spin_lock_irq(&counter->waitq.lock); | ||
1243 | } | ||
1244 | __remove_wait_queue(&counter->waitq, &wait); | ||
1245 | __set_current_state(TASK_RUNNING); | ||
1246 | spin_unlock_irq(&counter->waitq.lock); | ||
1247 | |||
1248 | if (usrdata->len + irqdata->len < count && | ||
1249 | counter->state != PERF_COUNTER_STATE_ERROR) | ||
1250 | return -ERESTARTSYS; | ||
1251 | read_pending: | ||
1252 | mutex_lock(&counter->mutex); | ||
1253 | |||
1254 | /* Drain pending data first: */ | ||
1255 | res = perf_copy_usrdata(usrdata, buf, count); | ||
1256 | if (res < 0 || res == count) | ||
1257 | goto out; | ||
1258 | |||
1259 | /* Switch irq buffer: */ | ||
1260 | usrdata = perf_switch_irq_data(counter); | ||
1261 | res2 = perf_copy_usrdata(usrdata, buf + res, count - res); | ||
1262 | if (res2 < 0) { | ||
1263 | if (!res) | ||
1264 | res = -EFAULT; | ||
1265 | } else { | ||
1266 | res += res2; | ||
1267 | } | ||
1268 | out: | ||
1269 | mutex_unlock(&counter->mutex); | ||
1270 | |||
1271 | return res; | ||
1272 | } | ||
1273 | |||
1274 | static ssize_t | ||
1275 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | ||
1276 | { | ||
1277 | struct perf_counter *counter = file->private_data; | ||
1278 | |||
1279 | switch (counter->hw_event.record_type) { | ||
1280 | case PERF_RECORD_SIMPLE: | ||
1281 | return perf_read_hw(counter, buf, count); | ||
1282 | |||
1283 | case PERF_RECORD_IRQ: | ||
1284 | case PERF_RECORD_GROUP: | ||
1285 | return perf_read_irq_data(counter, buf, count, | ||
1286 | file->f_flags & O_NONBLOCK); | ||
1287 | } | ||
1288 | return -EINVAL; | ||
1289 | } | ||
1290 | |||
1291 | static unsigned int perf_poll(struct file *file, poll_table *wait) | ||
1292 | { | ||
1293 | struct perf_counter *counter = file->private_data; | ||
1294 | unsigned int events = 0; | ||
1295 | unsigned long flags; | ||
1296 | |||
1297 | poll_wait(file, &counter->waitq, wait); | ||
1298 | |||
1299 | spin_lock_irqsave(&counter->waitq.lock, flags); | ||
1300 | if (counter->usrdata->len || counter->irqdata->len) | ||
1301 | events |= POLLIN; | ||
1302 | spin_unlock_irqrestore(&counter->waitq.lock, flags); | ||
1303 | |||
1304 | return events; | ||
1305 | } | ||
1306 | |||
1307 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
1308 | { | ||
1309 | struct perf_counter *counter = file->private_data; | ||
1310 | int err = 0; | ||
1311 | |||
1312 | switch (cmd) { | ||
1313 | case PERF_COUNTER_IOC_ENABLE: | ||
1314 | perf_counter_enable_family(counter); | ||
1315 | break; | ||
1316 | case PERF_COUNTER_IOC_DISABLE: | ||
1317 | perf_counter_disable_family(counter); | ||
1318 | break; | ||
1319 | default: | ||
1320 | err = -ENOTTY; | ||
1321 | } | ||
1322 | return err; | ||
1323 | } | ||
1324 | |||
1325 | static const struct file_operations perf_fops = { | ||
1326 | .release = perf_release, | ||
1327 | .read = perf_read, | ||
1328 | .poll = perf_poll, | ||
1329 | .unlocked_ioctl = perf_ioctl, | ||
1330 | .compat_ioctl = perf_ioctl, | ||
1331 | }; | ||
1332 | |||
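A hedged sketch of driving the file operations above from userspace. How the counter file descriptor is obtained (the new syscall) is outside the part of the patch shown here, so it is taken as a parameter; whether <linux/perf_counter.h> is directly usable from userspace is an assumption. The ioctl and read behaviour follows perf_ioctl() and perf_read_hw() directly:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/perf_counter.h>		/* PERF_COUNTER_IOC_* */

static void sample_counter(int counter_fd)
{
	uint64_t count;

	ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE);	/* perf_counter_enable_family() */
	/* ... run the workload to be measured ... */
	ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE);	/* perf_counter_disable_family() */

	/* PERF_RECORD_SIMPLE counters hand back exactly one u64 per read() */
	if (read(counter_fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);
}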
1333 | static int cpu_clock_perf_counter_enable(struct perf_counter *counter) | ||
1334 | { | ||
1335 | int cpu = raw_smp_processor_id(); | ||
1336 | |||
1337 | atomic64_set(&counter->hw.prev_count, cpu_clock(cpu)); | ||
1338 | return 0; | ||
1339 | } | ||
1340 | |||
1341 | static void cpu_clock_perf_counter_update(struct perf_counter *counter) | ||
1342 | { | ||
1343 | int cpu = raw_smp_processor_id(); | ||
1344 | s64 prev; | ||
1345 | u64 now; | ||
1346 | |||
1347 | now = cpu_clock(cpu); | ||
1348 | prev = atomic64_read(&counter->hw.prev_count); | ||
1349 | atomic64_set(&counter->hw.prev_count, now); | ||
1350 | atomic64_add(now - prev, &counter->count); | ||
1351 | } | ||
1352 | |||
1353 | static void cpu_clock_perf_counter_disable(struct perf_counter *counter) | ||
1354 | { | ||
1355 | cpu_clock_perf_counter_update(counter); | ||
1356 | } | ||
1357 | |||
1358 | static void cpu_clock_perf_counter_read(struct perf_counter *counter) | ||
1359 | { | ||
1360 | cpu_clock_perf_counter_update(counter); | ||
1361 | } | ||
1362 | |||
1363 | static const struct hw_perf_counter_ops perf_ops_cpu_clock = { | ||
1364 | .enable = cpu_clock_perf_counter_enable, | ||
1365 | .disable = cpu_clock_perf_counter_disable, | ||
1366 | .read = cpu_clock_perf_counter_read, | ||
1367 | }; | ||
1368 | |||
1369 | /* | ||
1370 | * Called from within the scheduler: | ||
1371 | */ | ||
1372 | static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update) | ||
1373 | { | ||
1374 | struct task_struct *curr = counter->task; | ||
1375 | u64 delta; | ||
1376 | |||
1377 | delta = __task_delta_exec(curr, update); | ||
1378 | |||
1379 | return curr->se.sum_exec_runtime + delta; | ||
1380 | } | ||
1381 | |||
1382 | static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) | ||
1383 | { | ||
1384 | u64 prev; | ||
1385 | s64 delta; | ||
1386 | |||
1387 | prev = atomic64_read(&counter->hw.prev_count); | ||
1388 | |||
1389 | atomic64_set(&counter->hw.prev_count, now); | ||
1390 | |||
1391 | delta = now - prev; | ||
1392 | |||
1393 | atomic64_add(delta, &counter->count); | ||
1394 | } | ||
1395 | |||
1396 | static void task_clock_perf_counter_read(struct perf_counter *counter) | ||
1397 | { | ||
1398 | u64 now = task_clock_perf_counter_val(counter, 1); | ||
1399 | |||
1400 | task_clock_perf_counter_update(counter, now); | ||
1401 | } | ||
1402 | |||
1403 | static int task_clock_perf_counter_enable(struct perf_counter *counter) | ||
1404 | { | ||
1405 | if (counter->prev_state <= PERF_COUNTER_STATE_OFF) | ||
1406 | atomic64_set(&counter->hw.prev_count, | ||
1407 | task_clock_perf_counter_val(counter, 0)); | ||
1408 | |||
1409 | return 0; | ||
1410 | } | ||
1411 | |||
1412 | static void task_clock_perf_counter_disable(struct perf_counter *counter) | ||
1413 | { | ||
1414 | u64 now = task_clock_perf_counter_val(counter, 0); | ||
1415 | |||
1416 | task_clock_perf_counter_update(counter, now); | ||
1417 | } | ||
1418 | |||
1419 | static const struct hw_perf_counter_ops perf_ops_task_clock = { | ||
1420 | .enable = task_clock_perf_counter_enable, | ||
1421 | .disable = task_clock_perf_counter_disable, | ||
1422 | .read = task_clock_perf_counter_read, | ||
1423 | }; | ||
1424 | |||
1425 | #ifdef CONFIG_VM_EVENT_COUNTERS | ||
1426 | #define cpu_page_faults() __get_cpu_var(vm_event_states).event[PGFAULT] | ||
1427 | #else | ||
1428 | #define cpu_page_faults() 0 | ||
1429 | #endif | ||
1430 | |||
1431 | static u64 get_page_faults(struct perf_counter *counter) | ||
1432 | { | ||
1433 | struct task_struct *curr = counter->ctx->task; | ||
1434 | |||
1435 | if (curr) | ||
1436 | return curr->maj_flt + curr->min_flt; | ||
1437 | return cpu_page_faults(); | ||
1438 | } | ||
1439 | |||
1440 | static void page_faults_perf_counter_update(struct perf_counter *counter) | ||
1441 | { | ||
1442 | u64 prev, now; | ||
1443 | s64 delta; | ||
1444 | |||
1445 | prev = atomic64_read(&counter->hw.prev_count); | ||
1446 | now = get_page_faults(counter); | ||
1447 | |||
1448 | atomic64_set(&counter->hw.prev_count, now); | ||
1449 | |||
1450 | delta = now - prev; | ||
1451 | |||
1452 | atomic64_add(delta, &counter->count); | ||
1453 | } | ||
1454 | |||
1455 | static void page_faults_perf_counter_read(struct perf_counter *counter) | ||
1456 | { | ||
1457 | page_faults_perf_counter_update(counter); | ||
1458 | } | ||
1459 | |||
1460 | static int page_faults_perf_counter_enable(struct perf_counter *counter) | ||
1461 | { | ||
1462 | if (counter->prev_state <= PERF_COUNTER_STATE_OFF) | ||
1463 | atomic64_set(&counter->hw.prev_count, get_page_faults(counter)); | ||
1464 | return 0; | ||
1465 | } | ||
1466 | |||
1467 | static void page_faults_perf_counter_disable(struct perf_counter *counter) | ||
1468 | { | ||
1469 | page_faults_perf_counter_update(counter); | ||
1470 | } | ||
1471 | |||
1472 | static const struct hw_perf_counter_ops perf_ops_page_faults = { | ||
1473 | .enable = page_faults_perf_counter_enable, | ||
1474 | .disable = page_faults_perf_counter_disable, | ||
1475 | .read = page_faults_perf_counter_read, | ||
1476 | }; | ||
1477 | |||
1478 | static u64 get_context_switches(struct perf_counter *counter) | ||
1479 | { | ||
1480 | struct task_struct *curr = counter->ctx->task; | ||
1481 | |||
1482 | if (curr) | ||
1483 | return curr->nvcsw + curr->nivcsw; | ||
1484 | return cpu_nr_switches(smp_processor_id()); | ||
1485 | } | ||
1486 | |||
1487 | static void context_switches_perf_counter_update(struct perf_counter *counter) | ||
1488 | { | ||
1489 | u64 prev, now; | ||
1490 | s64 delta; | ||
1491 | |||
1492 | prev = atomic64_read(&counter->hw.prev_count); | ||
1493 | now = get_context_switches(counter); | ||
1494 | |||
1495 | atomic64_set(&counter->hw.prev_count, now); | ||
1496 | |||
1497 | delta = now - prev; | ||
1498 | |||
1499 | atomic64_add(delta, &counter->count); | ||
1500 | } | ||
1501 | |||
1502 | static void context_switches_perf_counter_read(struct perf_counter *counter) | ||
1503 | { | ||
1504 | context_switches_perf_counter_update(counter); | ||
1505 | } | ||
1506 | |||
1507 | static int context_switches_perf_counter_enable(struct perf_counter *counter) | ||
1508 | { | ||
1509 | if (counter->prev_state <= PERF_COUNTER_STATE_OFF) | ||
1510 | atomic64_set(&counter->hw.prev_count, | ||
1511 | get_context_switches(counter)); | ||
1512 | return 0; | ||
1513 | } | ||
1514 | |||
1515 | static void context_switches_perf_counter_disable(struct perf_counter *counter) | ||
1516 | { | ||
1517 | context_switches_perf_counter_update(counter); | ||
1518 | } | ||
1519 | |||
1520 | static const struct hw_perf_counter_ops perf_ops_context_switches = { | ||
1521 | .enable = context_switches_perf_counter_enable, | ||
1522 | .disable = context_switches_perf_counter_disable, | ||
1523 | .read = context_switches_perf_counter_read, | ||
1524 | }; | ||
1525 | |||
1526 | static inline u64 get_cpu_migrations(struct perf_counter *counter) | ||
1527 | { | ||
1528 | struct task_struct *curr = counter->ctx->task; | ||
1529 | |||
1530 | if (curr) | ||
1531 | return curr->se.nr_migrations; | ||
1532 | return cpu_nr_migrations(smp_processor_id()); | ||
1533 | } | ||
1534 | |||
1535 | static void cpu_migrations_perf_counter_update(struct perf_counter *counter) | ||
1536 | { | ||
1537 | u64 prev, now; | ||
1538 | s64 delta; | ||
1539 | |||
1540 | prev = atomic64_read(&counter->hw.prev_count); | ||
1541 | now = get_cpu_migrations(counter); | ||
1542 | |||
1543 | atomic64_set(&counter->hw.prev_count, now); | ||
1544 | |||
1545 | delta = now - prev; | ||
1546 | |||
1547 | atomic64_add(delta, &counter->count); | ||
1548 | } | ||
1549 | |||
1550 | static void cpu_migrations_perf_counter_read(struct perf_counter *counter) | ||
1551 | { | ||
1552 | cpu_migrations_perf_counter_update(counter); | ||
1553 | } | ||
1554 | |||
1555 | static int cpu_migrations_perf_counter_enable(struct perf_counter *counter) | ||
1556 | { | ||
1557 | if (counter->prev_state <= PERF_COUNTER_STATE_OFF) | ||
1558 | atomic64_set(&counter->hw.prev_count, | ||
1559 | get_cpu_migrations(counter)); | ||
1560 | return 0; | ||
1561 | } | ||
1562 | |||
1563 | static void cpu_migrations_perf_counter_disable(struct perf_counter *counter) | ||
1564 | { | ||
1565 | cpu_migrations_perf_counter_update(counter); | ||
1566 | } | ||
1567 | |||
1568 | static const struct hw_perf_counter_ops perf_ops_cpu_migrations = { | ||
1569 | .enable = cpu_migrations_perf_counter_enable, | ||
1570 | .disable = cpu_migrations_perf_counter_disable, | ||
1571 | .read = cpu_migrations_perf_counter_read, | ||
1572 | }; | ||
1573 | |||
1574 | static const struct hw_perf_counter_ops * | ||
1575 | sw_perf_counter_init(struct perf_counter *counter) | ||
1576 | { | ||
1577 | const struct hw_perf_counter_ops *hw_ops = NULL; | ||
1578 | |||
1579 | /* | ||
1580 | * Software counters (currently) can't in general distinguish | ||
1581 | * between user, kernel and hypervisor events. | ||
1582 | * However, context switches and cpu migrations are considered | ||
1583 | * to be kernel events, and page faults are never hypervisor | ||
1584 | * events. | ||
1585 | */ | ||
1586 | switch (counter->hw_event.type) { | ||
1587 | case PERF_COUNT_CPU_CLOCK: | ||
1588 | if (!(counter->hw_event.exclude_user || | ||
1589 | counter->hw_event.exclude_kernel || | ||
1590 | counter->hw_event.exclude_hv)) | ||
1591 | hw_ops = &perf_ops_cpu_clock; | ||
1592 | break; | ||
1593 | case PERF_COUNT_TASK_CLOCK: | ||
1594 | if (counter->hw_event.exclude_user || | ||
1595 | counter->hw_event.exclude_kernel || | ||
1596 | counter->hw_event.exclude_hv) | ||
1597 | break; | ||
1598 | /* | ||
1599 | * If the user instantiates this as a per-cpu counter, | ||
1600 | * use the cpu_clock counter instead. | ||
1601 | */ | ||
1602 | if (counter->ctx->task) | ||
1603 | hw_ops = &perf_ops_task_clock; | ||
1604 | else | ||
1605 | hw_ops = &perf_ops_cpu_clock; | ||
1606 | break; | ||
1607 | case PERF_COUNT_PAGE_FAULTS: | ||
1608 | if (!(counter->hw_event.exclude_user || | ||
1609 | counter->hw_event.exclude_kernel)) | ||
1610 | hw_ops = &perf_ops_page_faults; | ||
1611 | break; | ||
1612 | case PERF_COUNT_CONTEXT_SWITCHES: | ||
1613 | if (!counter->hw_event.exclude_kernel) | ||
1614 | hw_ops = &perf_ops_context_switches; | ||
1615 | break; | ||
1616 | case PERF_COUNT_CPU_MIGRATIONS: | ||
1617 | if (!counter->hw_event.exclude_kernel) | ||
1618 | hw_ops = &perf_ops_cpu_migrations; | ||
1619 | break; | ||
1620 | default: | ||
1621 | break; | ||
1622 | } | ||
1623 | return hw_ops; | ||
1624 | } | ||
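
To make the exclusion policy in the comment above concrete, here is a hypothetical user-space configuration sketch (not part of this patch; the field and enum names are the ones used in the switch above, everything else is illustrative). Settings whose exclusions the software counter can honour get an ops pointer; anything else makes sw_perf_counter_init() return NULL and the open fails.

#include <linux/perf_counter.h>
#include <string.h>

/* Illustrative only: which exclude bits are compatible with the
 * software counters selected by the switch above. */
static void example_sw_event_config(void)
{
	struct perf_counter_hw_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.type = PERF_COUNT_CONTEXT_SWITCHES;
	ev.exclude_user = 1;	/* fine: context switches are kernel events */
	/* ev.exclude_kernel = 1 would make sw_perf_counter_init() fail */

	memset(&ev, 0, sizeof(ev));
	ev.type = PERF_COUNT_PAGE_FAULTS;
	ev.exclude_hv = 1;	/* fine: page faults are never hypervisor events */
	/* ev.exclude_user = 1 or ev.exclude_kernel = 1 would fail */
}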
1625 | |||
1626 | /* | ||
1627 | * Allocate and initialize a counter structure | ||
1628 | */ | ||
1629 | static struct perf_counter * | ||
1630 | perf_counter_alloc(struct perf_counter_hw_event *hw_event, | ||
1631 | int cpu, | ||
1632 | struct perf_counter_context *ctx, | ||
1633 | struct perf_counter *group_leader, | ||
1634 | gfp_t gfpflags) | ||
1635 | { | ||
1636 | const struct hw_perf_counter_ops *hw_ops; | ||
1637 | struct perf_counter *counter; | ||
1638 | |||
1639 | counter = kzalloc(sizeof(*counter), gfpflags); | ||
1640 | if (!counter) | ||
1641 | return NULL; | ||
1642 | |||
1643 | /* | ||
1644 | * Single counters are their own group leaders, with an | ||
1645 | * empty sibling list: | ||
1646 | */ | ||
1647 | if (!group_leader) | ||
1648 | group_leader = counter; | ||
1649 | |||
1650 | mutex_init(&counter->mutex); | ||
1651 | INIT_LIST_HEAD(&counter->list_entry); | ||
1652 | INIT_LIST_HEAD(&counter->sibling_list); | ||
1653 | init_waitqueue_head(&counter->waitq); | ||
1654 | |||
1655 | INIT_LIST_HEAD(&counter->child_list); | ||
1656 | |||
1657 | counter->irqdata = &counter->data[0]; | ||
1658 | counter->usrdata = &counter->data[1]; | ||
1659 | counter->cpu = cpu; | ||
1660 | counter->hw_event = *hw_event; | ||
1661 | counter->wakeup_pending = 0; | ||
1662 | counter->group_leader = group_leader; | ||
1663 | counter->hw_ops = NULL; | ||
1664 | counter->ctx = ctx; | ||
1665 | |||
1666 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
1667 | if (hw_event->disabled) | ||
1668 | counter->state = PERF_COUNTER_STATE_OFF; | ||
1669 | |||
1670 | hw_ops = NULL; | ||
1671 | if (!hw_event->raw && hw_event->type < 0) | ||
1672 | hw_ops = sw_perf_counter_init(counter); | ||
1673 | else | ||
1674 | hw_ops = hw_perf_counter_init(counter); | ||
1675 | |||
1676 | if (!hw_ops) { | ||
1677 | kfree(counter); | ||
1678 | return NULL; | ||
1679 | } | ||
1680 | counter->hw_ops = hw_ops; | ||
1681 | |||
1682 | return counter; | ||
1683 | } | ||
1684 | |||
1685 | /** | ||
1686 | * sys_perf_counter_open - open a performance counter, associate it to a task/cpu | ||
1687 | * | ||
1688 | * @hw_event_uptr: event type attributes for monitoring/sampling | ||
1689 | * @pid: target pid | ||
1690 | * @cpu: target cpu | ||
1691 | * @group_fd: group leader counter fd | ||
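| * @flags: reserved for future use, must be zero | ||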
1692 | */ | ||
1693 | SYSCALL_DEFINE5(perf_counter_open, | ||
1694 | const struct perf_counter_hw_event __user *, hw_event_uptr, | ||
1695 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) | ||
1696 | { | ||
1697 | struct perf_counter *counter, *group_leader; | ||
1698 | struct perf_counter_hw_event hw_event; | ||
1699 | struct perf_counter_context *ctx; | ||
1700 | struct file *counter_file = NULL; | ||
1701 | struct file *group_file = NULL; | ||
1702 | int fput_needed = 0; | ||
1703 | int fput_needed2 = 0; | ||
1704 | int ret; | ||
1705 | |||
1706 | /* for future expandability... */ | ||
1707 | if (flags) | ||
1708 | return -EINVAL; | ||
1709 | |||
1710 | if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0) | ||
1711 | return -EFAULT; | ||
1712 | |||
1713 | /* | ||
1714 | * Get the target context (task or percpu): | ||
1715 | */ | ||
1716 | ctx = find_get_context(pid, cpu); | ||
1717 | if (IS_ERR(ctx)) | ||
1718 | return PTR_ERR(ctx); | ||
1719 | |||
1720 | /* | ||
1721 | * Look up the group leader (we will attach this counter to it): | ||
1722 | */ | ||
1723 | group_leader = NULL; | ||
1724 | if (group_fd != -1) { | ||
1725 | ret = -EINVAL; | ||
1726 | group_file = fget_light(group_fd, &fput_needed); | ||
1727 | if (!group_file) | ||
1728 | goto err_put_context; | ||
1729 | if (group_file->f_op != &perf_fops) | ||
1730 | goto err_put_context; | ||
1731 | |||
1732 | group_leader = group_file->private_data; | ||
1733 | /* | ||
1734 | * Do not allow a recursive hierarchy (this new sibling | ||
1735 | * becoming part of another group-sibling): | ||
1736 | */ | ||
1737 | if (group_leader->group_leader != group_leader) | ||
1738 | goto err_put_context; | ||
1739 | /* | ||
1740 | * Do not allow attaching to a group in a different | ||
1741 | * task or CPU context: | ||
1742 | */ | ||
1743 | if (group_leader->ctx != ctx) | ||
1744 | goto err_put_context; | ||
1745 | /* | ||
1746 | * Only a group leader can be exclusive or pinned | ||
1747 | */ | ||
1748 | if (hw_event.exclusive || hw_event.pinned) | ||
1749 | goto err_put_context; | ||
1750 | } | ||
1751 | |||
1752 | ret = -EINVAL; | ||
1753 | counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader, | ||
1754 | GFP_KERNEL); | ||
1755 | if (!counter) | ||
1756 | goto err_put_context; | ||
1757 | |||
1758 | ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); | ||
1759 | if (ret < 0) | ||
1760 | goto err_free_put_context; | ||
1761 | |||
1762 | counter_file = fget_light(ret, &fput_needed2); | ||
1763 | if (!counter_file) | ||
1764 | goto err_free_put_context; | ||
1765 | |||
1766 | counter->filp = counter_file; | ||
1767 | mutex_lock(&ctx->mutex); | ||
1768 | perf_install_in_context(ctx, counter, cpu); | ||
1769 | mutex_unlock(&ctx->mutex); | ||
1770 | |||
1771 | fput_light(counter_file, fput_needed2); | ||
1772 | |||
1773 | out_fput: | ||
1774 | fput_light(group_file, fput_needed); | ||
1775 | |||
1776 | return ret; | ||
1777 | |||
1778 | err_free_put_context: | ||
1779 | kfree(counter); | ||
1780 | |||
1781 | err_put_context: | ||
1782 | put_context(ctx); | ||
1783 | |||
1784 | goto out_fput; | ||
1785 | } | ||
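
For reference, a minimal user-space sketch of the new system call (hypothetical, not part of this patch). It assumes the patched <linux/perf_counter.h> and the __NR_perf_counter_open number from the patched unistd headers are visible to user space, and that a plain 8-byte read() returns the current counter value, as the perf_fops in this file implement. pid 0 selects the current task, cpu -1 selects a per-task counter, group_fd -1 means no group, and flags must be zero.

#include <linux/perf_counter.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_counter_hw_event ev;
	uint64_t count = 0;
	int fd;

	memset(&ev, 0, sizeof(ev));
	ev.type = PERF_COUNT_CONTEXT_SWITCHES;	/* software counter */

	/* counter on the current task, any cpu, no group, no flags */
	fd = syscall(__NR_perf_counter_open, &ev, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_counter_open");
		return 1;
	}

	sleep(1);				/* generate some context switches */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("context switches: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}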
1786 | |||
1787 | /* | ||
1788 | * Initialize the perf_counter context in a task_struct: | ||
1789 | */ | ||
1790 | static void | ||
1791 | __perf_counter_init_context(struct perf_counter_context *ctx, | ||
1792 | struct task_struct *task) | ||
1793 | { | ||
1794 | memset(ctx, 0, sizeof(*ctx)); | ||
1795 | spin_lock_init(&ctx->lock); | ||
1796 | mutex_init(&ctx->mutex); | ||
1797 | INIT_LIST_HEAD(&ctx->counter_list); | ||
1798 | ctx->task = task; | ||
1799 | } | ||
1800 | |||
1801 | /* | ||
1802 | * inherit a counter from parent task to child task: | ||
1803 | */ | ||
1804 | static struct perf_counter * | ||
1805 | inherit_counter(struct perf_counter *parent_counter, | ||
1806 | struct task_struct *parent, | ||
1807 | struct perf_counter_context *parent_ctx, | ||
1808 | struct task_struct *child, | ||
1809 | struct perf_counter *group_leader, | ||
1810 | struct perf_counter_context *child_ctx) | ||
1811 | { | ||
1812 | struct perf_counter *child_counter; | ||
1813 | |||
1814 | /* | ||
1815 | * Instead of creating recursive hierarchies of counters, | ||
1816 | * we link inherited counters back to the original parent, | ||
1817 | * which is guaranteed to have a filp that we use as the | ||
1818 | * reference count: | ||
1819 | */ | ||
1820 | if (parent_counter->parent) | ||
1821 | parent_counter = parent_counter->parent; | ||
1822 | |||
1823 | child_counter = perf_counter_alloc(&parent_counter->hw_event, | ||
1824 | parent_counter->cpu, child_ctx, | ||
1825 | group_leader, GFP_KERNEL); | ||
1826 | if (!child_counter) | ||
1827 | return NULL; | ||
1828 | |||
1829 | /* | ||
1830 | * Link it up in the child's context: | ||
1831 | */ | ||
1832 | child_counter->task = child; | ||
1833 | list_add_counter(child_counter, child_ctx); | ||
1834 | child_ctx->nr_counters++; | ||
1835 | |||
1836 | child_counter->parent = parent_counter; | ||
1837 | /* | ||
1838 | * inherit into child's child as well: | ||
1839 | */ | ||
1840 | child_counter->hw_event.inherit = 1; | ||
1841 | |||
1842 | /* | ||
1843 | * Get a reference to the parent filp - we will fput it | ||
1844 | * when the child counter exits. This is safe to do because | ||
1845 | * we are in the parent and we know that the filp still | ||
1846 | * exists and has a nonzero count: | ||
1847 | */ | ||
1848 | atomic_long_inc(&parent_counter->filp->f_count); | ||
1849 | |||
1850 | /* | ||
1851 | * Link this into the parent counter's child list | ||
1852 | */ | ||
1853 | mutex_lock(&parent_counter->mutex); | ||
1854 | list_add_tail(&child_counter->child_list, &parent_counter->child_list); | ||
1855 | |||
1856 | /* | ||
1857 | * Make the child state follow the state of the parent counter, | ||
1858 | * not its hw_event.disabled bit. We hold the parent's mutex, | ||
1859 | * so we won't race with perf_counter_{en,dis}able_family. | ||
1860 | */ | ||
1861 | if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
1862 | child_counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
1863 | else | ||
1864 | child_counter->state = PERF_COUNTER_STATE_OFF; | ||
1865 | |||
1866 | mutex_unlock(&parent_counter->mutex); | ||
1867 | |||
1868 | return child_counter; | ||
1869 | } | ||
1870 | |||
1871 | static int inherit_group(struct perf_counter *parent_counter, | ||
1872 | struct task_struct *parent, | ||
1873 | struct perf_counter_context *parent_ctx, | ||
1874 | struct task_struct *child, | ||
1875 | struct perf_counter_context *child_ctx) | ||
1876 | { | ||
1877 | struct perf_counter *leader; | ||
1878 | struct perf_counter *sub; | ||
1879 | |||
1880 | leader = inherit_counter(parent_counter, parent, parent_ctx, | ||
1881 | child, NULL, child_ctx); | ||
1882 | if (!leader) | ||
1883 | return -ENOMEM; | ||
1884 | list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { | ||
1885 | if (!inherit_counter(sub, parent, parent_ctx, | ||
1886 | child, leader, child_ctx)) | ||
1887 | return -ENOMEM; | ||
1888 | } | ||
1889 | return 0; | ||
1890 | } | ||
1891 | |||
1892 | static void sync_child_counter(struct perf_counter *child_counter, | ||
1893 | struct perf_counter *parent_counter) | ||
1894 | { | ||
1895 | u64 parent_val, child_val; | ||
1896 | |||
1897 | parent_val = atomic64_read(&parent_counter->count); | ||
1898 | child_val = atomic64_read(&child_counter->count); | ||
1899 | |||
1900 | /* | ||
1901 | * Add back the child's count to the parent's count: | ||
1902 | */ | ||
1903 | atomic64_add(child_val, &parent_counter->count); | ||
1904 | |||
1905 | /* | ||
1906 | * Remove this counter from the parent's list | ||
1907 | */ | ||
1908 | mutex_lock(&parent_counter->mutex); | ||
1909 | list_del_init(&child_counter->child_list); | ||
1910 | mutex_unlock(&parent_counter->mutex); | ||
1911 | |||
1912 | /* | ||
1913 | * Release the parent counter, if this was the last | ||
1914 | * reference to it. | ||
1915 | */ | ||
1916 | fput(parent_counter->filp); | ||
1917 | } | ||
1918 | |||
1919 | static void | ||
1920 | __perf_counter_exit_task(struct task_struct *child, | ||
1921 | struct perf_counter *child_counter, | ||
1922 | struct perf_counter_context *child_ctx) | ||
1923 | { | ||
1924 | struct perf_counter *parent_counter; | ||
1925 | struct perf_counter *sub, *tmp; | ||
1926 | |||
1927 | /* | ||
1928 | * If we do not self-reap then we have to wait for the | ||
1929 | * child task to unschedule (it will happen for sure), | ||
1930 | * so that its counter is at its final count. (This | ||
1931 | * condition triggers rarely - child tasks usually get | ||
1932 | * off their CPU before the parent has a chance to | ||
1933 | * get this far into the reaping action) | ||
1934 | */ | ||
1935 | if (child != current) { | ||
1936 | wait_task_inactive(child, 0); | ||
1937 | list_del_init(&child_counter->list_entry); | ||
1938 | } else { | ||
1939 | struct perf_cpu_context *cpuctx; | ||
1940 | unsigned long flags; | ||
1941 | u64 perf_flags; | ||
1942 | |||
1943 | /* | ||
1944 | * Disable and unlink this counter. | ||
1945 | * | ||
1946 | * Be careful about zapping the list - IRQ/NMI context | ||
1947 | * could still be processing it: | ||
1948 | */ | ||
1949 | curr_rq_lock_irq_save(&flags); | ||
1950 | perf_flags = hw_perf_save_disable(); | ||
1951 | |||
1952 | cpuctx = &__get_cpu_var(perf_cpu_context); | ||
1953 | |||
1954 | group_sched_out(child_counter, cpuctx, child_ctx); | ||
1955 | |||
1956 | list_del_init(&child_counter->list_entry); | ||
1957 | |||
1958 | child_ctx->nr_counters--; | ||
1959 | |||
1960 | hw_perf_restore(perf_flags); | ||
1961 | curr_rq_unlock_irq_restore(&flags); | ||
1962 | } | ||
1963 | |||
1964 | parent_counter = child_counter->parent; | ||
1965 | /* | ||
1966 | * It can happen that the parent exits first, and its counters | ||
1967 | * are still around due to the child's reference. These | ||
1968 | * counters need to be zapped here - otherwise they would linger. | ||
1969 | */ | ||
1970 | if (parent_counter) { | ||
1971 | sync_child_counter(child_counter, parent_counter); | ||
1972 | list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list, | ||
1973 | list_entry) { | ||
1974 | if (sub->parent) { | ||
1975 | sync_child_counter(sub, sub->parent); | ||
1976 | kfree(sub); | ||
1977 | } | ||
1978 | } | ||
1979 | kfree(child_counter); | ||
1980 | } | ||
1981 | } | ||
1982 | |||
1983 | /* | ||
1984 | * When a child task exits, feed back counter values to parent counters. | ||
1985 | * | ||
1986 | * Note: we may be running in child context, but the PID is not hashed | ||
1987 | * anymore so new counters will not be added. | ||
1988 | */ | ||
1989 | void perf_counter_exit_task(struct task_struct *child) | ||
1990 | { | ||
1991 | struct perf_counter *child_counter, *tmp; | ||
1992 | struct perf_counter_context *child_ctx; | ||
1993 | |||
1994 | child_ctx = &child->perf_counter_ctx; | ||
1995 | |||
1996 | if (likely(!child_ctx->nr_counters)) | ||
1997 | return; | ||
1998 | |||
1999 | list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, | ||
2000 | list_entry) | ||
2001 | __perf_counter_exit_task(child, child_counter, child_ctx); | ||
2002 | } | ||
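
A hypothetical user-space sketch of the inheritance path handled above (not part of this patch; same header and syscall-number assumptions as the earlier example): opening a counter with hw_event.inherit = 1 makes perf_counter_init_task() clone it into children at fork, and when a child is reaped its delta is folded back into the parent counter by sync_child_counter(), so a single read() in the parent covers both tasks.

#include <linux/perf_counter.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_counter_hw_event ev;
	uint64_t count = 0;
	int fd;

	memset(&ev, 0, sizeof(ev));
	ev.type = PERF_COUNT_PAGE_FAULTS;
	ev.inherit = 1;			/* clone the counter into children */

	fd = syscall(__NR_perf_counter_open, &ev, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	if (fork() == 0) {
		/* child: page faults are counted on the inherited clone */
		_exit(0);
	}
	wait(NULL);			/* reaping flushes the child's count back */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("page faults, parent + child: %llu\n",
		       (unsigned long long)count);

	close(fd);
	return 0;
}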
2003 | |||
2004 | /* | ||
2005 | * Initialize the perf_counter context in task_struct | ||
2006 | */ | ||
2007 | void perf_counter_init_task(struct task_struct *child) | ||
2008 | { | ||
2009 | struct perf_counter_context *child_ctx, *parent_ctx; | ||
2010 | struct perf_counter *counter; | ||
2011 | struct task_struct *parent = current; | ||
2012 | |||
2013 | child_ctx = &child->perf_counter_ctx; | ||
2014 | parent_ctx = &parent->perf_counter_ctx; | ||
2015 | |||
2016 | __perf_counter_init_context(child_ctx, child); | ||
2017 | |||
2018 | /* | ||
2019 | * This is executed from the parent task context, so inherit | ||
2020 | * counters that have been marked for cloning: | ||
2021 | */ | ||
2022 | |||
2023 | if (likely(!parent_ctx->nr_counters)) | ||
2024 | return; | ||
2025 | |||
2026 | /* | ||
2027 | * Lock the parent list. No need to lock the child - not PID | ||
2028 | * hashed yet and not running, so nobody can access it. | ||
2029 | */ | ||
2030 | mutex_lock(&parent_ctx->mutex); | ||
2031 | |||
2032 | /* | ||
2033 | * We don't have to disable NMIs - we are only looking at | ||
2034 | * the list, not manipulating it: | ||
2035 | */ | ||
2036 | list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) { | ||
2037 | if (!counter->hw_event.inherit) | ||
2038 | continue; | ||
2039 | |||
2040 | if (inherit_group(counter, parent, | ||
2041 | parent_ctx, child, child_ctx)) | ||
2042 | break; | ||
2043 | } | ||
2044 | |||
2045 | mutex_unlock(&parent_ctx->mutex); | ||
2046 | } | ||
2047 | |||
2048 | static void __cpuinit perf_counter_init_cpu(int cpu) | ||
2049 | { | ||
2050 | struct perf_cpu_context *cpuctx; | ||
2051 | |||
2052 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
2053 | __perf_counter_init_context(&cpuctx->ctx, NULL); | ||
2054 | |||
2055 | mutex_lock(&perf_resource_mutex); | ||
2056 | cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; | ||
2057 | mutex_unlock(&perf_resource_mutex); | ||
2058 | |||
2059 | hw_perf_counter_setup(cpu); | ||
2060 | } | ||
2061 | |||
2062 | #ifdef CONFIG_HOTPLUG_CPU | ||
2063 | static void __perf_counter_exit_cpu(void *info) | ||
2064 | { | ||
2065 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
2066 | struct perf_counter_context *ctx = &cpuctx->ctx; | ||
2067 | struct perf_counter *counter, *tmp; | ||
2068 | |||
2069 | list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) | ||
2070 | __perf_counter_remove_from_context(counter); | ||
2071 | } | ||
2072 | static void perf_counter_exit_cpu(int cpu) | ||
2073 | { | ||
2074 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
2075 | struct perf_counter_context *ctx = &cpuctx->ctx; | ||
2076 | |||
2077 | mutex_lock(&ctx->mutex); | ||
2078 | smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); | ||
2079 | mutex_unlock(&ctx->mutex); | ||
2080 | } | ||
2081 | #else | ||
2082 | static inline void perf_counter_exit_cpu(int cpu) { } | ||
2083 | #endif | ||
2084 | |||
2085 | static int __cpuinit | ||
2086 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | ||
2087 | { | ||
2088 | unsigned int cpu = (long)hcpu; | ||
2089 | |||
2090 | switch (action) { | ||
2091 | |||
2092 | case CPU_UP_PREPARE: | ||
2093 | case CPU_UP_PREPARE_FROZEN: | ||
2094 | perf_counter_init_cpu(cpu); | ||
2095 | break; | ||
2096 | |||
2097 | case CPU_DOWN_PREPARE: | ||
2098 | case CPU_DOWN_PREPARE_FROZEN: | ||
2099 | perf_counter_exit_cpu(cpu); | ||
2100 | break; | ||
2101 | |||
2102 | default: | ||
2103 | break; | ||
2104 | } | ||
2105 | |||
2106 | return NOTIFY_OK; | ||
2107 | } | ||
2108 | |||
2109 | static struct notifier_block __cpuinitdata perf_cpu_nb = { | ||
2110 | .notifier_call = perf_cpu_notify, | ||
2111 | }; | ||
2112 | |||
2113 | static int __init perf_counter_init(void) | ||
2114 | { | ||
2115 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, | ||
2116 | (void *)(long)smp_processor_id()); | ||
2117 | register_cpu_notifier(&perf_cpu_nb); | ||
2118 | |||
2119 | return 0; | ||
2120 | } | ||
2121 | early_initcall(perf_counter_init); | ||
2122 | |||
2123 | static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) | ||
2124 | { | ||
2125 | return sprintf(buf, "%d\n", perf_reserved_percpu); | ||
2126 | } | ||
2127 | |||
2128 | static ssize_t | ||
2129 | perf_set_reserve_percpu(struct sysdev_class *class, | ||
2130 | const char *buf, | ||
2131 | size_t count) | ||
2132 | { | ||
2133 | struct perf_cpu_context *cpuctx; | ||
2134 | unsigned long val; | ||
2135 | int err, cpu, mpt; | ||
2136 | |||
2137 | err = strict_strtoul(buf, 10, &val); | ||
2138 | if (err) | ||
2139 | return err; | ||
2140 | if (val > perf_max_counters) | ||
2141 | return -EINVAL; | ||
2142 | |||
2143 | mutex_lock(&perf_resource_mutex); | ||
2144 | perf_reserved_percpu = val; | ||
2145 | for_each_online_cpu(cpu) { | ||
2146 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
2147 | spin_lock_irq(&cpuctx->ctx.lock); | ||
2148 | mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, | ||
2149 | perf_max_counters - perf_reserved_percpu); | ||
2150 | cpuctx->max_pertask = mpt; | ||
2151 | spin_unlock_irq(&cpuctx->ctx.lock); | ||
2152 | } | ||
2153 | mutex_unlock(&perf_resource_mutex); | ||
2154 | |||
2155 | return count; | ||
2156 | } | ||
2157 | |||
2158 | static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) | ||
2159 | { | ||
2160 | return sprintf(buf, "%d\n", perf_overcommit); | ||
2161 | } | ||
2162 | |||
2163 | static ssize_t | ||
2164 | perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) | ||
2165 | { | ||
2166 | unsigned long val; | ||
2167 | int err; | ||
2168 | |||
2169 | err = strict_strtoul(buf, 10, &val); | ||
2170 | if (err) | ||
2171 | return err; | ||
2172 | if (val > 1) | ||
2173 | return -EINVAL; | ||
2174 | |||
2175 | mutex_lock(&perf_resource_mutex); | ||
2176 | perf_overcommit = val; | ||
2177 | mutex_unlock(&perf_resource_mutex); | ||
2178 | |||
2179 | return count; | ||
2180 | } | ||
2181 | |||
2182 | static SYSDEV_CLASS_ATTR( | ||
2183 | reserve_percpu, | ||
2184 | 0644, | ||
2185 | perf_show_reserve_percpu, | ||
2186 | perf_set_reserve_percpu | ||
2187 | ); | ||
2188 | |||
2189 | static SYSDEV_CLASS_ATTR( | ||
2190 | overcommit, | ||
2191 | 0644, | ||
2192 | perf_show_overcommit, | ||
2193 | perf_set_overcommit | ||
2194 | ); | ||
2195 | |||
2196 | static struct attribute *perfclass_attrs[] = { | ||
2197 | &attr_reserve_percpu.attr, | ||
2198 | &attr_overcommit.attr, | ||
2199 | NULL | ||
2200 | }; | ||
2201 | |||
2202 | static struct attribute_group perfclass_attr_group = { | ||
2203 | .attrs = perfclass_attrs, | ||
2204 | .name = "perf_counters", | ||
2205 | }; | ||
2206 | |||
2207 | static int __init perf_counter_sysfs_init(void) | ||
2208 | { | ||
2209 | return sysfs_create_group(&cpu_sysdev_class.kset.kobj, | ||
2210 | &perfclass_attr_group); | ||
2211 | } | ||
2212 | device_initcall(perf_counter_sysfs_init); | ||
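
The attribute group above is registered on the cpu sysdev class; assuming the usual sysfs layout, the files appear as /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit} (the path is inferred, not spelled out in this patch). A minimal sketch of raising the per-cpu reservation from user space, which via perf_set_reserve_percpu() shrinks the per-task limit (max_pertask) on every online CPU:

#include <stdio.h>

/* Path inferred from the "perf_counters" group registered on
 * cpu_sysdev_class above - verify it on a running system. */
#define RESERVE_PATH "/sys/devices/system/cpu/perf_counters/reserve_percpu"

int main(void)
{
	FILE *f = fopen(RESERVE_PATH, "w");

	if (!f)
		return 1;
	fprintf(f, "2\n");	/* keep 2 counters per CPU out of the per-task pool */
	return fclose(f) ? 1 : 0;
}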
diff --git a/kernel/sched.c b/kernel/sched.c index bec249885e17..39e708602169 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -577,6 +577,7 @@ struct rq { | |||
577 | struct load_weight load; | 577 | struct load_weight load; |
578 | unsigned long nr_load_updates; | 578 | unsigned long nr_load_updates; |
579 | u64 nr_switches; | 579 | u64 nr_switches; |
580 | u64 nr_migrations_in; | ||
580 | 581 | ||
581 | struct cfs_rq cfs; | 582 | struct cfs_rq cfs; |
582 | struct rt_rq rt; | 583 | struct rt_rq rt; |
@@ -685,7 +686,7 @@ static inline int cpu_of(struct rq *rq) | |||
685 | #define task_rq(p) cpu_rq(task_cpu(p)) | 686 | #define task_rq(p) cpu_rq(task_cpu(p)) |
686 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) | 687 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) |
687 | 688 | ||
688 | static inline void update_rq_clock(struct rq *rq) | 689 | inline void update_rq_clock(struct rq *rq) |
689 | { | 690 | { |
690 | rq->clock = sched_clock_cpu(cpu_of(rq)); | 691 | rq->clock = sched_clock_cpu(cpu_of(rq)); |
691 | } | 692 | } |
@@ -996,6 +997,26 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) | |||
996 | } | 997 | } |
997 | } | 998 | } |
998 | 999 | ||
1000 | void curr_rq_lock_irq_save(unsigned long *flags) | ||
1001 | __acquires(rq->lock) | ||
1002 | { | ||
1003 | struct rq *rq; | ||
1004 | |||
1005 | local_irq_save(*flags); | ||
1006 | rq = cpu_rq(smp_processor_id()); | ||
1007 | spin_lock(&rq->lock); | ||
1008 | } | ||
1009 | |||
1010 | void curr_rq_unlock_irq_restore(unsigned long *flags) | ||
1011 | __releases(rq->lock) | ||
1012 | { | ||
1013 | struct rq *rq; | ||
1014 | |||
1015 | rq = cpu_rq(smp_processor_id()); | ||
1016 | spin_unlock(&rq->lock); | ||
1017 | local_irq_restore(*flags); | ||
1018 | } | ||
1019 | |||
999 | void task_rq_unlock_wait(struct task_struct *p) | 1020 | void task_rq_unlock_wait(struct task_struct *p) |
1000 | { | 1021 | { |
1001 | struct rq *rq = task_rq(p); | 1022 | struct rq *rq = task_rq(p); |
@@ -1947,12 +1968,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
1947 | p->se.sleep_start -= clock_offset; | 1968 | p->se.sleep_start -= clock_offset; |
1948 | if (p->se.block_start) | 1969 | if (p->se.block_start) |
1949 | p->se.block_start -= clock_offset; | 1970 | p->se.block_start -= clock_offset; |
1971 | #endif | ||
1950 | if (old_cpu != new_cpu) { | 1972 | if (old_cpu != new_cpu) { |
1951 | schedstat_inc(p, se.nr_migrations); | 1973 | p->se.nr_migrations++; |
1974 | new_rq->nr_migrations_in++; | ||
1975 | #ifdef CONFIG_SCHEDSTATS | ||
1952 | if (task_hot(p, old_rq->clock, NULL)) | 1976 | if (task_hot(p, old_rq->clock, NULL)) |
1953 | schedstat_inc(p, se.nr_forced2_migrations); | 1977 | schedstat_inc(p, se.nr_forced2_migrations); |
1954 | } | ||
1955 | #endif | 1978 | #endif |
1979 | } | ||
1956 | p->se.vruntime -= old_cfsrq->min_vruntime - | 1980 | p->se.vruntime -= old_cfsrq->min_vruntime - |
1957 | new_cfsrq->min_vruntime; | 1981 | new_cfsrq->min_vruntime; |
1958 | 1982 | ||
@@ -2304,6 +2328,27 @@ static int sched_balance_self(int cpu, int flag) | |||
2304 | 2328 | ||
2305 | #endif /* CONFIG_SMP */ | 2329 | #endif /* CONFIG_SMP */ |
2306 | 2330 | ||
2331 | /** | ||
2332 | * task_oncpu_function_call - call a function on the cpu on which a task runs | ||
2333 | * @p: the task to evaluate | ||
2334 | * @func: the function to be called | ||
2335 | * @info: the function call argument | ||
2336 | * | ||
2337 | * Calls the function @func when the task is currently running. This might | ||
2338 | * be on the current CPU, in which case the function is called directly. | ||
2339 | */ | ||
2340 | void task_oncpu_function_call(struct task_struct *p, | ||
2341 | void (*func) (void *info), void *info) | ||
2342 | { | ||
2343 | int cpu; | ||
2344 | |||
2345 | preempt_disable(); | ||
2346 | cpu = task_cpu(p); | ||
2347 | if (task_curr(p)) | ||
2348 | smp_call_function_single(cpu, func, info, 1); | ||
2349 | preempt_enable(); | ||
2350 | } | ||
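
A hedged in-kernel usage sketch for the helper above (hypothetical caller, not from this patch; it assumes the declaration this patch adds to the scheduler headers): the perf_counter code uses it to run context-manipulation callbacks on whatever CPU the target task is currently executing on.

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>

/* Runs on the CPU where @info (a task) is current, with IRQs disabled
 * when invoked via IPI. */
static void report_task_cpu(void *info)
{
	struct task_struct *p = info;

	printk(KERN_INFO "%s is running on cpu %d\n",
	       p->comm, smp_processor_id());
}

static void example_use(struct task_struct *p)
{
	/* Does nothing if @p is not currently running on any CPU. */
	task_oncpu_function_call(p, report_task_cpu, p);
}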
2351 | |||
2307 | /*** | 2352 | /*** |
2308 | * try_to_wake_up - wake up a thread | 2353 | * try_to_wake_up - wake up a thread |
2309 | * @p: the to-be-woken-up thread | 2354 | * @p: the to-be-woken-up thread |
@@ -2460,6 +2505,7 @@ static void __sched_fork(struct task_struct *p) | |||
2460 | p->se.exec_start = 0; | 2505 | p->se.exec_start = 0; |
2461 | p->se.sum_exec_runtime = 0; | 2506 | p->se.sum_exec_runtime = 0; |
2462 | p->se.prev_sum_exec_runtime = 0; | 2507 | p->se.prev_sum_exec_runtime = 0; |
2508 | p->se.nr_migrations = 0; | ||
2463 | p->se.last_wakeup = 0; | 2509 | p->se.last_wakeup = 0; |
2464 | p->se.avg_overlap = 0; | 2510 | p->se.avg_overlap = 0; |
2465 | p->se.start_runtime = 0; | 2511 | p->se.start_runtime = 0; |
@@ -2690,6 +2736,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
2690 | */ | 2736 | */ |
2691 | prev_state = prev->state; | 2737 | prev_state = prev->state; |
2692 | finish_arch_switch(prev); | 2738 | finish_arch_switch(prev); |
2739 | perf_counter_task_sched_in(current, cpu_of(rq)); | ||
2693 | finish_lock_switch(rq, prev); | 2740 | finish_lock_switch(rq, prev); |
2694 | #ifdef CONFIG_SMP | 2741 | #ifdef CONFIG_SMP |
2695 | if (post_schedule) | 2742 | if (post_schedule) |
@@ -2852,6 +2899,21 @@ unsigned long nr_active(void) | |||
2852 | } | 2899 | } |
2853 | 2900 | ||
2854 | /* | 2901 | /* |
2902 | * Externally visible per-cpu scheduler statistics: | ||
2903 | * cpu_nr_switches(cpu) - number of context switches on that cpu | ||
2904 | * cpu_nr_migrations(cpu) - number of migrations into that cpu | ||
2905 | */ | ||
2906 | u64 cpu_nr_switches(int cpu) | ||
2907 | { | ||
2908 | return cpu_rq(cpu)->nr_switches; | ||
2909 | } | ||
2910 | |||
2911 | u64 cpu_nr_migrations(int cpu) | ||
2912 | { | ||
2913 | return cpu_rq(cpu)->nr_migrations_in; | ||
2914 | } | ||
2915 | |||
2916 | /* | ||
2855 | * Update rq->cpu_load[] statistics. This function is usually called every | 2917 | * Update rq->cpu_load[] statistics. This function is usually called every |
2856 | * scheduler tick (TICK_NSEC). | 2918 | * scheduler tick (TICK_NSEC). |
2857 | */ | 2919 | */ |
@@ -4506,6 +4568,29 @@ EXPORT_PER_CPU_SYMBOL(kstat); | |||
4506 | * Return any ns on the sched_clock that have not yet been banked in | 4568 | * Return any ns on the sched_clock that have not yet been banked in |
4507 | * @p in case that task is currently running. | 4569 | * @p in case that task is currently running. |
4508 | */ | 4570 | */ |
4571 | unsigned long long __task_delta_exec(struct task_struct *p, int update) | ||
4572 | { | ||
4573 | s64 delta_exec; | ||
4574 | struct rq *rq; | ||
4575 | |||
4576 | rq = task_rq(p); | ||
4577 | WARN_ON_ONCE(!runqueue_is_locked()); | ||
4578 | WARN_ON_ONCE(!task_current(rq, p)); | ||
4579 | |||
4580 | if (update) | ||
4581 | update_rq_clock(rq); | ||
4582 | |||
4583 | delta_exec = rq->clock - p->se.exec_start; | ||
4584 | |||
4585 | WARN_ON_ONCE(delta_exec < 0); | ||
4586 | |||
4587 | return delta_exec; | ||
4588 | } | ||
4589 | |||
4590 | /* | ||
4591 | * Return any ns on the sched_clock that have not yet been banked in | ||
4592 | * @p in case that task is currently running. | ||
4593 | */ | ||
4509 | unsigned long long task_delta_exec(struct task_struct *p) | 4594 | unsigned long long task_delta_exec(struct task_struct *p) |
4510 | { | 4595 | { |
4511 | unsigned long flags; | 4596 | unsigned long flags; |
@@ -4765,6 +4850,7 @@ void scheduler_tick(void) | |||
4765 | update_rq_clock(rq); | 4850 | update_rq_clock(rq); |
4766 | update_cpu_load(rq); | 4851 | update_cpu_load(rq); |
4767 | curr->sched_class->task_tick(rq, curr, 0); | 4852 | curr->sched_class->task_tick(rq, curr, 0); |
4853 | perf_counter_task_tick(curr, cpu); | ||
4768 | spin_unlock(&rq->lock); | 4854 | spin_unlock(&rq->lock); |
4769 | 4855 | ||
4770 | #ifdef CONFIG_SMP | 4856 | #ifdef CONFIG_SMP |
@@ -4980,6 +5066,7 @@ need_resched_nonpreemptible: | |||
4980 | 5066 | ||
4981 | if (likely(prev != next)) { | 5067 | if (likely(prev != next)) { |
4982 | sched_info_switch(prev, next); | 5068 | sched_info_switch(prev, next); |
5069 | perf_counter_task_sched_out(prev, cpu); | ||
4983 | 5070 | ||
4984 | rq->nr_switches++; | 5071 | rq->nr_switches++; |
4985 | rq->curr = next; | 5072 | rq->curr = next; |
diff --git a/kernel/sys.c b/kernel/sys.c index 51dbb55604e8..14c4c5613118 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/prctl.h> | 14 | #include <linux/prctl.h> |
15 | #include <linux/highuid.h> | 15 | #include <linux/highuid.h> |
16 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
17 | #include <linux/perf_counter.h> | ||
17 | #include <linux/resource.h> | 18 | #include <linux/resource.h> |
18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
19 | #include <linux/kexec.h> | 20 | #include <linux/kexec.h> |
@@ -1799,6 +1800,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
1799 | case PR_SET_TSC: | 1800 | case PR_SET_TSC: |
1800 | error = SET_TSC_CTL(arg2); | 1801 | error = SET_TSC_CTL(arg2); |
1801 | break; | 1802 | break; |
1803 | case PR_TASK_PERF_COUNTERS_DISABLE: | ||
1804 | error = perf_counter_task_disable(); | ||
1805 | break; | ||
1806 | case PR_TASK_PERF_COUNTERS_ENABLE: | ||
1807 | error = perf_counter_task_enable(); | ||
1808 | break; | ||
1802 | case PR_GET_TIMERSLACK: | 1809 | case PR_GET_TIMERSLACK: |
1803 | error = current->timer_slack_ns; | 1810 | error = current->timer_slack_ns; |
1804 | break; | 1811 | break; |
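
A hypothetical user-space sketch of the two new prctl controls (not part of this patch; PR_TASK_PERF_COUNTERS_DISABLE and PR_TASK_PERF_COUNTERS_ENABLE are assumed to come from the patched <linux/prctl.h>): a task can gate all counters in its own context around a region of interest without touching individual counter fds.

#include <sys/prctl.h>

/* Illustrative only: count a region, not the setup around it. */
static void count_only_region(void (*region)(void))
{
	prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0);
	/* ... setup that should not be counted ... */
	prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0);
	region();
	prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0);
}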
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 27dad2967387..68320f6b07b5 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
@@ -175,3 +175,6 @@ cond_syscall(compat_sys_timerfd_settime); | |||
175 | cond_syscall(compat_sys_timerfd_gettime); | 175 | cond_syscall(compat_sys_timerfd_gettime); |
176 | cond_syscall(sys_eventfd); | 176 | cond_syscall(sys_eventfd); |
177 | cond_syscall(sys_eventfd2); | 177 | cond_syscall(sys_eventfd2); |
178 | |||
179 | /* performance counters: */ | ||
180 | cond_syscall(sys_perf_counter_open); | ||