Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r-- | kernel/perf_counter.c | 4962 |
1 files changed, 0 insertions, 4962 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
deleted file mode 100644
index e0d91fdf0c3c..000000000000
--- a/kernel/perf_counter.c
+++ /dev/null
@@ -1,4962 +0,0 @@
1 | /* | ||
2 | * Performance counter core code | ||
3 | * | ||
4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | ||
6 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
7 | * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
8 | * | ||
9 | * For licensing details see kernel-base/COPYING | ||
10 | */ | ||
11 | |||
12 | #include <linux/fs.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/cpu.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/file.h> | ||
17 | #include <linux/poll.h> | ||
18 | #include <linux/sysfs.h> | ||
19 | #include <linux/dcache.h> | ||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <linux/vmstat.h> | ||
23 | #include <linux/hardirq.h> | ||
24 | #include <linux/rculist.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | #include <linux/anon_inodes.h> | ||
28 | #include <linux/kernel_stat.h> | ||
29 | #include <linux/perf_counter.h> | ||
30 | |||
31 | #include <asm/irq_regs.h> | ||
32 | |||
33 | /* | ||
34 | * Each CPU has a list of per CPU counters: | ||
35 | */ | ||
36 | DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); | ||
37 | |||
38 | int perf_max_counters __read_mostly = 1; | ||
39 | static int perf_reserved_percpu __read_mostly; | ||
40 | static int perf_overcommit __read_mostly = 1; | ||
41 | |||
42 | static atomic_t nr_counters __read_mostly; | ||
43 | static atomic_t nr_mmap_counters __read_mostly; | ||
44 | static atomic_t nr_comm_counters __read_mostly; | ||
45 | static atomic_t nr_task_counters __read_mostly; | ||
46 | |||
47 | /* | ||
48 | * perf counter paranoia level: | ||
49 | * -1 - not paranoid at all | ||
50 | * 0 - disallow raw tracepoint access for unpriv | ||
51 | * 1 - disallow cpu counters for unpriv | ||
52 | * 2 - disallow kernel profiling for unpriv | ||
53 | */ | ||
54 | int sysctl_perf_counter_paranoid __read_mostly = 1; | ||
55 | |||
56 | static inline bool perf_paranoid_tracepoint_raw(void) | ||
57 | { | ||
58 | return sysctl_perf_counter_paranoid > -1; | ||
59 | } | ||
60 | |||
61 | static inline bool perf_paranoid_cpu(void) | ||
62 | { | ||
63 | return sysctl_perf_counter_paranoid > 0; | ||
64 | } | ||
65 | |||
66 | static inline bool perf_paranoid_kernel(void) | ||
67 | { | ||
68 | return sysctl_perf_counter_paranoid > 1; | ||
69 | } | ||
70 | |||
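/*
 * Editor's illustration, not part of the original file: a sketch of how
 * the paranoia helpers above gate unprivileged requests.  The wrapper
 * function name and the exact call site are assumptions; only the level
 * semantics come from the comment block above.
 */
static int example_check_paranoia(struct perf_counter_attr *attr, int cpu)
{
	/* level 2: no kernel profiling for unprivileged users */
	if (!attr->exclude_kernel && perf_paranoid_kernel() &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* level 1: no per-CPU counters for unprivileged users */
	if (cpu != -1 && perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}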
71 | int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */ | ||
72 | |||
73 | /* | ||
74 | * max perf counter sample rate | ||
75 | */ | ||
76 | int sysctl_perf_counter_sample_rate __read_mostly = 100000; | ||
77 | |||
78 | static atomic64_t perf_counter_id; | ||
79 | |||
80 | /* | ||
81 | * Lock for (sysadmin-configurable) counter reservations: | ||
82 | */ | ||
83 | static DEFINE_SPINLOCK(perf_resource_lock); | ||
84 | |||
85 | /* | ||
86 | * Architecture provided APIs - weak aliases: | ||
87 | */ | ||
88 | extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter) | ||
89 | { | ||
90 | return NULL; | ||
91 | } | ||
92 | |||
93 | void __weak hw_perf_disable(void) { barrier(); } | ||
94 | void __weak hw_perf_enable(void) { barrier(); } | ||
95 | |||
96 | void __weak hw_perf_counter_setup(int cpu) { barrier(); } | ||
97 | void __weak hw_perf_counter_setup_online(int cpu) { barrier(); } | ||
98 | |||
99 | int __weak | ||
100 | hw_perf_group_sched_in(struct perf_counter *group_leader, | ||
101 | struct perf_cpu_context *cpuctx, | ||
102 | struct perf_counter_context *ctx, int cpu) | ||
103 | { | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | void __weak perf_counter_print_debug(void) { } | ||
108 | |||
109 | static DEFINE_PER_CPU(int, disable_count); | ||
110 | |||
111 | void __perf_disable(void) | ||
112 | { | ||
113 | __get_cpu_var(disable_count)++; | ||
114 | } | ||
115 | |||
116 | bool __perf_enable(void) | ||
117 | { | ||
118 | return !--__get_cpu_var(disable_count); | ||
119 | } | ||
120 | |||
121 | void perf_disable(void) | ||
122 | { | ||
123 | __perf_disable(); | ||
124 | hw_perf_disable(); | ||
125 | } | ||
126 | |||
127 | void perf_enable(void) | ||
128 | { | ||
129 | if (__perf_enable()) | ||
130 | hw_perf_enable(); | ||
131 | } | ||
132 | |||
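/*
 * Editor's illustration, not part of the original file: perf_disable()
 * and perf_enable() nest through the per-CPU disable_count, so a helper
 * can bracket its own critical section without caring whether a caller
 * already disabled the PMU.  The function below is a made-up example of
 * that pattern.
 */
static void example_nested_disable(void)
{
	perf_disable();		/* count 0 -> 1, hw_perf_disable() runs    */
	perf_disable();		/* count 1 -> 2, hardware already disabled */
	/* ... safely manipulate counter lists here ... */
	perf_enable();		/* count 2 -> 1, hardware stays disabled   */
	perf_enable();		/* count 1 -> 0, hw_perf_enable() runs     */
}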
133 | static void get_ctx(struct perf_counter_context *ctx) | ||
134 | { | ||
135 | WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); | ||
136 | } | ||
137 | |||
138 | static void free_ctx(struct rcu_head *head) | ||
139 | { | ||
140 | struct perf_counter_context *ctx; | ||
141 | |||
142 | ctx = container_of(head, struct perf_counter_context, rcu_head); | ||
143 | kfree(ctx); | ||
144 | } | ||
145 | |||
146 | static void put_ctx(struct perf_counter_context *ctx) | ||
147 | { | ||
148 | if (atomic_dec_and_test(&ctx->refcount)) { | ||
149 | if (ctx->parent_ctx) | ||
150 | put_ctx(ctx->parent_ctx); | ||
151 | if (ctx->task) | ||
152 | put_task_struct(ctx->task); | ||
153 | call_rcu(&ctx->rcu_head, free_ctx); | ||
154 | } | ||
155 | } | ||
156 | |||
157 | static void unclone_ctx(struct perf_counter_context *ctx) | ||
158 | { | ||
159 | if (ctx->parent_ctx) { | ||
160 | put_ctx(ctx->parent_ctx); | ||
161 | ctx->parent_ctx = NULL; | ||
162 | } | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * If we inherit counters we want to return the parent counter id | ||
167 | * to userspace. | ||
168 | */ | ||
169 | static u64 primary_counter_id(struct perf_counter *counter) | ||
170 | { | ||
171 | u64 id = counter->id; | ||
172 | |||
173 | if (counter->parent) | ||
174 | id = counter->parent->id; | ||
175 | |||
176 | return id; | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * Get the perf_counter_context for a task and lock it. | ||
181 | * This has to cope with the fact that until it is locked, | ||
182 | * the context could get moved to another task. | ||
183 | */ | ||
184 | static struct perf_counter_context * | ||
185 | perf_lock_task_context(struct task_struct *task, unsigned long *flags) | ||
186 | { | ||
187 | struct perf_counter_context *ctx; | ||
188 | |||
189 | rcu_read_lock(); | ||
190 | retry: | ||
191 | ctx = rcu_dereference(task->perf_counter_ctxp); | ||
192 | if (ctx) { | ||
193 | /* | ||
194 | * If this context is a clone of another, it might | ||
195 | * get swapped for another underneath us by | ||
196 | * perf_counter_task_sched_out, though the | ||
197 | * rcu_read_lock() protects us from any context | ||
198 | * getting freed. Lock the context and check if it | ||
199 | * got swapped before we could get the lock, and retry | ||
200 | * if so. If we locked the right context, then it | ||
201 | * can't get swapped on us any more. | ||
202 | */ | ||
203 | spin_lock_irqsave(&ctx->lock, *flags); | ||
204 | if (ctx != rcu_dereference(task->perf_counter_ctxp)) { | ||
205 | spin_unlock_irqrestore(&ctx->lock, *flags); | ||
206 | goto retry; | ||
207 | } | ||
208 | |||
209 | if (!atomic_inc_not_zero(&ctx->refcount)) { | ||
210 | spin_unlock_irqrestore(&ctx->lock, *flags); | ||
211 | ctx = NULL; | ||
212 | } | ||
213 | } | ||
214 | rcu_read_unlock(); | ||
215 | return ctx; | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * Get the context for a task and increment its pin_count so it | ||
220 | * can't get swapped to another task. This also increments its | ||
221 | * reference count so that the context can't get freed. | ||
222 | */ | ||
223 | static struct perf_counter_context *perf_pin_task_context(struct task_struct *task) | ||
224 | { | ||
225 | struct perf_counter_context *ctx; | ||
226 | unsigned long flags; | ||
227 | |||
228 | ctx = perf_lock_task_context(task, &flags); | ||
229 | if (ctx) { | ||
230 | ++ctx->pin_count; | ||
231 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
232 | } | ||
233 | return ctx; | ||
234 | } | ||
235 | |||
236 | static void perf_unpin_context(struct perf_counter_context *ctx) | ||
237 | { | ||
238 | unsigned long flags; | ||
239 | |||
240 | spin_lock_irqsave(&ctx->lock, flags); | ||
241 | --ctx->pin_count; | ||
242 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
243 | put_ctx(ctx); | ||
244 | } | ||
245 | |||
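/*
 * Editor's illustration, not part of the original file: the typical shape
 * of a caller that needs a task's context to stay put while it works on
 * it.  Pinning bumps pin_count (the context cannot be swapped to another
 * task) and the refcount (it cannot be freed); perf_unpin_context() drops
 * both again.  The function name is hypothetical.
 */
static void example_walk_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;

	ctx = perf_pin_task_context(task);
	if (!ctx)
		return;
	/* ... inspect ctx->counter_list under ctx->mutex or ctx->lock ... */
	perf_unpin_context(ctx);
}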
246 | /* | ||
247 | * Add a counter to the lists for its context. | ||
248 | * Must be called with ctx->mutex and ctx->lock held. | ||
249 | */ | ||
250 | static void | ||
251 | list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx) | ||
252 | { | ||
253 | struct perf_counter *group_leader = counter->group_leader; | ||
254 | |||
255 | /* | ||
256 | * Depending on whether it is a standalone or sibling counter, | ||
257 | * add it straight to the context's counter list, or to the group | ||
258 | * leader's sibling list: | ||
259 | */ | ||
260 | if (group_leader == counter) | ||
261 | list_add_tail(&counter->list_entry, &ctx->counter_list); | ||
262 | else { | ||
263 | list_add_tail(&counter->list_entry, &group_leader->sibling_list); | ||
264 | group_leader->nr_siblings++; | ||
265 | } | ||
266 | |||
267 | list_add_rcu(&counter->event_entry, &ctx->event_list); | ||
268 | ctx->nr_counters++; | ||
269 | if (counter->attr.inherit_stat) | ||
270 | ctx->nr_stat++; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Remove a counter from the lists for its context. | ||
275 | * Must be called with ctx->mutex and ctx->lock held. | ||
276 | */ | ||
277 | static void | ||
278 | list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) | ||
279 | { | ||
280 | struct perf_counter *sibling, *tmp; | ||
281 | |||
282 | if (list_empty(&counter->list_entry)) | ||
283 | return; | ||
284 | ctx->nr_counters--; | ||
285 | if (counter->attr.inherit_stat) | ||
286 | ctx->nr_stat--; | ||
287 | |||
288 | list_del_init(&counter->list_entry); | ||
289 | list_del_rcu(&counter->event_entry); | ||
290 | |||
291 | if (counter->group_leader != counter) | ||
292 | counter->group_leader->nr_siblings--; | ||
293 | |||
294 | /* | ||
295 | * If this was a group counter with sibling counters then | ||
296 | * upgrade the siblings to singleton counters by adding them | ||
297 | * to the context list directly: | ||
298 | */ | ||
299 | list_for_each_entry_safe(sibling, tmp, | ||
300 | &counter->sibling_list, list_entry) { | ||
301 | |||
302 | list_move_tail(&sibling->list_entry, &ctx->counter_list); | ||
303 | sibling->group_leader = sibling; | ||
304 | } | ||
305 | } | ||
306 | |||
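/*
 * Editor's illustration, not part of the original file: the list layout
 * that list_add_counter()/list_del_counter() maintain.  Group leaders sit
 * on ctx->counter_list, siblings hang off their leader's sibling_list,
 * and deleting a leader promotes the siblings to singleton counters:
 *
 *	ctx->counter_list --> leader A --> leader B --> ...
 *	                         |
 *	                         +-- sibling_list --> sibling A1 --> sibling A2
 */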
307 | static void | ||
308 | counter_sched_out(struct perf_counter *counter, | ||
309 | struct perf_cpu_context *cpuctx, | ||
310 | struct perf_counter_context *ctx) | ||
311 | { | ||
312 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
313 | return; | ||
314 | |||
315 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
316 | if (counter->pending_disable) { | ||
317 | counter->pending_disable = 0; | ||
318 | counter->state = PERF_COUNTER_STATE_OFF; | ||
319 | } | ||
320 | counter->tstamp_stopped = ctx->time; | ||
321 | counter->pmu->disable(counter); | ||
322 | counter->oncpu = -1; | ||
323 | |||
324 | if (!is_software_counter(counter)) | ||
325 | cpuctx->active_oncpu--; | ||
326 | ctx->nr_active--; | ||
327 | if (counter->attr.exclusive || !cpuctx->active_oncpu) | ||
328 | cpuctx->exclusive = 0; | ||
329 | } | ||
330 | |||
331 | static void | ||
332 | group_sched_out(struct perf_counter *group_counter, | ||
333 | struct perf_cpu_context *cpuctx, | ||
334 | struct perf_counter_context *ctx) | ||
335 | { | ||
336 | struct perf_counter *counter; | ||
337 | |||
338 | if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
339 | return; | ||
340 | |||
341 | counter_sched_out(group_counter, cpuctx, ctx); | ||
342 | |||
343 | /* | ||
344 | * Schedule out siblings (if any): | ||
345 | */ | ||
346 | list_for_each_entry(counter, &group_counter->sibling_list, list_entry) | ||
347 | counter_sched_out(counter, cpuctx, ctx); | ||
348 | |||
349 | if (group_counter->attr.exclusive) | ||
350 | cpuctx->exclusive = 0; | ||
351 | } | ||
352 | |||
353 | /* | ||
354 | * Cross CPU call to remove a performance counter | ||
355 | * | ||
356 | * We disable the counter on the hardware level first. After that we | ||
357 | * remove it from the context list. | ||
358 | */ | ||
359 | static void __perf_counter_remove_from_context(void *info) | ||
360 | { | ||
361 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
362 | struct perf_counter *counter = info; | ||
363 | struct perf_counter_context *ctx = counter->ctx; | ||
364 | |||
365 | /* | ||
366 | * If this is a task context, we need to check whether it is | ||
367 | * the current task context of this cpu. If not it has been | ||
368 | * scheduled out before the smp call arrived. | ||
369 | */ | ||
370 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
371 | return; | ||
372 | |||
373 | spin_lock(&ctx->lock); | ||
374 | /* | ||
375 | * Protect the list operation against NMI by disabling the | ||
376 | * counters on a global level. | ||
377 | */ | ||
378 | perf_disable(); | ||
379 | |||
380 | counter_sched_out(counter, cpuctx, ctx); | ||
381 | |||
382 | list_del_counter(counter, ctx); | ||
383 | |||
384 | if (!ctx->task) { | ||
385 | /* | ||
386 | * Allow more per task counters with respect to the | ||
387 | * reservation: | ||
388 | */ | ||
389 | cpuctx->max_pertask = | ||
390 | min(perf_max_counters - ctx->nr_counters, | ||
391 | perf_max_counters - perf_reserved_percpu); | ||
392 | } | ||
393 | |||
394 | perf_enable(); | ||
395 | spin_unlock(&ctx->lock); | ||
396 | } | ||
397 | |||
398 | |||
399 | /* | ||
400 | * Remove the counter from a task's (or a CPU's) list of counters. | ||
401 | * | ||
402 | * Must be called with ctx->mutex held. | ||
403 | * | ||
404 | * CPU counters are removed with an smp call. For task counters we only | ||
405 | * call when the task is on a CPU. | ||
406 | * | ||
407 | * If counter->ctx is a cloned context, callers must make sure that | ||
408 | * every task struct that counter->ctx->task could possibly point to | ||
409 | * remains valid. This is OK when called from perf_release since | ||
410 | * that only calls us on the top-level context, which can't be a clone. | ||
411 | * When called from perf_counter_exit_task, it's OK because the | ||
412 | * context has been detached from its task. | ||
413 | */ | ||
414 | static void perf_counter_remove_from_context(struct perf_counter *counter) | ||
415 | { | ||
416 | struct perf_counter_context *ctx = counter->ctx; | ||
417 | struct task_struct *task = ctx->task; | ||
418 | |||
419 | if (!task) { | ||
420 | /* | ||
421 | * Per cpu counters are removed via an smp call and | ||
422 | * the removal is always successful. | ||
423 | */ | ||
424 | smp_call_function_single(counter->cpu, | ||
425 | __perf_counter_remove_from_context, | ||
426 | counter, 1); | ||
427 | return; | ||
428 | } | ||
429 | |||
430 | retry: | ||
431 | task_oncpu_function_call(task, __perf_counter_remove_from_context, | ||
432 | counter); | ||
433 | |||
434 | spin_lock_irq(&ctx->lock); | ||
435 | /* | ||
436 | * If the context is active we need to retry the smp call. | ||
437 | */ | ||
438 | if (ctx->nr_active && !list_empty(&counter->list_entry)) { | ||
439 | spin_unlock_irq(&ctx->lock); | ||
440 | goto retry; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * The lock prevents this context from being scheduled in, so we | ||
445 | * can remove the counter safely if the call above did not | ||
446 | * succeed. | ||
447 | */ | ||
448 | if (!list_empty(&counter->list_entry)) { | ||
449 | list_del_counter(counter, ctx); | ||
450 | } | ||
451 | spin_unlock_irq(&ctx->lock); | ||
452 | } | ||
453 | |||
454 | static inline u64 perf_clock(void) | ||
455 | { | ||
456 | return cpu_clock(smp_processor_id()); | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * Update the record of the current time in a context. | ||
461 | */ | ||
462 | static void update_context_time(struct perf_counter_context *ctx) | ||
463 | { | ||
464 | u64 now = perf_clock(); | ||
465 | |||
466 | ctx->time += now - ctx->timestamp; | ||
467 | ctx->timestamp = now; | ||
468 | } | ||
469 | |||
470 | /* | ||
471 | * Update the total_time_enabled and total_time_running fields for a counter. | ||
472 | */ | ||
473 | static void update_counter_times(struct perf_counter *counter) | ||
474 | { | ||
475 | struct perf_counter_context *ctx = counter->ctx; | ||
476 | u64 run_end; | ||
477 | |||
478 | if (counter->state < PERF_COUNTER_STATE_INACTIVE || | ||
479 | counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE) | ||
480 | return; | ||
481 | |||
482 | counter->total_time_enabled = ctx->time - counter->tstamp_enabled; | ||
483 | |||
484 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) | ||
485 | run_end = counter->tstamp_stopped; | ||
486 | else | ||
487 | run_end = ctx->time; | ||
488 | |||
489 | counter->total_time_running = run_end - counter->tstamp_running; | ||
490 | } | ||
491 | |||
492 | /* | ||
493 | * Update total_time_enabled and total_time_running for all counters in a group. | ||
494 | */ | ||
495 | static void update_group_times(struct perf_counter *leader) | ||
496 | { | ||
497 | struct perf_counter *counter; | ||
498 | |||
499 | update_counter_times(leader); | ||
500 | list_for_each_entry(counter, &leader->sibling_list, list_entry) | ||
501 | update_counter_times(counter); | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * Cross CPU call to disable a performance counter | ||
506 | */ | ||
507 | static void __perf_counter_disable(void *info) | ||
508 | { | ||
509 | struct perf_counter *counter = info; | ||
510 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
511 | struct perf_counter_context *ctx = counter->ctx; | ||
512 | |||
513 | /* | ||
514 | * If this is a per-task counter, need to check whether this | ||
515 | * counter's task is the current task on this cpu. | ||
516 | */ | ||
517 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
518 | return; | ||
519 | |||
520 | spin_lock(&ctx->lock); | ||
521 | |||
522 | /* | ||
523 | * If the counter is on, turn it off. | ||
524 | * If it is in error state, leave it in error state. | ||
525 | */ | ||
526 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { | ||
527 | update_context_time(ctx); | ||
528 | update_group_times(counter); | ||
529 | if (counter == counter->group_leader) | ||
530 | group_sched_out(counter, cpuctx, ctx); | ||
531 | else | ||
532 | counter_sched_out(counter, cpuctx, ctx); | ||
533 | counter->state = PERF_COUNTER_STATE_OFF; | ||
534 | } | ||
535 | |||
536 | spin_unlock(&ctx->lock); | ||
537 | } | ||
538 | |||
539 | /* | ||
540 | * Disable a counter. | ||
541 | * | ||
542 | * If counter->ctx is a cloned context, callers must make sure that | ||
543 | * every task struct that counter->ctx->task could possibly point to | ||
544 | * remains valid. This condition is satisfied when called through | ||
545 | * perf_counter_for_each_child or perf_counter_for_each because they | ||
546 | * hold the top-level counter's child_mutex, so any descendant that | ||
547 | * goes to exit will block in sync_child_counter. | ||
548 | * When called from perf_pending_counter it's OK because counter->ctx | ||
549 | * is the current context on this CPU and preemption is disabled, | ||
550 | * hence we can't get into perf_counter_task_sched_out for this context. | ||
551 | */ | ||
552 | static void perf_counter_disable(struct perf_counter *counter) | ||
553 | { | ||
554 | struct perf_counter_context *ctx = counter->ctx; | ||
555 | struct task_struct *task = ctx->task; | ||
556 | |||
557 | if (!task) { | ||
558 | /* | ||
559 | * Disable the counter on the cpu that it's on | ||
560 | */ | ||
561 | smp_call_function_single(counter->cpu, __perf_counter_disable, | ||
562 | counter, 1); | ||
563 | return; | ||
564 | } | ||
565 | |||
566 | retry: | ||
567 | task_oncpu_function_call(task, __perf_counter_disable, counter); | ||
568 | |||
569 | spin_lock_irq(&ctx->lock); | ||
570 | /* | ||
571 | * If the counter is still active, we need to retry the cross-call. | ||
572 | */ | ||
573 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) { | ||
574 | spin_unlock_irq(&ctx->lock); | ||
575 | goto retry; | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * Since we have the lock this context can't be scheduled | ||
580 | * in, so we can change the state safely. | ||
581 | */ | ||
582 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) { | ||
583 | update_group_times(counter); | ||
584 | counter->state = PERF_COUNTER_STATE_OFF; | ||
585 | } | ||
586 | |||
587 | spin_unlock_irq(&ctx->lock); | ||
588 | } | ||
589 | |||
590 | static int | ||
591 | counter_sched_in(struct perf_counter *counter, | ||
592 | struct perf_cpu_context *cpuctx, | ||
593 | struct perf_counter_context *ctx, | ||
594 | int cpu) | ||
595 | { | ||
596 | if (counter->state <= PERF_COUNTER_STATE_OFF) | ||
597 | return 0; | ||
598 | |||
599 | counter->state = PERF_COUNTER_STATE_ACTIVE; | ||
600 | counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ | ||
601 | /* | ||
602 | * The new state must be visible before we turn it on in the hardware: | ||
603 | */ | ||
604 | smp_wmb(); | ||
605 | |||
606 | if (counter->pmu->enable(counter)) { | ||
607 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
608 | counter->oncpu = -1; | ||
609 | return -EAGAIN; | ||
610 | } | ||
611 | |||
612 | counter->tstamp_running += ctx->time - counter->tstamp_stopped; | ||
613 | |||
614 | if (!is_software_counter(counter)) | ||
615 | cpuctx->active_oncpu++; | ||
616 | ctx->nr_active++; | ||
617 | |||
618 | if (counter->attr.exclusive) | ||
619 | cpuctx->exclusive = 1; | ||
620 | |||
621 | return 0; | ||
622 | } | ||
623 | |||
624 | static int | ||
625 | group_sched_in(struct perf_counter *group_counter, | ||
626 | struct perf_cpu_context *cpuctx, | ||
627 | struct perf_counter_context *ctx, | ||
628 | int cpu) | ||
629 | { | ||
630 | struct perf_counter *counter, *partial_group; | ||
631 | int ret; | ||
632 | |||
633 | if (group_counter->state == PERF_COUNTER_STATE_OFF) | ||
634 | return 0; | ||
635 | |||
636 | ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); | ||
637 | if (ret) | ||
638 | return ret < 0 ? ret : 0; | ||
639 | |||
640 | if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) | ||
641 | return -EAGAIN; | ||
642 | |||
643 | /* | ||
644 | * Schedule in siblings as one group (if any): | ||
645 | */ | ||
646 | list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { | ||
647 | if (counter_sched_in(counter, cpuctx, ctx, cpu)) { | ||
648 | partial_group = counter; | ||
649 | goto group_error; | ||
650 | } | ||
651 | } | ||
652 | |||
653 | return 0; | ||
654 | |||
655 | group_error: | ||
656 | /* | ||
657 | * Groups can be scheduled in as one unit only, so undo any | ||
658 | * partial group before returning: | ||
659 | */ | ||
660 | list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { | ||
661 | if (counter == partial_group) | ||
662 | break; | ||
663 | counter_sched_out(counter, cpuctx, ctx); | ||
664 | } | ||
665 | counter_sched_out(group_counter, cpuctx, ctx); | ||
666 | |||
667 | return -EAGAIN; | ||
668 | } | ||
669 | |||
670 | /* | ||
671 | * Return 1 for a group consisting entirely of software counters, | ||
672 | * 0 if the group contains any hardware counters. | ||
673 | */ | ||
674 | static int is_software_only_group(struct perf_counter *leader) | ||
675 | { | ||
676 | struct perf_counter *counter; | ||
677 | |||
678 | if (!is_software_counter(leader)) | ||
679 | return 0; | ||
680 | |||
681 | list_for_each_entry(counter, &leader->sibling_list, list_entry) | ||
682 | if (!is_software_counter(counter)) | ||
683 | return 0; | ||
684 | |||
685 | return 1; | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * Work out whether we can put this counter group on the CPU now. | ||
690 | */ | ||
691 | static int group_can_go_on(struct perf_counter *counter, | ||
692 | struct perf_cpu_context *cpuctx, | ||
693 | int can_add_hw) | ||
694 | { | ||
695 | /* | ||
696 | * Groups consisting entirely of software counters can always go on. | ||
697 | */ | ||
698 | if (is_software_only_group(counter)) | ||
699 | return 1; | ||
700 | /* | ||
701 | * If an exclusive group is already on, no other hardware | ||
702 | * counters can go on. | ||
703 | */ | ||
704 | if (cpuctx->exclusive) | ||
705 | return 0; | ||
706 | /* | ||
707 | * If this group is exclusive and there are already | ||
708 | * counters on the CPU, it can't go on. | ||
709 | */ | ||
710 | if (counter->attr.exclusive && cpuctx->active_oncpu) | ||
711 | return 0; | ||
712 | /* | ||
713 | * Otherwise, try to add it if all previous groups were able | ||
714 | * to go on. | ||
715 | */ | ||
716 | return can_add_hw; | ||
717 | } | ||
718 | |||
719 | static void add_counter_to_ctx(struct perf_counter *counter, | ||
720 | struct perf_counter_context *ctx) | ||
721 | { | ||
722 | list_add_counter(counter, ctx); | ||
723 | counter->tstamp_enabled = ctx->time; | ||
724 | counter->tstamp_running = ctx->time; | ||
725 | counter->tstamp_stopped = ctx->time; | ||
726 | } | ||
727 | |||
728 | /* | ||
729 | * Cross CPU call to install and enable a performance counter | ||
730 | * | ||
731 | * Must be called with ctx->mutex held | ||
732 | */ | ||
733 | static void __perf_install_in_context(void *info) | ||
734 | { | ||
735 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
736 | struct perf_counter *counter = info; | ||
737 | struct perf_counter_context *ctx = counter->ctx; | ||
738 | struct perf_counter *leader = counter->group_leader; | ||
739 | int cpu = smp_processor_id(); | ||
740 | int err; | ||
741 | |||
742 | /* | ||
743 | * If this is a task context, we need to check whether it is | ||
744 | * the current task context of this cpu. If not it has been | ||
745 | * scheduled out before the smp call arrived. | ||
746 | * Or possibly this is the right context but it isn't | ||
747 | * on this cpu because it had no counters. | ||
748 | */ | ||
749 | if (ctx->task && cpuctx->task_ctx != ctx) { | ||
750 | if (cpuctx->task_ctx || ctx->task != current) | ||
751 | return; | ||
752 | cpuctx->task_ctx = ctx; | ||
753 | } | ||
754 | |||
755 | spin_lock(&ctx->lock); | ||
756 | ctx->is_active = 1; | ||
757 | update_context_time(ctx); | ||
758 | |||
759 | /* | ||
760 | * Protect the list operation against NMI by disabling the | ||
761 | * counters on a global level. NOP for non NMI based counters. | ||
762 | */ | ||
763 | perf_disable(); | ||
764 | |||
765 | add_counter_to_ctx(counter, ctx); | ||
766 | |||
767 | /* | ||
768 | * Don't put the counter on if it is disabled or if | ||
769 | * it is in a group and the group isn't on. | ||
770 | */ | ||
771 | if (counter->state != PERF_COUNTER_STATE_INACTIVE || | ||
772 | (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)) | ||
773 | goto unlock; | ||
774 | |||
775 | /* | ||
776 | * An exclusive counter can't go on if there are already active | ||
777 | * hardware counters, and no hardware counter can go on if there | ||
778 | * is already an exclusive counter on. | ||
779 | */ | ||
780 | if (!group_can_go_on(counter, cpuctx, 1)) | ||
781 | err = -EEXIST; | ||
782 | else | ||
783 | err = counter_sched_in(counter, cpuctx, ctx, cpu); | ||
784 | |||
785 | if (err) { | ||
786 | /* | ||
787 | * This counter couldn't go on. If it is in a group | ||
788 | * then we have to pull the whole group off. | ||
789 | * If the counter group is pinned then put it in error state. | ||
790 | */ | ||
791 | if (leader != counter) | ||
792 | group_sched_out(leader, cpuctx, ctx); | ||
793 | if (leader->attr.pinned) { | ||
794 | update_group_times(leader); | ||
795 | leader->state = PERF_COUNTER_STATE_ERROR; | ||
796 | } | ||
797 | } | ||
798 | |||
799 | if (!err && !ctx->task && cpuctx->max_pertask) | ||
800 | cpuctx->max_pertask--; | ||
801 | |||
802 | unlock: | ||
803 | perf_enable(); | ||
804 | |||
805 | spin_unlock(&ctx->lock); | ||
806 | } | ||
807 | |||
808 | /* | ||
809 | * Attach a performance counter to a context | ||
810 | * | ||
811 | * First we add the counter to the list with the hardware enable bit | ||
812 | * in counter->hw_config cleared. | ||
813 | * | ||
814 | * If the counter is attached to a task which is on a CPU we use an smp | ||
815 | * call to enable it in the task context. The task might have been | ||
816 | * scheduled away, but we check this in the smp call again. | ||
817 | * | ||
818 | * Must be called with ctx->mutex held. | ||
819 | */ | ||
820 | static void | ||
821 | perf_install_in_context(struct perf_counter_context *ctx, | ||
822 | struct perf_counter *counter, | ||
823 | int cpu) | ||
824 | { | ||
825 | struct task_struct *task = ctx->task; | ||
826 | |||
827 | if (!task) { | ||
828 | /* | ||
829 | * Per cpu counters are installed via an smp call and | ||
830 | * the install is always successful. | ||
831 | */ | ||
832 | smp_call_function_single(cpu, __perf_install_in_context, | ||
833 | counter, 1); | ||
834 | return; | ||
835 | } | ||
836 | |||
837 | retry: | ||
838 | task_oncpu_function_call(task, __perf_install_in_context, | ||
839 | counter); | ||
840 | |||
841 | spin_lock_irq(&ctx->lock); | ||
842 | /* | ||
843 | * If the context is active we need to retry the smp call. | ||
844 | */ | ||
845 | if (ctx->is_active && list_empty(&counter->list_entry)) { | ||
846 | spin_unlock_irq(&ctx->lock); | ||
847 | goto retry; | ||
848 | } | ||
849 | |||
850 | /* | ||
851 | * The lock prevents this context from being scheduled in, so we | ||
852 | * can add the counter safely if the call above did not | ||
853 | * succeed. | ||
854 | */ | ||
855 | if (list_empty(&counter->list_entry)) | ||
856 | add_counter_to_ctx(counter, ctx); | ||
857 | spin_unlock_irq(&ctx->lock); | ||
858 | } | ||
859 | |||
860 | /* | ||
861 | * Put a counter into inactive state and update time fields. | ||
862 | * Enabling the leader of a group effectively enables all | ||
863 | * the group members that aren't explicitly disabled, so we | ||
864 | * have to update their ->tstamp_enabled also. | ||
865 | * Note: this works for group members as well as group leaders | ||
866 | * since the non-leader members' sibling_lists will be empty. | ||
867 | */ | ||
868 | static void __perf_counter_mark_enabled(struct perf_counter *counter, | ||
869 | struct perf_counter_context *ctx) | ||
870 | { | ||
871 | struct perf_counter *sub; | ||
872 | |||
873 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
874 | counter->tstamp_enabled = ctx->time - counter->total_time_enabled; | ||
875 | list_for_each_entry(sub, &counter->sibling_list, list_entry) | ||
876 | if (sub->state >= PERF_COUNTER_STATE_INACTIVE) | ||
877 | sub->tstamp_enabled = | ||
878 | ctx->time - sub->total_time_enabled; | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * Cross CPU call to enable a performance counter | ||
883 | */ | ||
884 | static void __perf_counter_enable(void *info) | ||
885 | { | ||
886 | struct perf_counter *counter = info; | ||
887 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
888 | struct perf_counter_context *ctx = counter->ctx; | ||
889 | struct perf_counter *leader = counter->group_leader; | ||
890 | int err; | ||
891 | |||
892 | /* | ||
893 | * If this is a per-task counter, need to check whether this | ||
894 | * counter's task is the current task on this cpu. | ||
895 | */ | ||
896 | if (ctx->task && cpuctx->task_ctx != ctx) { | ||
897 | if (cpuctx->task_ctx || ctx->task != current) | ||
898 | return; | ||
899 | cpuctx->task_ctx = ctx; | ||
900 | } | ||
901 | |||
902 | spin_lock(&ctx->lock); | ||
903 | ctx->is_active = 1; | ||
904 | update_context_time(ctx); | ||
905 | |||
906 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
907 | goto unlock; | ||
908 | __perf_counter_mark_enabled(counter, ctx); | ||
909 | |||
910 | /* | ||
911 | * If the counter is in a group and isn't the group leader, | ||
912 | * then don't put it on unless the group is on. | ||
913 | */ | ||
914 | if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) | ||
915 | goto unlock; | ||
916 | |||
917 | if (!group_can_go_on(counter, cpuctx, 1)) { | ||
918 | err = -EEXIST; | ||
919 | } else { | ||
920 | perf_disable(); | ||
921 | if (counter == leader) | ||
922 | err = group_sched_in(counter, cpuctx, ctx, | ||
923 | smp_processor_id()); | ||
924 | else | ||
925 | err = counter_sched_in(counter, cpuctx, ctx, | ||
926 | smp_processor_id()); | ||
927 | perf_enable(); | ||
928 | } | ||
929 | |||
930 | if (err) { | ||
931 | /* | ||
932 | * If this counter can't go on and it's part of a | ||
933 | * group, then the whole group has to come off. | ||
934 | */ | ||
935 | if (leader != counter) | ||
936 | group_sched_out(leader, cpuctx, ctx); | ||
937 | if (leader->attr.pinned) { | ||
938 | update_group_times(leader); | ||
939 | leader->state = PERF_COUNTER_STATE_ERROR; | ||
940 | } | ||
941 | } | ||
942 | |||
943 | unlock: | ||
944 | spin_unlock(&ctx->lock); | ||
945 | } | ||
946 | |||
947 | /* | ||
948 | * Enable a counter. | ||
949 | * | ||
950 | * If counter->ctx is a cloned context, callers must make sure that | ||
951 | * every task struct that counter->ctx->task could possibly point to | ||
952 | * remains valid. This condition is satisfied when called through | ||
953 | * perf_counter_for_each_child or perf_counter_for_each as described | ||
954 | * for perf_counter_disable. | ||
955 | */ | ||
956 | static void perf_counter_enable(struct perf_counter *counter) | ||
957 | { | ||
958 | struct perf_counter_context *ctx = counter->ctx; | ||
959 | struct task_struct *task = ctx->task; | ||
960 | |||
961 | if (!task) { | ||
962 | /* | ||
963 | * Enable the counter on the cpu that it's on | ||
964 | */ | ||
965 | smp_call_function_single(counter->cpu, __perf_counter_enable, | ||
966 | counter, 1); | ||
967 | return; | ||
968 | } | ||
969 | |||
970 | spin_lock_irq(&ctx->lock); | ||
971 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
972 | goto out; | ||
973 | |||
974 | /* | ||
975 | * If the counter is in error state, clear that first. | ||
976 | * That way, if we see the counter in error state below, we | ||
977 | * know that it has gone back into error state, as distinct | ||
978 | * from the task having been scheduled away before the | ||
979 | * cross-call arrived. | ||
980 | */ | ||
981 | if (counter->state == PERF_COUNTER_STATE_ERROR) | ||
982 | counter->state = PERF_COUNTER_STATE_OFF; | ||
983 | |||
984 | retry: | ||
985 | spin_unlock_irq(&ctx->lock); | ||
986 | task_oncpu_function_call(task, __perf_counter_enable, counter); | ||
987 | |||
988 | spin_lock_irq(&ctx->lock); | ||
989 | |||
990 | /* | ||
991 | * If the context is active and the counter is still off, | ||
992 | * we need to retry the cross-call. | ||
993 | */ | ||
994 | if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF) | ||
995 | goto retry; | ||
996 | |||
997 | /* | ||
998 | * Since we have the lock this context can't be scheduled | ||
999 | * in, so we can change the state safely. | ||
1000 | */ | ||
1001 | if (counter->state == PERF_COUNTER_STATE_OFF) | ||
1002 | __perf_counter_mark_enabled(counter, ctx); | ||
1003 | |||
1004 | out: | ||
1005 | spin_unlock_irq(&ctx->lock); | ||
1006 | } | ||
1007 | |||
1008 | static int perf_counter_refresh(struct perf_counter *counter, int refresh) | ||
1009 | { | ||
1010 | /* | ||
1011 | * not supported on inherited counters | ||
1012 | */ | ||
1013 | if (counter->attr.inherit) | ||
1014 | return -EINVAL; | ||
1015 | |||
1016 | atomic_add(refresh, &counter->event_limit); | ||
1017 | perf_counter_enable(counter); | ||
1018 | |||
1019 | return 0; | ||
1020 | } | ||
1021 | |||
1022 | void __perf_counter_sched_out(struct perf_counter_context *ctx, | ||
1023 | struct perf_cpu_context *cpuctx) | ||
1024 | { | ||
1025 | struct perf_counter *counter; | ||
1026 | |||
1027 | spin_lock(&ctx->lock); | ||
1028 | ctx->is_active = 0; | ||
1029 | if (likely(!ctx->nr_counters)) | ||
1030 | goto out; | ||
1031 | update_context_time(ctx); | ||
1032 | |||
1033 | perf_disable(); | ||
1034 | if (ctx->nr_active) { | ||
1035 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1036 | if (counter != counter->group_leader) | ||
1037 | counter_sched_out(counter, cpuctx, ctx); | ||
1038 | else | ||
1039 | group_sched_out(counter, cpuctx, ctx); | ||
1040 | } | ||
1041 | } | ||
1042 | perf_enable(); | ||
1043 | out: | ||
1044 | spin_unlock(&ctx->lock); | ||
1045 | } | ||
1046 | |||
1047 | /* | ||
1048 | * Test whether two contexts are equivalent, i.e. whether they | ||
1049 | * have both been cloned from the same version of the same context | ||
1050 | * and they both have the same number of enabled counters. | ||
1051 | * If the number of enabled counters is the same, then the set | ||
1052 | * of enabled counters should be the same, because these are both | ||
1053 | * inherited contexts, therefore we can't access individual counters | ||
1054 | * in them directly with an fd; we can only enable/disable all | ||
1055 | * counters via prctl, or enable/disable all counters in a family | ||
1056 | * via ioctl, which will have the same effect on both contexts. | ||
1057 | */ | ||
1058 | static int context_equiv(struct perf_counter_context *ctx1, | ||
1059 | struct perf_counter_context *ctx2) | ||
1060 | { | ||
1061 | return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx | ||
1062 | && ctx1->parent_gen == ctx2->parent_gen | ||
1063 | && !ctx1->pin_count && !ctx2->pin_count; | ||
1064 | } | ||
1065 | |||
1066 | static void __perf_counter_read(void *counter); | ||
1067 | |||
1068 | static void __perf_counter_sync_stat(struct perf_counter *counter, | ||
1069 | struct perf_counter *next_counter) | ||
1070 | { | ||
1071 | u64 value; | ||
1072 | |||
1073 | if (!counter->attr.inherit_stat) | ||
1074 | return; | ||
1075 | |||
1076 | /* | ||
1077 | * Update the counter value, we cannot use perf_counter_read() | ||
1078 | * because we're in the middle of a context switch and have IRQs | ||
1079 | * disabled, which upsets smp_call_function_single(), however | ||
1080 | * we know the counter must be on the current CPU, therefore we | ||
1081 | * don't need to use it. | ||
1082 | */ | ||
1083 | switch (counter->state) { | ||
1084 | case PERF_COUNTER_STATE_ACTIVE: | ||
1085 | __perf_counter_read(counter); | ||
1086 | break; | ||
1087 | |||
1088 | case PERF_COUNTER_STATE_INACTIVE: | ||
1089 | update_counter_times(counter); | ||
1090 | break; | ||
1091 | |||
1092 | default: | ||
1093 | break; | ||
1094 | } | ||
1095 | |||
1096 | /* | ||
1097 | * In order to keep per-task stats reliable we need to flip the counter | ||
1098 | * values when we flip the contexts. | ||
1099 | */ | ||
1100 | value = atomic64_read(&next_counter->count); | ||
1101 | value = atomic64_xchg(&counter->count, value); | ||
1102 | atomic64_set(&next_counter->count, value); | ||
1103 | |||
1104 | swap(counter->total_time_enabled, next_counter->total_time_enabled); | ||
1105 | swap(counter->total_time_running, next_counter->total_time_running); | ||
1106 | |||
1107 | /* | ||
1108 | * Since we swizzled the values, update the user visible data too. | ||
1109 | */ | ||
1110 | perf_counter_update_userpage(counter); | ||
1111 | perf_counter_update_userpage(next_counter); | ||
1112 | } | ||
1113 | |||
1114 | #define list_next_entry(pos, member) \ | ||
1115 | list_entry(pos->member.next, typeof(*pos), member) | ||
1116 | |||
1117 | static void perf_counter_sync_stat(struct perf_counter_context *ctx, | ||
1118 | struct perf_counter_context *next_ctx) | ||
1119 | { | ||
1120 | struct perf_counter *counter, *next_counter; | ||
1121 | |||
1122 | if (!ctx->nr_stat) | ||
1123 | return; | ||
1124 | |||
1125 | counter = list_first_entry(&ctx->event_list, | ||
1126 | struct perf_counter, event_entry); | ||
1127 | |||
1128 | next_counter = list_first_entry(&next_ctx->event_list, | ||
1129 | struct perf_counter, event_entry); | ||
1130 | |||
1131 | while (&counter->event_entry != &ctx->event_list && | ||
1132 | &next_counter->event_entry != &next_ctx->event_list) { | ||
1133 | |||
1134 | __perf_counter_sync_stat(counter, next_counter); | ||
1135 | |||
1136 | counter = list_next_entry(counter, event_entry); | ||
1137 | next_counter = list_next_entry(next_counter, event_entry); | ||
1138 | } | ||
1139 | } | ||
1140 | |||
1141 | /* | ||
1142 | * Called from scheduler to remove the counters of the current task, | ||
1143 | * with interrupts disabled. | ||
1144 | * | ||
1145 | * We stop each counter and update the counter value in counter->count. | ||
1146 | * | ||
1147 | * This does not protect us against NMI, but disable() | ||
1148 | * sets the disabled bit in the control field of counter _before_ | ||
1149 | * accessing the counter control register. If an NMI hits, then it will | ||
1150 | * not restart the counter. | ||
1151 | */ | ||
1152 | void perf_counter_task_sched_out(struct task_struct *task, | ||
1153 | struct task_struct *next, int cpu) | ||
1154 | { | ||
1155 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1156 | struct perf_counter_context *ctx = task->perf_counter_ctxp; | ||
1157 | struct perf_counter_context *next_ctx; | ||
1158 | struct perf_counter_context *parent; | ||
1159 | struct pt_regs *regs; | ||
1160 | int do_switch = 1; | ||
1161 | |||
1162 | regs = task_pt_regs(task); | ||
1163 | perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); | ||
1164 | |||
1165 | if (likely(!ctx || !cpuctx->task_ctx)) | ||
1166 | return; | ||
1167 | |||
1168 | update_context_time(ctx); | ||
1169 | |||
1170 | rcu_read_lock(); | ||
1171 | parent = rcu_dereference(ctx->parent_ctx); | ||
1172 | next_ctx = next->perf_counter_ctxp; | ||
1173 | if (parent && next_ctx && | ||
1174 | rcu_dereference(next_ctx->parent_ctx) == parent) { | ||
1175 | /* | ||
1176 | * Looks like the two contexts are clones, so we might be | ||
1177 | * able to optimize the context switch. We lock both | ||
1178 | * contexts and check that they are clones under the | ||
1179 | * lock (including re-checking that neither has been | ||
1180 | * uncloned in the meantime). It doesn't matter which | ||
1181 | * order we take the locks because no other cpu could | ||
1182 | * be trying to lock both of these tasks. | ||
1183 | */ | ||
1184 | spin_lock(&ctx->lock); | ||
1185 | spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); | ||
1186 | if (context_equiv(ctx, next_ctx)) { | ||
1187 | /* | ||
1188 | * XXX do we need a memory barrier of sorts | ||
1189 | * wrt rcu_dereference() of perf_counter_ctxp | ||
1190 | */ | ||
1191 | task->perf_counter_ctxp = next_ctx; | ||
1192 | next->perf_counter_ctxp = ctx; | ||
1193 | ctx->task = next; | ||
1194 | next_ctx->task = task; | ||
1195 | do_switch = 0; | ||
1196 | |||
1197 | perf_counter_sync_stat(ctx, next_ctx); | ||
1198 | } | ||
1199 | spin_unlock(&next_ctx->lock); | ||
1200 | spin_unlock(&ctx->lock); | ||
1201 | } | ||
1202 | rcu_read_unlock(); | ||
1203 | |||
1204 | if (do_switch) { | ||
1205 | __perf_counter_sched_out(ctx, cpuctx); | ||
1206 | cpuctx->task_ctx = NULL; | ||
1207 | } | ||
1208 | } | ||
1209 | |||
1210 | /* | ||
1211 | * Called with IRQs disabled | ||
1212 | */ | ||
1213 | static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) | ||
1214 | { | ||
1215 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
1216 | |||
1217 | if (!cpuctx->task_ctx) | ||
1218 | return; | ||
1219 | |||
1220 | if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) | ||
1221 | return; | ||
1222 | |||
1223 | __perf_counter_sched_out(ctx, cpuctx); | ||
1224 | cpuctx->task_ctx = NULL; | ||
1225 | } | ||
1226 | |||
1227 | /* | ||
1228 | * Called with IRQs disabled | ||
1229 | */ | ||
1230 | static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) | ||
1231 | { | ||
1232 | __perf_counter_sched_out(&cpuctx->ctx, cpuctx); | ||
1233 | } | ||
1234 | |||
1235 | static void | ||
1236 | __perf_counter_sched_in(struct perf_counter_context *ctx, | ||
1237 | struct perf_cpu_context *cpuctx, int cpu) | ||
1238 | { | ||
1239 | struct perf_counter *counter; | ||
1240 | int can_add_hw = 1; | ||
1241 | |||
1242 | spin_lock(&ctx->lock); | ||
1243 | ctx->is_active = 1; | ||
1244 | if (likely(!ctx->nr_counters)) | ||
1245 | goto out; | ||
1246 | |||
1247 | ctx->timestamp = perf_clock(); | ||
1248 | |||
1249 | perf_disable(); | ||
1250 | |||
1251 | /* | ||
1252 | * First go through the list and put on any pinned groups | ||
1253 | * in order to give them the best chance of going on. | ||
1254 | */ | ||
1255 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1256 | if (counter->state <= PERF_COUNTER_STATE_OFF || | ||
1257 | !counter->attr.pinned) | ||
1258 | continue; | ||
1259 | if (counter->cpu != -1 && counter->cpu != cpu) | ||
1260 | continue; | ||
1261 | |||
1262 | if (counter != counter->group_leader) | ||
1263 | counter_sched_in(counter, cpuctx, ctx, cpu); | ||
1264 | else { | ||
1265 | if (group_can_go_on(counter, cpuctx, 1)) | ||
1266 | group_sched_in(counter, cpuctx, ctx, cpu); | ||
1267 | } | ||
1268 | |||
1269 | /* | ||
1270 | * If this pinned group hasn't been scheduled, | ||
1271 | * put it in error state. | ||
1272 | */ | ||
1273 | if (counter->state == PERF_COUNTER_STATE_INACTIVE) { | ||
1274 | update_group_times(counter); | ||
1275 | counter->state = PERF_COUNTER_STATE_ERROR; | ||
1276 | } | ||
1277 | } | ||
1278 | |||
1279 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1280 | /* | ||
1281 | * Ignore counters in OFF or ERROR state, and | ||
1282 | * ignore pinned counters since we did them already. | ||
1283 | */ | ||
1284 | if (counter->state <= PERF_COUNTER_STATE_OFF || | ||
1285 | counter->attr.pinned) | ||
1286 | continue; | ||
1287 | |||
1288 | /* | ||
1289 | * Listen to the 'cpu' scheduling filter constraint | ||
1290 | * of counters: | ||
1291 | */ | ||
1292 | if (counter->cpu != -1 && counter->cpu != cpu) | ||
1293 | continue; | ||
1294 | |||
1295 | if (counter != counter->group_leader) { | ||
1296 | if (counter_sched_in(counter, cpuctx, ctx, cpu)) | ||
1297 | can_add_hw = 0; | ||
1298 | } else { | ||
1299 | if (group_can_go_on(counter, cpuctx, can_add_hw)) { | ||
1300 | if (group_sched_in(counter, cpuctx, ctx, cpu)) | ||
1301 | can_add_hw = 0; | ||
1302 | } | ||
1303 | } | ||
1304 | } | ||
1305 | perf_enable(); | ||
1306 | out: | ||
1307 | spin_unlock(&ctx->lock); | ||
1308 | } | ||
1309 | |||
1310 | /* | ||
1311 | * Called from scheduler to add the counters of the current task | ||
1312 | * with interrupts disabled. | ||
1313 | * | ||
1314 | * We restore the counter value and then enable it. | ||
1315 | * | ||
1316 | * This does not protect us against NMI, but enable() | ||
1317 | * sets the enabled bit in the control field of counter _before_ | ||
1318 | * accessing the counter control register. If an NMI hits, then it will | ||
1319 | * keep the counter running. | ||
1320 | */ | ||
1321 | void perf_counter_task_sched_in(struct task_struct *task, int cpu) | ||
1322 | { | ||
1323 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1324 | struct perf_counter_context *ctx = task->perf_counter_ctxp; | ||
1325 | |||
1326 | if (likely(!ctx)) | ||
1327 | return; | ||
1328 | if (cpuctx->task_ctx == ctx) | ||
1329 | return; | ||
1330 | __perf_counter_sched_in(ctx, cpuctx, cpu); | ||
1331 | cpuctx->task_ctx = ctx; | ||
1332 | } | ||
1333 | |||
1334 | static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) | ||
1335 | { | ||
1336 | struct perf_counter_context *ctx = &cpuctx->ctx; | ||
1337 | |||
1338 | __perf_counter_sched_in(ctx, cpuctx, cpu); | ||
1339 | } | ||
1340 | |||
1341 | #define MAX_INTERRUPTS (~0ULL) | ||
1342 | |||
1343 | static void perf_log_throttle(struct perf_counter *counter, int enable); | ||
1344 | |||
1345 | static void perf_adjust_period(struct perf_counter *counter, u64 events) | ||
1346 | { | ||
1347 | struct hw_perf_counter *hwc = &counter->hw; | ||
1348 | u64 period, sample_period; | ||
1349 | s64 delta; | ||
1350 | |||
1351 | events *= hwc->sample_period; | ||
1352 | period = div64_u64(events, counter->attr.sample_freq); | ||
1353 | |||
1354 | delta = (s64)(period - hwc->sample_period); | ||
1355 | delta = (delta + 7) / 8; /* low pass filter */ | ||
1356 | |||
1357 | sample_period = hwc->sample_period + delta; | ||
1358 | |||
1359 | if (!sample_period) | ||
1360 | sample_period = 1; | ||
1361 | |||
1362 | hwc->sample_period = sample_period; | ||
1363 | } | ||
1364 | |||
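/*
 * Worked example (editor's illustration, not part of the original file):
 * with attr.sample_freq = 1000 Hz, hwc->sample_period = 10000 and an
 * observed rate of events = 2000 interrupts/sec, the code above computes
 *
 *	events        = 2000 * 10000      = 20,000,000 events/sec
 *	period        = 20,000,000 / 1000 = 20,000
 *	delta         = (20,000 - 10,000 + 7) / 8 = 1,250
 *	sample_period = 10,000 + 1,250    = 11,250
 *
 * i.e. each adjustment moves the period one eighth of the way toward the
 * ideal value, which is the "low pass filter" mentioned above.
 */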
1365 | static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) | ||
1366 | { | ||
1367 | struct perf_counter *counter; | ||
1368 | struct hw_perf_counter *hwc; | ||
1369 | u64 interrupts, freq; | ||
1370 | |||
1371 | spin_lock(&ctx->lock); | ||
1372 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1373 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
1374 | continue; | ||
1375 | |||
1376 | hwc = &counter->hw; | ||
1377 | |||
1378 | interrupts = hwc->interrupts; | ||
1379 | hwc->interrupts = 0; | ||
1380 | |||
1381 | /* | ||
1382 | * unthrottle counters on the tick | ||
1383 | */ | ||
1384 | if (interrupts == MAX_INTERRUPTS) { | ||
1385 | perf_log_throttle(counter, 1); | ||
1386 | counter->pmu->unthrottle(counter); | ||
1387 | interrupts = 2*sysctl_perf_counter_sample_rate/HZ; | ||
1388 | } | ||
1389 | |||
1390 | if (!counter->attr.freq || !counter->attr.sample_freq) | ||
1391 | continue; | ||
1392 | |||
1393 | /* | ||
1394 | * if the specified freq < HZ then we need to skip ticks | ||
1395 | */ | ||
1396 | if (counter->attr.sample_freq < HZ) { | ||
1397 | freq = counter->attr.sample_freq; | ||
1398 | |||
1399 | hwc->freq_count += freq; | ||
1400 | hwc->freq_interrupts += interrupts; | ||
1401 | |||
1402 | if (hwc->freq_count < HZ) | ||
1403 | continue; | ||
1404 | |||
1405 | interrupts = hwc->freq_interrupts; | ||
1406 | hwc->freq_interrupts = 0; | ||
1407 | hwc->freq_count -= HZ; | ||
1408 | } else | ||
1409 | freq = HZ; | ||
1410 | |||
1411 | perf_adjust_period(counter, freq * interrupts); | ||
1412 | |||
1413 | /* | ||
1414 | * In order to avoid being stalled by an (accidental) huge | ||
1415 | * sample period, force reset the sample period if we didn't | ||
1416 | * get any events in this freq period. | ||
1417 | */ | ||
1418 | if (!interrupts) { | ||
1419 | perf_disable(); | ||
1420 | counter->pmu->disable(counter); | ||
1421 | atomic64_set(&hwc->period_left, 0); | ||
1422 | counter->pmu->enable(counter); | ||
1423 | perf_enable(); | ||
1424 | } | ||
1425 | } | ||
1426 | spin_unlock(&ctx->lock); | ||
1427 | } | ||
1428 | |||
1429 | /* | ||
1430 | * Round-robin a context's counters: | ||
1431 | */ | ||
1432 | static void rotate_ctx(struct perf_counter_context *ctx) | ||
1433 | { | ||
1434 | struct perf_counter *counter; | ||
1435 | |||
1436 | if (!ctx->nr_counters) | ||
1437 | return; | ||
1438 | |||
1439 | spin_lock(&ctx->lock); | ||
1440 | /* | ||
1441 | * Rotate the first entry last (works just fine for group counters too): | ||
1442 | */ | ||
1443 | perf_disable(); | ||
1444 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1445 | list_move_tail(&counter->list_entry, &ctx->counter_list); | ||
1446 | break; | ||
1447 | } | ||
1448 | perf_enable(); | ||
1449 | |||
1450 | spin_unlock(&ctx->lock); | ||
1451 | } | ||
1452 | |||
1453 | void perf_counter_task_tick(struct task_struct *curr, int cpu) | ||
1454 | { | ||
1455 | struct perf_cpu_context *cpuctx; | ||
1456 | struct perf_counter_context *ctx; | ||
1457 | |||
1458 | if (!atomic_read(&nr_counters)) | ||
1459 | return; | ||
1460 | |||
1461 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1462 | ctx = curr->perf_counter_ctxp; | ||
1463 | |||
1464 | perf_ctx_adjust_freq(&cpuctx->ctx); | ||
1465 | if (ctx) | ||
1466 | perf_ctx_adjust_freq(ctx); | ||
1467 | |||
1468 | perf_counter_cpu_sched_out(cpuctx); | ||
1469 | if (ctx) | ||
1470 | __perf_counter_task_sched_out(ctx); | ||
1471 | |||
1472 | rotate_ctx(&cpuctx->ctx); | ||
1473 | if (ctx) | ||
1474 | rotate_ctx(ctx); | ||
1475 | |||
1476 | perf_counter_cpu_sched_in(cpuctx, cpu); | ||
1477 | if (ctx) | ||
1478 | perf_counter_task_sched_in(curr, cpu); | ||
1479 | } | ||
1480 | |||
1481 | /* | ||
1482 | * Enable all of a task's counters that have been marked enable-on-exec. | ||
1483 | * This expects task == current. | ||
1484 | */ | ||
1485 | static void perf_counter_enable_on_exec(struct task_struct *task) | ||
1486 | { | ||
1487 | struct perf_counter_context *ctx; | ||
1488 | struct perf_counter *counter; | ||
1489 | unsigned long flags; | ||
1490 | int enabled = 0; | ||
1491 | |||
1492 | local_irq_save(flags); | ||
1493 | ctx = task->perf_counter_ctxp; | ||
1494 | if (!ctx || !ctx->nr_counters) | ||
1495 | goto out; | ||
1496 | |||
1497 | __perf_counter_task_sched_out(ctx); | ||
1498 | |||
1499 | spin_lock(&ctx->lock); | ||
1500 | |||
1501 | list_for_each_entry(counter, &ctx->counter_list, list_entry) { | ||
1502 | if (!counter->attr.enable_on_exec) | ||
1503 | continue; | ||
1504 | counter->attr.enable_on_exec = 0; | ||
1505 | if (counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
1506 | continue; | ||
1507 | __perf_counter_mark_enabled(counter, ctx); | ||
1508 | enabled = 1; | ||
1509 | } | ||
1510 | |||
1511 | /* | ||
1512 | * Unclone this context if we enabled any counter. | ||
1513 | */ | ||
1514 | if (enabled) | ||
1515 | unclone_ctx(ctx); | ||
1516 | |||
1517 | spin_unlock(&ctx->lock); | ||
1518 | |||
1519 | perf_counter_task_sched_in(task, smp_processor_id()); | ||
1520 | out: | ||
1521 | local_irq_restore(flags); | ||
1522 | } | ||
1523 | |||
1524 | /* | ||
1525 | * Cross CPU call to read the hardware counter | ||
1526 | */ | ||
1527 | static void __perf_counter_read(void *info) | ||
1528 | { | ||
1529 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
1530 | struct perf_counter *counter = info; | ||
1531 | struct perf_counter_context *ctx = counter->ctx; | ||
1532 | unsigned long flags; | ||
1533 | |||
1534 | /* | ||
1535 | * If this is a task context, we need to check whether it is | ||
1536 | * the current task context of this cpu. If not it has been | ||
1537 | * scheduled out before the smp call arrived. In that case | ||
1538 | * counter->count would have been updated to a recent sample | ||
1539 | * when the counter was scheduled out. | ||
1540 | */ | ||
1541 | if (ctx->task && cpuctx->task_ctx != ctx) | ||
1542 | return; | ||
1543 | |||
1544 | local_irq_save(flags); | ||
1545 | if (ctx->is_active) | ||
1546 | update_context_time(ctx); | ||
1547 | counter->pmu->read(counter); | ||
1548 | update_counter_times(counter); | ||
1549 | local_irq_restore(flags); | ||
1550 | } | ||
1551 | |||
1552 | static u64 perf_counter_read(struct perf_counter *counter) | ||
1553 | { | ||
1554 | /* | ||
1555 | * If counter is enabled and currently active on a CPU, update the | ||
1556 | * value in the counter structure: | ||
1557 | */ | ||
1558 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) { | ||
1559 | smp_call_function_single(counter->oncpu, | ||
1560 | __perf_counter_read, counter, 1); | ||
1561 | } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { | ||
1562 | update_counter_times(counter); | ||
1563 | } | ||
1564 | |||
1565 | return atomic64_read(&counter->count); | ||
1566 | } | ||
1567 | |||
1568 | /* | ||
1569 | * Initialize the perf_counter context in a task_struct: | ||
1570 | */ | ||
1571 | static void | ||
1572 | __perf_counter_init_context(struct perf_counter_context *ctx, | ||
1573 | struct task_struct *task) | ||
1574 | { | ||
1575 | memset(ctx, 0, sizeof(*ctx)); | ||
1576 | spin_lock_init(&ctx->lock); | ||
1577 | mutex_init(&ctx->mutex); | ||
1578 | INIT_LIST_HEAD(&ctx->counter_list); | ||
1579 | INIT_LIST_HEAD(&ctx->event_list); | ||
1580 | atomic_set(&ctx->refcount, 1); | ||
1581 | ctx->task = task; | ||
1582 | } | ||
1583 | |||
1584 | static struct perf_counter_context *find_get_context(pid_t pid, int cpu) | ||
1585 | { | ||
1586 | struct perf_counter_context *ctx; | ||
1587 | struct perf_cpu_context *cpuctx; | ||
1588 | struct task_struct *task; | ||
1589 | unsigned long flags; | ||
1590 | int err; | ||
1591 | |||
1592 | /* | ||
1593 | * If cpu is not a wildcard then this is a percpu counter: | ||
1594 | */ | ||
1595 | if (cpu != -1) { | ||
1596 | /* Must be root to operate on a CPU counter: */ | ||
1597 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | ||
1598 | return ERR_PTR(-EACCES); | ||
1599 | |||
1600 | if (cpu < 0 || cpu > num_possible_cpus()) | ||
1601 | return ERR_PTR(-EINVAL); | ||
1602 | |||
1603 | /* | ||
1604 | * We could be clever and allow to attach a counter to an | ||
1605 | * offline CPU and activate it when the CPU comes up, but | ||
1606 | * that's for later. | ||
1607 | */ | ||
1608 | if (!cpu_isset(cpu, cpu_online_map)) | ||
1609 | return ERR_PTR(-ENODEV); | ||
1610 | |||
1611 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
1612 | ctx = &cpuctx->ctx; | ||
1613 | get_ctx(ctx); | ||
1614 | |||
1615 | return ctx; | ||
1616 | } | ||
1617 | |||
1618 | rcu_read_lock(); | ||
1619 | if (!pid) | ||
1620 | task = current; | ||
1621 | else | ||
1622 | task = find_task_by_vpid(pid); | ||
1623 | if (task) | ||
1624 | get_task_struct(task); | ||
1625 | rcu_read_unlock(); | ||
1626 | |||
1627 | if (!task) | ||
1628 | return ERR_PTR(-ESRCH); | ||
1629 | |||
1630 | /* | ||
1631 | * Can't attach counters to a dying task. | ||
1632 | */ | ||
1633 | err = -ESRCH; | ||
1634 | if (task->flags & PF_EXITING) | ||
1635 | goto errout; | ||
1636 | |||
1637 | /* Reuse ptrace permission checks for now. */ | ||
1638 | err = -EACCES; | ||
1639 | if (!ptrace_may_access(task, PTRACE_MODE_READ)) | ||
1640 | goto errout; | ||
1641 | |||
1642 | retry: | ||
1643 | ctx = perf_lock_task_context(task, &flags); | ||
1644 | if (ctx) { | ||
1645 | unclone_ctx(ctx); | ||
1646 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
1647 | } | ||
1648 | |||
1649 | if (!ctx) { | ||
1650 | ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); | ||
1651 | err = -ENOMEM; | ||
1652 | if (!ctx) | ||
1653 | goto errout; | ||
1654 | __perf_counter_init_context(ctx, task); | ||
1655 | get_ctx(ctx); | ||
1656 | if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { | ||
1657 | /* | ||
1658 | * We raced with some other task; use | ||
1659 | * the context they set. | ||
1660 | */ | ||
1661 | kfree(ctx); | ||
1662 | goto retry; | ||
1663 | } | ||
1664 | get_task_struct(task); | ||
1665 | } | ||
1666 | |||
1667 | put_task_struct(task); | ||
1668 | return ctx; | ||
1669 | |||
1670 | errout: | ||
1671 | put_task_struct(task); | ||
1672 | return ERR_PTR(err); | ||
1673 | } | ||
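/*
 * The allocation path above is a common lockless install idiom:
 * allocate optimistically, publish with cmpxchg(), and defer to the
 * winner if another task raced us (the real code then retries so it
 * can take its reference under the proper lock).  A minimal sketch of
 * the same pattern, using a hypothetical 'struct foo':
 */
struct foo { int dummy; };

static struct foo *foo_install(struct foo **slot)
{
	struct foo *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return NULL;

	if (cmpxchg(slot, NULL, new) != NULL) {
		/* lost the race; somebody else's object is already published */
		kfree(new);
		return *slot;
	}
	return new;
}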
1674 | |||
1675 | static void free_counter_rcu(struct rcu_head *head) | ||
1676 | { | ||
1677 | struct perf_counter *counter; | ||
1678 | |||
1679 | counter = container_of(head, struct perf_counter, rcu_head); | ||
1680 | if (counter->ns) | ||
1681 | put_pid_ns(counter->ns); | ||
1682 | kfree(counter); | ||
1683 | } | ||
1684 | |||
1685 | static void perf_pending_sync(struct perf_counter *counter); | ||
1686 | |||
1687 | static void free_counter(struct perf_counter *counter) | ||
1688 | { | ||
1689 | perf_pending_sync(counter); | ||
1690 | |||
1691 | if (!counter->parent) { | ||
1692 | atomic_dec(&nr_counters); | ||
1693 | if (counter->attr.mmap) | ||
1694 | atomic_dec(&nr_mmap_counters); | ||
1695 | if (counter->attr.comm) | ||
1696 | atomic_dec(&nr_comm_counters); | ||
1697 | if (counter->attr.task) | ||
1698 | atomic_dec(&nr_task_counters); | ||
1699 | } | ||
1700 | |||
1701 | if (counter->output) { | ||
1702 | fput(counter->output->filp); | ||
1703 | counter->output = NULL; | ||
1704 | } | ||
1705 | |||
1706 | if (counter->destroy) | ||
1707 | counter->destroy(counter); | ||
1708 | |||
1709 | put_ctx(counter->ctx); | ||
1710 | call_rcu(&counter->rcu_head, free_counter_rcu); | ||
1711 | } | ||
1712 | |||
1713 | /* | ||
1714 | * Called when the last reference to the file is gone. | ||
1715 | */ | ||
1716 | static int perf_release(struct inode *inode, struct file *file) | ||
1717 | { | ||
1718 | struct perf_counter *counter = file->private_data; | ||
1719 | struct perf_counter_context *ctx = counter->ctx; | ||
1720 | |||
1721 | file->private_data = NULL; | ||
1722 | |||
1723 | WARN_ON_ONCE(ctx->parent_ctx); | ||
1724 | mutex_lock(&ctx->mutex); | ||
1725 | perf_counter_remove_from_context(counter); | ||
1726 | mutex_unlock(&ctx->mutex); | ||
1727 | |||
1728 | mutex_lock(&counter->owner->perf_counter_mutex); | ||
1729 | list_del_init(&counter->owner_entry); | ||
1730 | mutex_unlock(&counter->owner->perf_counter_mutex); | ||
1731 | put_task_struct(counter->owner); | ||
1732 | |||
1733 | free_counter(counter); | ||
1734 | |||
1735 | return 0; | ||
1736 | } | ||
1737 | |||
1738 | static int perf_counter_read_size(struct perf_counter *counter) | ||
1739 | { | ||
1740 | int entry = sizeof(u64); /* value */ | ||
1741 | int size = 0; | ||
1742 | int nr = 1; | ||
1743 | |||
1744 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
1745 | size += sizeof(u64); | ||
1746 | |||
1747 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
1748 | size += sizeof(u64); | ||
1749 | |||
1750 | if (counter->attr.read_format & PERF_FORMAT_ID) | ||
1751 | entry += sizeof(u64); | ||
1752 | |||
1753 | if (counter->attr.read_format & PERF_FORMAT_GROUP) { | ||
1754 | nr += counter->group_leader->nr_siblings; | ||
1755 | size += sizeof(u64); | ||
1756 | } | ||
1757 | |||
1758 | size += entry * nr; | ||
1759 | |||
1760 | return size; | ||
1761 | } | ||
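/*
 * The size computed here matches what a read() on the counter fd will
 * return.  For the non-GROUP case the u64 fields appear in the order
 * below; this is an illustrative layout only (the struct name is made
 * up, and each optional field is present only when the corresponding
 * read_format bit is set):
 */
struct perf_read_layout_sketch {
	u64	value;		/* always present */
	u64	time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	u64	time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	u64	id;		/* PERF_FORMAT_ID */
};
/*
 * With PERF_FORMAT_GROUP, a leading u64 'nr' (plus the optional time
 * fields) is followed by one { value [, id] } entry per group member,
 * as emitted by perf_counter_read_group() below.
 */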
1762 | |||
1763 | static u64 perf_counter_read_value(struct perf_counter *counter) | ||
1764 | { | ||
1765 | struct perf_counter *child; | ||
1766 | u64 total = 0; | ||
1767 | |||
1768 | total += perf_counter_read(counter); | ||
1769 | list_for_each_entry(child, &counter->child_list, child_list) | ||
1770 | total += perf_counter_read(child); | ||
1771 | |||
1772 | return total; | ||
1773 | } | ||
1774 | |||
1775 | static int perf_counter_read_entry(struct perf_counter *counter, | ||
1776 | u64 read_format, char __user *buf) | ||
1777 | { | ||
1778 | int n = 0, count = 0; | ||
1779 | u64 values[2]; | ||
1780 | |||
1781 | values[n++] = perf_counter_read_value(counter); | ||
1782 | if (read_format & PERF_FORMAT_ID) | ||
1783 | values[n++] = primary_counter_id(counter); | ||
1784 | |||
1785 | count = n * sizeof(u64); | ||
1786 | |||
1787 | if (copy_to_user(buf, values, count)) | ||
1788 | return -EFAULT; | ||
1789 | |||
1790 | return count; | ||
1791 | } | ||
1792 | |||
1793 | static int perf_counter_read_group(struct perf_counter *counter, | ||
1794 | u64 read_format, char __user *buf) | ||
1795 | { | ||
1796 | struct perf_counter *leader = counter->group_leader, *sub; | ||
1797 | int n = 0, size = 0, err = -EFAULT; | ||
1798 | u64 values[3]; | ||
1799 | |||
1800 | values[n++] = 1 + leader->nr_siblings; | ||
1801 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1802 | values[n++] = leader->total_time_enabled + | ||
1803 | atomic64_read(&leader->child_total_time_enabled); | ||
1804 | } | ||
1805 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1806 | values[n++] = leader->total_time_running + | ||
1807 | atomic64_read(&leader->child_total_time_running); | ||
1808 | } | ||
1809 | |||
1810 | size = n * sizeof(u64); | ||
1811 | |||
1812 | if (copy_to_user(buf, values, size)) | ||
1813 | return -EFAULT; | ||
1814 | |||
1815 | err = perf_counter_read_entry(leader, read_format, buf + size); | ||
1816 | if (err < 0) | ||
1817 | return err; | ||
1818 | |||
1819 | size += err; | ||
1820 | |||
1821 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { | ||
1822 | err = perf_counter_read_entry(sub, read_format, | ||
1823 | buf + size); | ||
1824 | if (err < 0) | ||
1825 | return err; | ||
1826 | |||
1827 | size += err; | ||
1828 | } | ||
1829 | |||
1830 | return size; | ||
1831 | } | ||
1832 | |||
1833 | static int perf_counter_read_one(struct perf_counter *counter, | ||
1834 | u64 read_format, char __user *buf) | ||
1835 | { | ||
1836 | u64 values[4]; | ||
1837 | int n = 0; | ||
1838 | |||
1839 | values[n++] = perf_counter_read_value(counter); | ||
1840 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
1841 | values[n++] = counter->total_time_enabled + | ||
1842 | atomic64_read(&counter->child_total_time_enabled); | ||
1843 | } | ||
1844 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
1845 | values[n++] = counter->total_time_running + | ||
1846 | atomic64_read(&counter->child_total_time_running); | ||
1847 | } | ||
1848 | if (read_format & PERF_FORMAT_ID) | ||
1849 | values[n++] = primary_counter_id(counter); | ||
1850 | |||
1851 | if (copy_to_user(buf, values, n * sizeof(u64))) | ||
1852 | return -EFAULT; | ||
1853 | |||
1854 | return n * sizeof(u64); | ||
1855 | } | ||
1856 | |||
1857 | /* | ||
1858 | * Read the performance counter - simple non-blocking version for now | ||
1859 | */ | ||
1860 | static ssize_t | ||
1861 | perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) | ||
1862 | { | ||
1863 | u64 read_format = counter->attr.read_format; | ||
1864 | int ret; | ||
1865 | |||
1866 | /* | ||
1867 | * Return end-of-file for a read on a counter that is in | ||
1868 | * error state (i.e. because it was pinned but it couldn't be | ||
1869 | * scheduled on to the CPU at some point). | ||
1870 | */ | ||
1871 | if (counter->state == PERF_COUNTER_STATE_ERROR) | ||
1872 | return 0; | ||
1873 | |||
1874 | if (count < perf_counter_read_size(counter)) | ||
1875 | return -ENOSPC; | ||
1876 | |||
1877 | WARN_ON_ONCE(counter->ctx->parent_ctx); | ||
1878 | mutex_lock(&counter->child_mutex); | ||
1879 | if (read_format & PERF_FORMAT_GROUP) | ||
1880 | ret = perf_counter_read_group(counter, read_format, buf); | ||
1881 | else | ||
1882 | ret = perf_counter_read_one(counter, read_format, buf); | ||
1883 | mutex_unlock(&counter->child_mutex); | ||
1884 | |||
1885 | return ret; | ||
1886 | } | ||
1887 | |||
1888 | static ssize_t | ||
1889 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | ||
1890 | { | ||
1891 | struct perf_counter *counter = file->private_data; | ||
1892 | |||
1893 | return perf_read_hw(counter, buf, count); | ||
1894 | } | ||
1895 | |||
1896 | static unsigned int perf_poll(struct file *file, poll_table *wait) | ||
1897 | { | ||
1898 | struct perf_counter *counter = file->private_data; | ||
1899 | struct perf_mmap_data *data; | ||
1900 | unsigned int events = POLLHUP; | ||
1901 | |||
1902 | rcu_read_lock(); | ||
1903 | data = rcu_dereference(counter->data); | ||
1904 | if (data) | ||
1905 | events = atomic_xchg(&data->poll, 0); | ||
1906 | rcu_read_unlock(); | ||
1907 | |||
1908 | poll_wait(file, &counter->waitq, wait); | ||
1909 | |||
1910 | return events; | ||
1911 | } | ||
1912 | |||
1913 | static void perf_counter_reset(struct perf_counter *counter) | ||
1914 | { | ||
1915 | (void)perf_counter_read(counter); | ||
1916 | atomic64_set(&counter->count, 0); | ||
1917 | perf_counter_update_userpage(counter); | ||
1918 | } | ||
1919 | |||
1920 | /* | ||
1921 | * Holding the top-level counter's child_mutex means that any | ||
1922 | * descendant process that has inherited this counter will block | ||
1923 | * in sync_child_counter if it goes to exit, thus satisfying the | ||
1924 | * task existence requirements of perf_counter_enable/disable. | ||
1925 | */ | ||
1926 | static void perf_counter_for_each_child(struct perf_counter *counter, | ||
1927 | void (*func)(struct perf_counter *)) | ||
1928 | { | ||
1929 | struct perf_counter *child; | ||
1930 | |||
1931 | WARN_ON_ONCE(counter->ctx->parent_ctx); | ||
1932 | mutex_lock(&counter->child_mutex); | ||
1933 | func(counter); | ||
1934 | list_for_each_entry(child, &counter->child_list, child_list) | ||
1935 | func(child); | ||
1936 | mutex_unlock(&counter->child_mutex); | ||
1937 | } | ||
1938 | |||
1939 | static void perf_counter_for_each(struct perf_counter *counter, | ||
1940 | void (*func)(struct perf_counter *)) | ||
1941 | { | ||
1942 | struct perf_counter_context *ctx = counter->ctx; | ||
1943 | struct perf_counter *sibling; | ||
1944 | |||
1945 | WARN_ON_ONCE(ctx->parent_ctx); | ||
1946 | mutex_lock(&ctx->mutex); | ||
1947 | counter = counter->group_leader; | ||
1948 | |||
1949 | perf_counter_for_each_child(counter, func); | ||
1950 | func(counter); | ||
1951 | list_for_each_entry(sibling, &counter->sibling_list, list_entry) | ||
1952 | perf_counter_for_each_child(sibling, func); | ||
1953 | mutex_unlock(&ctx->mutex); | ||
1954 | } | ||
1955 | |||
1956 | static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) | ||
1957 | { | ||
1958 | struct perf_counter_context *ctx = counter->ctx; | ||
1959 | unsigned long size; | ||
1960 | int ret = 0; | ||
1961 | u64 value; | ||
1962 | |||
1963 | if (!counter->attr.sample_period) | ||
1964 | return -EINVAL; | ||
1965 | |||
1966 | size = copy_from_user(&value, arg, sizeof(value)); | ||
1967 | if (size) | ||
1968 | return -EFAULT; | ||
1969 | |||
1970 | if (!value) | ||
1971 | return -EINVAL; | ||
1972 | |||
1973 | spin_lock_irq(&ctx->lock); | ||
1974 | if (counter->attr.freq) { | ||
1975 | if (value > sysctl_perf_counter_sample_rate) { | ||
1976 | ret = -EINVAL; | ||
1977 | goto unlock; | ||
1978 | } | ||
1979 | |||
1980 | counter->attr.sample_freq = value; | ||
1981 | } else { | ||
1982 | counter->attr.sample_period = value; | ||
1983 | counter->hw.sample_period = value; | ||
1984 | } | ||
1985 | unlock: | ||
1986 | spin_unlock_irq(&ctx->lock); | ||
1987 | |||
1988 | return ret; | ||
1989 | } | ||
1990 | |||
1991 | int perf_counter_set_output(struct perf_counter *counter, int output_fd); | ||
1992 | |||
1993 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
1994 | { | ||
1995 | struct perf_counter *counter = file->private_data; | ||
1996 | void (*func)(struct perf_counter *); | ||
1997 | u32 flags = arg; | ||
1998 | |||
1999 | switch (cmd) { | ||
2000 | case PERF_COUNTER_IOC_ENABLE: | ||
2001 | func = perf_counter_enable; | ||
2002 | break; | ||
2003 | case PERF_COUNTER_IOC_DISABLE: | ||
2004 | func = perf_counter_disable; | ||
2005 | break; | ||
2006 | case PERF_COUNTER_IOC_RESET: | ||
2007 | func = perf_counter_reset; | ||
2008 | break; | ||
2009 | |||
2010 | case PERF_COUNTER_IOC_REFRESH: | ||
2011 | return perf_counter_refresh(counter, arg); | ||
2012 | |||
2013 | case PERF_COUNTER_IOC_PERIOD: | ||
2014 | return perf_counter_period(counter, (u64 __user *)arg); | ||
2015 | |||
2016 | case PERF_COUNTER_IOC_SET_OUTPUT: | ||
2017 | return perf_counter_set_output(counter, arg); | ||
2018 | |||
2019 | default: | ||
2020 | return -ENOTTY; | ||
2021 | } | ||
2022 | |||
2023 | if (flags & PERF_IOC_FLAG_GROUP) | ||
2024 | perf_counter_for_each(counter, func); | ||
2025 | else | ||
2026 | perf_counter_for_each_child(counter, func); | ||
2027 | |||
2028 | return 0; | ||
2029 | } | ||
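/*
 * A minimal user-space sketch of driving these ioctls, assuming 'fd'
 * was returned by the perf_counter_open() syscall and that the program
 * is built against <sys/ioctl.h> and <linux/perf_counter.h>; the
 * function name is made up here:
 */
static void example_drive_counter(int fd)
{
	__u64 period = 10000;

	ioctl(fd, PERF_COUNTER_IOC_RESET, 0);
	/* PERF_COUNTER_IOC_PERIOD takes a pointer to the new period */
	ioctl(fd, PERF_COUNTER_IOC_PERIOD, &period);
	/* PERF_IOC_FLAG_GROUP applies the operation to the whole group */
	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	/* ... run the workload being measured ... */
	ioctl(fd, PERF_COUNTER_IOC_DISABLE, 0);
}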
2030 | |||
2031 | int perf_counter_task_enable(void) | ||
2032 | { | ||
2033 | struct perf_counter *counter; | ||
2034 | |||
2035 | mutex_lock(¤t->perf_counter_mutex); | ||
2036 | list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) | ||
2037 | perf_counter_for_each_child(counter, perf_counter_enable); | ||
2038 | mutex_unlock(¤t->perf_counter_mutex); | ||
2039 | |||
2040 | return 0; | ||
2041 | } | ||
2042 | |||
2043 | int perf_counter_task_disable(void) | ||
2044 | { | ||
2045 | struct perf_counter *counter; | ||
2046 | |||
2047 | mutex_lock(¤t->perf_counter_mutex); | ||
2048 | list_for_each_entry(counter, ¤t->perf_counter_list, owner_entry) | ||
2049 | perf_counter_for_each_child(counter, perf_counter_disable); | ||
2050 | mutex_unlock(¤t->perf_counter_mutex); | ||
2051 | |||
2052 | return 0; | ||
2053 | } | ||
2054 | |||
2055 | #ifndef PERF_COUNTER_INDEX_OFFSET | ||
2056 | # define PERF_COUNTER_INDEX_OFFSET 0 | ||
2057 | #endif | ||
2058 | |||
2059 | static int perf_counter_index(struct perf_counter *counter) | ||
2060 | { | ||
2061 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) | ||
2062 | return 0; | ||
2063 | |||
2064 | return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; | ||
2065 | } | ||
2066 | |||
2067 | /* | ||
2068 | * Callers need to ensure there can be no nesting of this function, otherwise | ||
2069 | * the seqlock logic goes bad. We can not serialize this because the arch | ||
2070 | * code calls this from NMI context. | ||
2071 | */ | ||
2072 | void perf_counter_update_userpage(struct perf_counter *counter) | ||
2073 | { | ||
2074 | struct perf_counter_mmap_page *userpg; | ||
2075 | struct perf_mmap_data *data; | ||
2076 | |||
2077 | rcu_read_lock(); | ||
2078 | data = rcu_dereference(counter->data); | ||
2079 | if (!data) | ||
2080 | goto unlock; | ||
2081 | |||
2082 | userpg = data->user_page; | ||
2083 | |||
2084 | /* | ||
2085 | * Disable preemption so as to not let the corresponding user-space | ||
2086 | * spin too long if we get preempted. | ||
2087 | */ | ||
2088 | preempt_disable(); | ||
2089 | ++userpg->lock; | ||
2090 | barrier(); | ||
2091 | userpg->index = perf_counter_index(counter); | ||
2092 | userpg->offset = atomic64_read(&counter->count); | ||
2093 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | ||
2094 | userpg->offset -= atomic64_read(&counter->hw.prev_count); | ||
2095 | |||
2096 | userpg->time_enabled = counter->total_time_enabled + | ||
2097 | atomic64_read(&counter->child_total_time_enabled); | ||
2098 | |||
2099 | userpg->time_running = counter->total_time_running + | ||
2100 | atomic64_read(&counter->child_total_time_running); | ||
2101 | |||
2102 | barrier(); | ||
2103 | ++userpg->lock; | ||
2104 | preempt_enable(); | ||
2105 | unlock: | ||
2106 | rcu_read_unlock(); | ||
2107 | } | ||
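/*
 * The matching user-space read side is a seqcount-style retry loop; a
 * sketch, assuming 'pg' points at the mmap()ed control page and that
 * barrier() stands in for a user-space compiler barrier (the function
 * name is made up here):
 */
static __u64 example_read_userpage(volatile struct perf_counter_mmap_page *pg)
{
	__u32 seq;
	__u64 count;

	do {
		seq = pg->lock;
		barrier();

		count = pg->offset;
		/*
		 * When pg->index is non-zero the counter is currently live
		 * on this CPU and the self-monitoring value (rdpmc with
		 * pg->index - 1 on x86) must be added; omitted here.
		 */

		barrier();
	} while (pg->lock != seq || (seq & 1));

	return count;
}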
2108 | |||
2109 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
2110 | { | ||
2111 | struct perf_counter *counter = vma->vm_file->private_data; | ||
2112 | struct perf_mmap_data *data; | ||
2113 | int ret = VM_FAULT_SIGBUS; | ||
2114 | |||
2115 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | ||
2116 | if (vmf->pgoff == 0) | ||
2117 | ret = 0; | ||
2118 | return ret; | ||
2119 | } | ||
2120 | |||
2121 | rcu_read_lock(); | ||
2122 | data = rcu_dereference(counter->data); | ||
2123 | if (!data) | ||
2124 | goto unlock; | ||
2125 | |||
2126 | if (vmf->pgoff == 0) { | ||
2127 | vmf->page = virt_to_page(data->user_page); | ||
2128 | } else { | ||
2129 | int nr = vmf->pgoff - 1; | ||
2130 | |||
2131 | if ((unsigned)nr > data->nr_pages) | ||
2132 | goto unlock; | ||
2133 | |||
2134 | if (vmf->flags & FAULT_FLAG_WRITE) | ||
2135 | goto unlock; | ||
2136 | |||
2137 | vmf->page = virt_to_page(data->data_pages[nr]); | ||
2138 | } | ||
2139 | |||
2140 | get_page(vmf->page); | ||
2141 | vmf->page->mapping = vma->vm_file->f_mapping; | ||
2142 | vmf->page->index = vmf->pgoff; | ||
2143 | |||
2144 | ret = 0; | ||
2145 | unlock: | ||
2146 | rcu_read_unlock(); | ||
2147 | |||
2148 | return ret; | ||
2149 | } | ||
2150 | |||
2151 | static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) | ||
2152 | { | ||
2153 | struct perf_mmap_data *data; | ||
2154 | unsigned long size; | ||
2155 | int i; | ||
2156 | |||
2157 | WARN_ON(atomic_read(&counter->mmap_count)); | ||
2158 | |||
2159 | size = sizeof(struct perf_mmap_data); | ||
2160 | size += nr_pages * sizeof(void *); | ||
2161 | |||
2162 | data = kzalloc(size, GFP_KERNEL); | ||
2163 | if (!data) | ||
2164 | goto fail; | ||
2165 | |||
2166 | data->user_page = (void *)get_zeroed_page(GFP_KERNEL); | ||
2167 | if (!data->user_page) | ||
2168 | goto fail_user_page; | ||
2169 | |||
2170 | for (i = 0; i < nr_pages; i++) { | ||
2171 | data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); | ||
2172 | if (!data->data_pages[i]) | ||
2173 | goto fail_data_pages; | ||
2174 | } | ||
2175 | |||
2176 | data->nr_pages = nr_pages; | ||
2177 | atomic_set(&data->lock, -1); | ||
2178 | |||
2179 | rcu_assign_pointer(counter->data, data); | ||
2180 | |||
2181 | return 0; | ||
2182 | |||
2183 | fail_data_pages: | ||
2184 | for (i--; i >= 0; i--) | ||
2185 | free_page((unsigned long)data->data_pages[i]); | ||
2186 | |||
2187 | free_page((unsigned long)data->user_page); | ||
2188 | |||
2189 | fail_user_page: | ||
2190 | kfree(data); | ||
2191 | |||
2192 | fail: | ||
2193 | return -ENOMEM; | ||
2194 | } | ||
2195 | |||
2196 | static void perf_mmap_free_page(unsigned long addr) | ||
2197 | { | ||
2198 | struct page *page = virt_to_page((void *)addr); | ||
2199 | |||
2200 | page->mapping = NULL; | ||
2201 | __free_page(page); | ||
2202 | } | ||
2203 | |||
2204 | static void __perf_mmap_data_free(struct rcu_head *rcu_head) | ||
2205 | { | ||
2206 | struct perf_mmap_data *data; | ||
2207 | int i; | ||
2208 | |||
2209 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | ||
2210 | |||
2211 | perf_mmap_free_page((unsigned long)data->user_page); | ||
2212 | for (i = 0; i < data->nr_pages; i++) | ||
2213 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | ||
2214 | |||
2215 | kfree(data); | ||
2216 | } | ||
2217 | |||
2218 | static void perf_mmap_data_free(struct perf_counter *counter) | ||
2219 | { | ||
2220 | struct perf_mmap_data *data = counter->data; | ||
2221 | |||
2222 | WARN_ON(atomic_read(&counter->mmap_count)); | ||
2223 | |||
2224 | rcu_assign_pointer(counter->data, NULL); | ||
2225 | call_rcu(&data->rcu_head, __perf_mmap_data_free); | ||
2226 | } | ||
2227 | |||
2228 | static void perf_mmap_open(struct vm_area_struct *vma) | ||
2229 | { | ||
2230 | struct perf_counter *counter = vma->vm_file->private_data; | ||
2231 | |||
2232 | atomic_inc(&counter->mmap_count); | ||
2233 | } | ||
2234 | |||
2235 | static void perf_mmap_close(struct vm_area_struct *vma) | ||
2236 | { | ||
2237 | struct perf_counter *counter = vma->vm_file->private_data; | ||
2238 | |||
2239 | WARN_ON_ONCE(counter->ctx->parent_ctx); | ||
2240 | if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { | ||
2241 | struct user_struct *user = current_user(); | ||
2242 | |||
2243 | atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); | ||
2244 | vma->vm_mm->locked_vm -= counter->data->nr_locked; | ||
2245 | perf_mmap_data_free(counter); | ||
2246 | mutex_unlock(&counter->mmap_mutex); | ||
2247 | } | ||
2248 | } | ||
2249 | |||
2250 | static struct vm_operations_struct perf_mmap_vmops = { | ||
2251 | .open = perf_mmap_open, | ||
2252 | .close = perf_mmap_close, | ||
2253 | .fault = perf_mmap_fault, | ||
2254 | .page_mkwrite = perf_mmap_fault, | ||
2255 | }; | ||
2256 | |||
2257 | static int perf_mmap(struct file *file, struct vm_area_struct *vma) | ||
2258 | { | ||
2259 | struct perf_counter *counter = file->private_data; | ||
2260 | unsigned long user_locked, user_lock_limit; | ||
2261 | struct user_struct *user = current_user(); | ||
2262 | unsigned long locked, lock_limit; | ||
2263 | unsigned long vma_size; | ||
2264 | unsigned long nr_pages; | ||
2265 | long user_extra, extra; | ||
2266 | int ret = 0; | ||
2267 | |||
2268 | if (!(vma->vm_flags & VM_SHARED)) | ||
2269 | return -EINVAL; | ||
2270 | |||
2271 | vma_size = vma->vm_end - vma->vm_start; | ||
2272 | nr_pages = (vma_size / PAGE_SIZE) - 1; | ||
2273 | |||
2274 | /* | ||
2275 | * If we have data pages ensure they're a power-of-two number, so we | ||
2276 | * can do bitmasks instead of modulo. | ||
2277 | */ | ||
2278 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) | ||
2279 | return -EINVAL; | ||
2280 | |||
2281 | if (vma_size != PAGE_SIZE * (1 + nr_pages)) | ||
2282 | return -EINVAL; | ||
2283 | |||
2284 | if (vma->vm_pgoff != 0) | ||
2285 | return -EINVAL; | ||
2286 | |||
2287 | WARN_ON_ONCE(counter->ctx->parent_ctx); | ||
2288 | mutex_lock(&counter->mmap_mutex); | ||
2289 | if (counter->output) { | ||
2290 | ret = -EINVAL; | ||
2291 | goto unlock; | ||
2292 | } | ||
2293 | |||
2294 | if (atomic_inc_not_zero(&counter->mmap_count)) { | ||
2295 | if (nr_pages != counter->data->nr_pages) | ||
2296 | ret = -EINVAL; | ||
2297 | goto unlock; | ||
2298 | } | ||
2299 | |||
2300 | user_extra = nr_pages + 1; | ||
2301 | user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); | ||
2302 | |||
2303 | /* | ||
2304 | * Increase the limit linearly with more CPUs: | ||
2305 | */ | ||
2306 | user_lock_limit *= num_online_cpus(); | ||
2307 | |||
2308 | user_locked = atomic_long_read(&user->locked_vm) + user_extra; | ||
2309 | |||
2310 | extra = 0; | ||
2311 | if (user_locked > user_lock_limit) | ||
2312 | extra = user_locked - user_lock_limit; | ||
2313 | |||
2314 | lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; | ||
2315 | lock_limit >>= PAGE_SHIFT; | ||
2316 | locked = vma->vm_mm->locked_vm + extra; | ||
2317 | |||
2318 | if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { | ||
2319 | ret = -EPERM; | ||
2320 | goto unlock; | ||
2321 | } | ||
2322 | |||
2323 | WARN_ON(counter->data); | ||
2324 | ret = perf_mmap_data_alloc(counter, nr_pages); | ||
2325 | if (ret) | ||
2326 | goto unlock; | ||
2327 | |||
2328 | atomic_set(&counter->mmap_count, 1); | ||
2329 | atomic_long_add(user_extra, &user->locked_vm); | ||
2330 | vma->vm_mm->locked_vm += extra; | ||
2331 | counter->data->nr_locked = extra; | ||
2332 | if (vma->vm_flags & VM_WRITE) | ||
2333 | counter->data->writable = 1; | ||
2334 | |||
2335 | unlock: | ||
2336 | mutex_unlock(&counter->mmap_mutex); | ||
2337 | |||
2338 | vma->vm_flags |= VM_RESERVED; | ||
2339 | vma->vm_ops = &perf_mmap_vmops; | ||
2340 | |||
2341 | return ret; | ||
2342 | } | ||
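/*
 * The corresponding user-space mapping is one control page followed by
 * 2^n data pages, mapped shared (and writable if data_tail is to be
 * updated); a sketch built against <sys/mman.h> and <unistd.h>, with a
 * made-up function name:
 */
static void *example_map_buffer(int fd, unsigned int data_pages)
{
	/* data_pages must be a power of two, e.g. 128 */
	size_t len = (size_t)(data_pages + 1) * getpagesize();

	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}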
2343 | |||
2344 | static int perf_fasync(int fd, struct file *filp, int on) | ||
2345 | { | ||
2346 | struct inode *inode = filp->f_path.dentry->d_inode; | ||
2347 | struct perf_counter *counter = filp->private_data; | ||
2348 | int retval; | ||
2349 | |||
2350 | mutex_lock(&inode->i_mutex); | ||
2351 | retval = fasync_helper(fd, filp, on, &counter->fasync); | ||
2352 | mutex_unlock(&inode->i_mutex); | ||
2353 | |||
2354 | if (retval < 0) | ||
2355 | return retval; | ||
2356 | |||
2357 | return 0; | ||
2358 | } | ||
2359 | |||
2360 | static const struct file_operations perf_fops = { | ||
2361 | .release = perf_release, | ||
2362 | .read = perf_read, | ||
2363 | .poll = perf_poll, | ||
2364 | .unlocked_ioctl = perf_ioctl, | ||
2365 | .compat_ioctl = perf_ioctl, | ||
2366 | .mmap = perf_mmap, | ||
2367 | .fasync = perf_fasync, | ||
2368 | }; | ||
2369 | |||
2370 | /* | ||
2371 | * Perf counter wakeup | ||
2372 | * | ||
2373 | * If there's data, ensure we set the poll() state and publish everything | ||
2374 | * to user-space before waking everybody up. | ||
2375 | */ | ||
2376 | |||
2377 | void perf_counter_wakeup(struct perf_counter *counter) | ||
2378 | { | ||
2379 | wake_up_all(&counter->waitq); | ||
2380 | |||
2381 | if (counter->pending_kill) { | ||
2382 | kill_fasync(&counter->fasync, SIGIO, counter->pending_kill); | ||
2383 | counter->pending_kill = 0; | ||
2384 | } | ||
2385 | } | ||
2386 | |||
2387 | /* | ||
2388 | * Pending wakeups | ||
2389 | * | ||
2390 | * Handle the case where we need to wake up from NMI (or rq->lock) context. | ||
2391 | * | ||
2392 | * The NMI bit means we cannot possibly take locks. Therefore, maintain a | ||
2393 | * single linked list and use cmpxchg() to add entries lockless. | ||
2394 | */ | ||
2395 | |||
2396 | static void perf_pending_counter(struct perf_pending_entry *entry) | ||
2397 | { | ||
2398 | struct perf_counter *counter = container_of(entry, | ||
2399 | struct perf_counter, pending); | ||
2400 | |||
2401 | if (counter->pending_disable) { | ||
2402 | counter->pending_disable = 0; | ||
2403 | __perf_counter_disable(counter); | ||
2404 | } | ||
2405 | |||
2406 | if (counter->pending_wakeup) { | ||
2407 | counter->pending_wakeup = 0; | ||
2408 | perf_counter_wakeup(counter); | ||
2409 | } | ||
2410 | } | ||
2411 | |||
2412 | #define PENDING_TAIL ((struct perf_pending_entry *)-1UL) | ||
2413 | |||
2414 | static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = { | ||
2415 | PENDING_TAIL, | ||
2416 | }; | ||
2417 | |||
2418 | static void perf_pending_queue(struct perf_pending_entry *entry, | ||
2419 | void (*func)(struct perf_pending_entry *)) | ||
2420 | { | ||
2421 | struct perf_pending_entry **head; | ||
2422 | |||
2423 | if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL) | ||
2424 | return; | ||
2425 | |||
2426 | entry->func = func; | ||
2427 | |||
2428 | head = &get_cpu_var(perf_pending_head); | ||
2429 | |||
2430 | do { | ||
2431 | entry->next = *head; | ||
2432 | } while (cmpxchg(head, entry->next, entry) != entry->next); | ||
2433 | |||
2434 | set_perf_counter_pending(); | ||
2435 | |||
2436 | put_cpu_var(perf_pending_head); | ||
2437 | } | ||
2438 | |||
2439 | static int __perf_pending_run(void) | ||
2440 | { | ||
2441 | struct perf_pending_entry *list; | ||
2442 | int nr = 0; | ||
2443 | |||
2444 | list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL); | ||
2445 | while (list != PENDING_TAIL) { | ||
2446 | void (*func)(struct perf_pending_entry *); | ||
2447 | struct perf_pending_entry *entry = list; | ||
2448 | |||
2449 | list = list->next; | ||
2450 | |||
2451 | func = entry->func; | ||
2452 | entry->next = NULL; | ||
2453 | /* | ||
2454 | * Ensure we observe the unqueue before we issue the wakeup, | ||
2455 | * so that we won't be waiting forever. | ||
2456 | * -- see perf_not_pending(). | ||
2457 | */ | ||
2458 | smp_wmb(); | ||
2459 | |||
2460 | func(entry); | ||
2461 | nr++; | ||
2462 | } | ||
2463 | |||
2464 | return nr; | ||
2465 | } | ||
2466 | |||
2467 | static inline int perf_not_pending(struct perf_counter *counter) | ||
2468 | { | ||
2469 | /* | ||
2470 | * If we flush on whatever cpu we run, there is a chance we don't | ||
2471 | * need to wait. | ||
2472 | */ | ||
2473 | get_cpu(); | ||
2474 | __perf_pending_run(); | ||
2475 | put_cpu(); | ||
2476 | |||
2477 | /* | ||
2478 | * Ensure we see the proper queue state before going to sleep | ||
2479 | * so that we do not miss the wakeup. -- see __perf_pending_run() | ||
2480 | */ | ||
2481 | smp_rmb(); | ||
2482 | return counter->pending.next == NULL; | ||
2483 | } | ||
2484 | |||
2485 | static void perf_pending_sync(struct perf_counter *counter) | ||
2486 | { | ||
2487 | wait_event(counter->waitq, perf_not_pending(counter)); | ||
2488 | } | ||
2489 | |||
2490 | void perf_counter_do_pending(void) | ||
2491 | { | ||
2492 | __perf_pending_run(); | ||
2493 | } | ||
2494 | |||
2495 | /* | ||
2496 | * Callchain support -- arch specific | ||
2497 | */ | ||
2498 | |||
2499 | __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
2500 | { | ||
2501 | return NULL; | ||
2502 | } | ||
2503 | |||
2504 | /* | ||
2505 | * Output | ||
2506 | */ | ||
2507 | |||
2508 | struct perf_output_handle { | ||
2509 | struct perf_counter *counter; | ||
2510 | struct perf_mmap_data *data; | ||
2511 | unsigned long head; | ||
2512 | unsigned long offset; | ||
2513 | int nmi; | ||
2514 | int sample; | ||
2515 | int locked; | ||
2516 | unsigned long flags; | ||
2517 | }; | ||
2518 | |||
2519 | static bool perf_output_space(struct perf_mmap_data *data, | ||
2520 | unsigned int offset, unsigned int head) | ||
2521 | { | ||
2522 | unsigned long tail; | ||
2523 | unsigned long mask; | ||
2524 | |||
2525 | if (!data->writable) | ||
2526 | return true; | ||
2527 | |||
2528 | mask = (data->nr_pages << PAGE_SHIFT) - 1; | ||
2529 | /* | ||
2530 | * Userspace could choose to issue a mb() before updating the tail | ||
2531 | * pointer, so that all reads will be completed before the write is | ||
2532 | * issued. | ||
2533 | */ | ||
2534 | tail = ACCESS_ONCE(data->user_page->data_tail); | ||
2535 | smp_rmb(); | ||
2536 | |||
2537 | offset = (offset - tail) & mask; | ||
2538 | head = (head - tail) & mask; | ||
2539 | |||
2540 | if ((int)(head - offset) < 0) | ||
2541 | return false; | ||
2542 | |||
2543 | return true; | ||
2544 | } | ||
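/*
 * perf_output_space() is the producer half of the flow control hinted
 * at above; a sketch of the consumer half in user space, where 'pg' is
 * the mapped control page, 'base' the start of the data area, 'size'
 * its length in bytes (a power of two), and rmb()/mb() stand in for
 * the appropriate user-space memory barriers:
 */
static void example_consume(volatile struct perf_counter_mmap_page *pg,
			    unsigned char *base, unsigned long size)
{
	unsigned long head = pg->data_head;
	unsigned long tail = pg->data_tail;

	rmb();		/* read data_head before reading the records */

	while (tail != head) {
		struct perf_event_header *hdr;

		hdr = (struct perf_event_header *)(base + (tail & (size - 1)));
		/*
		 * Records that wrap around the end of the data area would
		 * need to be copied out first; ignored in this sketch.
		 */
		/* ... handle the record according to hdr->type ... */
		tail += hdr->size;
	}

	mb();		/* finish the reads before publishing the new tail */
	pg->data_tail = tail;
}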
2545 | |||
2546 | static void perf_output_wakeup(struct perf_output_handle *handle) | ||
2547 | { | ||
2548 | atomic_set(&handle->data->poll, POLL_IN); | ||
2549 | |||
2550 | if (handle->nmi) { | ||
2551 | handle->counter->pending_wakeup = 1; | ||
2552 | perf_pending_queue(&handle->counter->pending, | ||
2553 | perf_pending_counter); | ||
2554 | } else | ||
2555 | perf_counter_wakeup(handle->counter); | ||
2556 | } | ||
2557 | |||
2558 | /* | ||
2559 | * Curious locking construct. | ||
2560 | * | ||
2561 | * We need to ensure a later event doesn't publish a head when a former | ||
2562 | * event isn't done writing. However since we need to deal with NMIs we | ||
2563 | * cannot fully serialize things. | ||
2564 | * | ||
2565 | * What we do is serialize between CPUs so we only have to deal with NMI | ||
2566 | * nesting on a single CPU. | ||
2567 | * | ||
2568 | * We only publish the head (and generate a wakeup) when the outer-most | ||
2569 | * event completes. | ||
2570 | */ | ||
2571 | static void perf_output_lock(struct perf_output_handle *handle) | ||
2572 | { | ||
2573 | struct perf_mmap_data *data = handle->data; | ||
2574 | int cpu; | ||
2575 | |||
2576 | handle->locked = 0; | ||
2577 | |||
2578 | local_irq_save(handle->flags); | ||
2579 | cpu = smp_processor_id(); | ||
2580 | |||
2581 | if (in_nmi() && atomic_read(&data->lock) == cpu) | ||
2582 | return; | ||
2583 | |||
2584 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | ||
2585 | cpu_relax(); | ||
2586 | |||
2587 | handle->locked = 1; | ||
2588 | } | ||
2589 | |||
2590 | static void perf_output_unlock(struct perf_output_handle *handle) | ||
2591 | { | ||
2592 | struct perf_mmap_data *data = handle->data; | ||
2593 | unsigned long head; | ||
2594 | int cpu; | ||
2595 | |||
2596 | data->done_head = data->head; | ||
2597 | |||
2598 | if (!handle->locked) | ||
2599 | goto out; | ||
2600 | |||
2601 | again: | ||
2602 | /* | ||
2603 | * The xchg implies a full barrier that ensures all writes are done | ||
2604 | * before we publish the new head, matched by a rmb() in userspace when | ||
2605 | * reading this position. | ||
2606 | */ | ||
2607 | while ((head = atomic_long_xchg(&data->done_head, 0))) | ||
2608 | data->user_page->data_head = head; | ||
2609 | |||
2610 | /* | ||
2611 | * NMI can happen here, which means we can miss a done_head update. | ||
2612 | */ | ||
2613 | |||
2614 | cpu = atomic_xchg(&data->lock, -1); | ||
2615 | WARN_ON_ONCE(cpu != smp_processor_id()); | ||
2616 | |||
2617 | /* | ||
2618 | * Therefore we have to validate we did not indeed do so. | ||
2619 | */ | ||
2620 | if (unlikely(atomic_long_read(&data->done_head))) { | ||
2621 | /* | ||
2622 | * Since we had it locked, we can lock it again. | ||
2623 | */ | ||
2624 | while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) | ||
2625 | cpu_relax(); | ||
2626 | |||
2627 | goto again; | ||
2628 | } | ||
2629 | |||
2630 | if (atomic_xchg(&data->wakeup, 0)) | ||
2631 | perf_output_wakeup(handle); | ||
2632 | out: | ||
2633 | local_irq_restore(handle->flags); | ||
2634 | } | ||
2635 | |||
2636 | static void perf_output_copy(struct perf_output_handle *handle, | ||
2637 | const void *buf, unsigned int len) | ||
2638 | { | ||
2639 | unsigned int pages_mask; | ||
2640 | unsigned int offset; | ||
2641 | unsigned int size; | ||
2642 | void **pages; | ||
2643 | |||
2644 | offset = handle->offset; | ||
2645 | pages_mask = handle->data->nr_pages - 1; | ||
2646 | pages = handle->data->data_pages; | ||
2647 | |||
2648 | do { | ||
2649 | unsigned int page_offset; | ||
2650 | int nr; | ||
2651 | |||
2652 | nr = (offset >> PAGE_SHIFT) & pages_mask; | ||
2653 | page_offset = offset & (PAGE_SIZE - 1); | ||
2654 | size = min_t(unsigned int, PAGE_SIZE - page_offset, len); | ||
2655 | |||
2656 | memcpy(pages[nr] + page_offset, buf, size); | ||
2657 | |||
2658 | len -= size; | ||
2659 | buf += size; | ||
2660 | offset += size; | ||
2661 | } while (len); | ||
2662 | |||
2663 | handle->offset = offset; | ||
2664 | |||
2665 | /* | ||
2666 | * Check we didn't copy past our reservation window, taking the | ||
2667 | * possible unsigned int wrap into account. | ||
2668 | */ | ||
2669 | WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0); | ||
2670 | } | ||
2671 | |||
2672 | #define perf_output_put(handle, x) \ | ||
2673 | perf_output_copy((handle), &(x), sizeof(x)) | ||
2674 | |||
2675 | static int perf_output_begin(struct perf_output_handle *handle, | ||
2676 | struct perf_counter *counter, unsigned int size, | ||
2677 | int nmi, int sample) | ||
2678 | { | ||
2679 | struct perf_counter *output_counter; | ||
2680 | struct perf_mmap_data *data; | ||
2681 | unsigned int offset, head; | ||
2682 | int have_lost; | ||
2683 | struct { | ||
2684 | struct perf_event_header header; | ||
2685 | u64 id; | ||
2686 | u64 lost; | ||
2687 | } lost_event; | ||
2688 | |||
2689 | rcu_read_lock(); | ||
2690 | /* | ||
2691 | * For inherited counters we send all the output towards the parent. | ||
2692 | */ | ||
2693 | if (counter->parent) | ||
2694 | counter = counter->parent; | ||
2695 | |||
2696 | output_counter = rcu_dereference(counter->output); | ||
2697 | if (output_counter) | ||
2698 | counter = output_counter; | ||
2699 | |||
2700 | data = rcu_dereference(counter->data); | ||
2701 | if (!data) | ||
2702 | goto out; | ||
2703 | |||
2704 | handle->data = data; | ||
2705 | handle->counter = counter; | ||
2706 | handle->nmi = nmi; | ||
2707 | handle->sample = sample; | ||
2708 | |||
2709 | if (!data->nr_pages) | ||
2710 | goto fail; | ||
2711 | |||
2712 | have_lost = atomic_read(&data->lost); | ||
2713 | if (have_lost) | ||
2714 | size += sizeof(lost_event); | ||
2715 | |||
2716 | perf_output_lock(handle); | ||
2717 | |||
2718 | do { | ||
2719 | offset = head = atomic_long_read(&data->head); | ||
2720 | head += size; | ||
2721 | if (unlikely(!perf_output_space(data, offset, head))) | ||
2722 | goto fail; | ||
2723 | } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); | ||
2724 | |||
2725 | handle->offset = offset; | ||
2726 | handle->head = head; | ||
2727 | |||
2728 | if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT)) | ||
2729 | atomic_set(&data->wakeup, 1); | ||
2730 | |||
2731 | if (have_lost) { | ||
2732 | lost_event.header.type = PERF_EVENT_LOST; | ||
2733 | lost_event.header.misc = 0; | ||
2734 | lost_event.header.size = sizeof(lost_event); | ||
2735 | lost_event.id = counter->id; | ||
2736 | lost_event.lost = atomic_xchg(&data->lost, 0); | ||
2737 | |||
2738 | perf_output_put(handle, lost_event); | ||
2739 | } | ||
2740 | |||
2741 | return 0; | ||
2742 | |||
2743 | fail: | ||
2744 | atomic_inc(&data->lost); | ||
2745 | perf_output_unlock(handle); | ||
2746 | out: | ||
2747 | rcu_read_unlock(); | ||
2748 | |||
2749 | return -ENOSPC; | ||
2750 | } | ||
2751 | |||
2752 | static void perf_output_end(struct perf_output_handle *handle) | ||
2753 | { | ||
2754 | struct perf_counter *counter = handle->counter; | ||
2755 | struct perf_mmap_data *data = handle->data; | ||
2756 | |||
2757 | int wakeup_events = counter->attr.wakeup_events; | ||
2758 | |||
2759 | if (handle->sample && wakeup_events) { | ||
2760 | int events = atomic_inc_return(&data->events); | ||
2761 | if (events >= wakeup_events) { | ||
2762 | atomic_sub(wakeup_events, &data->events); | ||
2763 | atomic_set(&data->wakeup, 1); | ||
2764 | } | ||
2765 | } | ||
2766 | |||
2767 | perf_output_unlock(handle); | ||
2768 | rcu_read_unlock(); | ||
2769 | } | ||
2770 | |||
2771 | static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) | ||
2772 | { | ||
2773 | /* | ||
2774 | * only top level counters have the pid namespace they were created in | ||
2775 | */ | ||
2776 | if (counter->parent) | ||
2777 | counter = counter->parent; | ||
2778 | |||
2779 | return task_tgid_nr_ns(p, counter->ns); | ||
2780 | } | ||
2781 | |||
2782 | static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) | ||
2783 | { | ||
2784 | /* | ||
2785 | * only top level counters have the pid namespace they were created in | ||
2786 | */ | ||
2787 | if (counter->parent) | ||
2788 | counter = counter->parent; | ||
2789 | |||
2790 | return task_pid_nr_ns(p, counter->ns); | ||
2791 | } | ||
2792 | |||
2793 | static void perf_output_read_one(struct perf_output_handle *handle, | ||
2794 | struct perf_counter *counter) | ||
2795 | { | ||
2796 | u64 read_format = counter->attr.read_format; | ||
2797 | u64 values[4]; | ||
2798 | int n = 0; | ||
2799 | |||
2800 | values[n++] = atomic64_read(&counter->count); | ||
2801 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | ||
2802 | values[n++] = counter->total_time_enabled + | ||
2803 | atomic64_read(&counter->child_total_time_enabled); | ||
2804 | } | ||
2805 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | ||
2806 | values[n++] = counter->total_time_running + | ||
2807 | atomic64_read(&counter->child_total_time_running); | ||
2808 | } | ||
2809 | if (read_format & PERF_FORMAT_ID) | ||
2810 | values[n++] = primary_counter_id(counter); | ||
2811 | |||
2812 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2813 | } | ||
2814 | |||
2815 | /* | ||
2816 | * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. | ||
2817 | */ | ||
2818 | static void perf_output_read_group(struct perf_output_handle *handle, | ||
2819 | struct perf_counter *counter) | ||
2820 | { | ||
2821 | struct perf_counter *leader = counter->group_leader, *sub; | ||
2822 | u64 read_format = counter->attr.read_format; | ||
2823 | u64 values[5]; | ||
2824 | int n = 0; | ||
2825 | |||
2826 | values[n++] = 1 + leader->nr_siblings; | ||
2827 | |||
2828 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
2829 | values[n++] = leader->total_time_enabled; | ||
2830 | |||
2831 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
2832 | values[n++] = leader->total_time_running; | ||
2833 | |||
2834 | if (leader != counter) | ||
2835 | leader->pmu->read(leader); | ||
2836 | |||
2837 | values[n++] = atomic64_read(&leader->count); | ||
2838 | if (read_format & PERF_FORMAT_ID) | ||
2839 | values[n++] = primary_counter_id(leader); | ||
2840 | |||
2841 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2842 | |||
2843 | list_for_each_entry(sub, &leader->sibling_list, list_entry) { | ||
2844 | n = 0; | ||
2845 | |||
2846 | if (sub != counter) | ||
2847 | sub->pmu->read(sub); | ||
2848 | |||
2849 | values[n++] = atomic64_read(&sub->count); | ||
2850 | if (read_format & PERF_FORMAT_ID) | ||
2851 | values[n++] = primary_counter_id(sub); | ||
2852 | |||
2853 | perf_output_copy(handle, values, n * sizeof(u64)); | ||
2854 | } | ||
2855 | } | ||
2856 | |||
2857 | static void perf_output_read(struct perf_output_handle *handle, | ||
2858 | struct perf_counter *counter) | ||
2859 | { | ||
2860 | if (counter->attr.read_format & PERF_FORMAT_GROUP) | ||
2861 | perf_output_read_group(handle, counter); | ||
2862 | else | ||
2863 | perf_output_read_one(handle, counter); | ||
2864 | } | ||
2865 | |||
2866 | void perf_counter_output(struct perf_counter *counter, int nmi, | ||
2867 | struct perf_sample_data *data) | ||
2868 | { | ||
2869 | int ret; | ||
2870 | u64 sample_type = counter->attr.sample_type; | ||
2871 | struct perf_output_handle handle; | ||
2872 | struct perf_event_header header; | ||
2873 | u64 ip; | ||
2874 | struct { | ||
2875 | u32 pid, tid; | ||
2876 | } tid_entry; | ||
2877 | struct perf_callchain_entry *callchain = NULL; | ||
2878 | int callchain_size = 0; | ||
2879 | u64 time; | ||
2880 | struct { | ||
2881 | u32 cpu, reserved; | ||
2882 | } cpu_entry; | ||
2883 | |||
2884 | header.type = PERF_EVENT_SAMPLE; | ||
2885 | header.size = sizeof(header); | ||
2886 | |||
2887 | header.misc = 0; | ||
2888 | header.misc |= perf_misc_flags(data->regs); | ||
2889 | |||
2890 | if (sample_type & PERF_SAMPLE_IP) { | ||
2891 | ip = perf_instruction_pointer(data->regs); | ||
2892 | header.size += sizeof(ip); | ||
2893 | } | ||
2894 | |||
2895 | if (sample_type & PERF_SAMPLE_TID) { | ||
2896 | /* namespace issues */ | ||
2897 | tid_entry.pid = perf_counter_pid(counter, current); | ||
2898 | tid_entry.tid = perf_counter_tid(counter, current); | ||
2899 | |||
2900 | header.size += sizeof(tid_entry); | ||
2901 | } | ||
2902 | |||
2903 | if (sample_type & PERF_SAMPLE_TIME) { | ||
2904 | /* | ||
2905 | * Maybe do better on x86 and provide cpu_clock_nmi() | ||
2906 | */ | ||
2907 | time = sched_clock(); | ||
2908 | |||
2909 | header.size += sizeof(u64); | ||
2910 | } | ||
2911 | |||
2912 | if (sample_type & PERF_SAMPLE_ADDR) | ||
2913 | header.size += sizeof(u64); | ||
2914 | |||
2915 | if (sample_type & PERF_SAMPLE_ID) | ||
2916 | header.size += sizeof(u64); | ||
2917 | |||
2918 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
2919 | header.size += sizeof(u64); | ||
2920 | |||
2921 | if (sample_type & PERF_SAMPLE_CPU) { | ||
2922 | header.size += sizeof(cpu_entry); | ||
2923 | |||
2924 | cpu_entry.cpu = raw_smp_processor_id(); | ||
2925 | cpu_entry.reserved = 0; | ||
2926 | } | ||
2927 | |||
2928 | if (sample_type & PERF_SAMPLE_PERIOD) | ||
2929 | header.size += sizeof(u64); | ||
2930 | |||
2931 | if (sample_type & PERF_SAMPLE_READ) | ||
2932 | header.size += perf_counter_read_size(counter); | ||
2933 | |||
2934 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | ||
2935 | callchain = perf_callchain(data->regs); | ||
2936 | |||
2937 | if (callchain) { | ||
2938 | callchain_size = (1 + callchain->nr) * sizeof(u64); | ||
2939 | header.size += callchain_size; | ||
2940 | } else | ||
2941 | header.size += sizeof(u64); | ||
2942 | } | ||
2943 | |||
2944 | if (sample_type & PERF_SAMPLE_RAW) { | ||
2945 | int size = sizeof(u32); | ||
2946 | |||
2947 | if (data->raw) | ||
2948 | size += data->raw->size; | ||
2949 | else | ||
2950 | size += sizeof(u32); | ||
2951 | |||
2952 | WARN_ON_ONCE(size & (sizeof(u64)-1)); | ||
2953 | header.size += size; | ||
2954 | } | ||
2955 | |||
2956 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); | ||
2957 | if (ret) | ||
2958 | return; | ||
2959 | |||
2960 | perf_output_put(&handle, header); | ||
2961 | |||
2962 | if (sample_type & PERF_SAMPLE_IP) | ||
2963 | perf_output_put(&handle, ip); | ||
2964 | |||
2965 | if (sample_type & PERF_SAMPLE_TID) | ||
2966 | perf_output_put(&handle, tid_entry); | ||
2967 | |||
2968 | if (sample_type & PERF_SAMPLE_TIME) | ||
2969 | perf_output_put(&handle, time); | ||
2970 | |||
2971 | if (sample_type & PERF_SAMPLE_ADDR) | ||
2972 | perf_output_put(&handle, data->addr); | ||
2973 | |||
2974 | if (sample_type & PERF_SAMPLE_ID) { | ||
2975 | u64 id = primary_counter_id(counter); | ||
2976 | |||
2977 | perf_output_put(&handle, id); | ||
2978 | } | ||
2979 | |||
2980 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
2981 | perf_output_put(&handle, counter->id); | ||
2982 | |||
2983 | if (sample_type & PERF_SAMPLE_CPU) | ||
2984 | perf_output_put(&handle, cpu_entry); | ||
2985 | |||
2986 | if (sample_type & PERF_SAMPLE_PERIOD) | ||
2987 | perf_output_put(&handle, data->period); | ||
2988 | |||
2989 | if (sample_type & PERF_SAMPLE_READ) | ||
2990 | perf_output_read(&handle, counter); | ||
2991 | |||
2992 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | ||
2993 | if (callchain) | ||
2994 | perf_output_copy(&handle, callchain, callchain_size); | ||
2995 | else { | ||
2996 | u64 nr = 0; | ||
2997 | perf_output_put(&handle, nr); | ||
2998 | } | ||
2999 | } | ||
3000 | |||
3001 | if (sample_type & PERF_SAMPLE_RAW) { | ||
3002 | if (data->raw) { | ||
3003 | perf_output_put(&handle, data->raw->size); | ||
3004 | perf_output_copy(&handle, data->raw->data, data->raw->size); | ||
3005 | } else { | ||
3006 | struct { | ||
3007 | u32 size; | ||
3008 | u32 data; | ||
3009 | } raw = { | ||
3010 | .size = sizeof(u32), | ||
3011 | .data = 0, | ||
3012 | }; | ||
3013 | perf_output_put(&handle, raw); | ||
3014 | } | ||
3015 | } | ||
3016 | |||
3017 | perf_output_end(&handle); | ||
3018 | } | ||
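/*
 * A consumer decodes a PERF_EVENT_SAMPLE record by walking the optional
 * fields in exactly the order they were emitted above.  A sketch of the
 * fixed-size part (the READ, CALLCHAIN and RAW sections that may follow
 * are variable-sized and skipped here; the function name is made up):
 */
static __u64 *example_parse_sample(struct perf_event_header *hdr, __u64 sample_type)
{
	__u64 *p = (__u64 *)(hdr + 1);

	if (sample_type & PERF_SAMPLE_IP)
		p++;			/* instruction pointer */
	if (sample_type & PERF_SAMPLE_TID)
		p++;			/* u32 pid, u32 tid */
	if (sample_type & PERF_SAMPLE_TIME)
		p++;			/* timestamp */
	if (sample_type & PERF_SAMPLE_ADDR)
		p++;			/* data address */
	if (sample_type & PERF_SAMPLE_ID)
		p++;			/* primary (parent) counter id */
	if (sample_type & PERF_SAMPLE_STREAM_ID)
		p++;			/* this counter's own id */
	if (sample_type & PERF_SAMPLE_CPU)
		p++;			/* u32 cpu, u32 reserved */
	if (sample_type & PERF_SAMPLE_PERIOD)
		p++;			/* sample period */

	return p;	/* start of the variable-sized sections, if any */
}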
3019 | |||
3020 | /* | ||
3021 | * read event | ||
3022 | */ | ||
3023 | |||
3024 | struct perf_read_event { | ||
3025 | struct perf_event_header header; | ||
3026 | |||
3027 | u32 pid; | ||
3028 | u32 tid; | ||
3029 | }; | ||
3030 | |||
3031 | static void | ||
3032 | perf_counter_read_event(struct perf_counter *counter, | ||
3033 | struct task_struct *task) | ||
3034 | { | ||
3035 | struct perf_output_handle handle; | ||
3036 | struct perf_read_event event = { | ||
3037 | .header = { | ||
3038 | .type = PERF_EVENT_READ, | ||
3039 | .misc = 0, | ||
3040 | .size = sizeof(event) + perf_counter_read_size(counter), | ||
3041 | }, | ||
3042 | .pid = perf_counter_pid(counter, task), | ||
3043 | .tid = perf_counter_tid(counter, task), | ||
3044 | }; | ||
3045 | int ret; | ||
3046 | |||
3047 | ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); | ||
3048 | if (ret) | ||
3049 | return; | ||
3050 | |||
3051 | perf_output_put(&handle, event); | ||
3052 | perf_output_read(&handle, counter); | ||
3053 | |||
3054 | perf_output_end(&handle); | ||
3055 | } | ||
3056 | |||
3057 | /* | ||
3058 | * task tracking -- fork/exit | ||
3059 | * | ||
3060 | * enabled by: attr.comm | attr.mmap | attr.task | ||
3061 | */ | ||
3062 | |||
3063 | struct perf_task_event { | ||
3064 | struct task_struct *task; | ||
3065 | struct perf_counter_context *task_ctx; | ||
3066 | |||
3067 | struct { | ||
3068 | struct perf_event_header header; | ||
3069 | |||
3070 | u32 pid; | ||
3071 | u32 ppid; | ||
3072 | u32 tid; | ||
3073 | u32 ptid; | ||
3074 | } event; | ||
3075 | }; | ||
3076 | |||
3077 | static void perf_counter_task_output(struct perf_counter *counter, | ||
3078 | struct perf_task_event *task_event) | ||
3079 | { | ||
3080 | struct perf_output_handle handle; | ||
3081 | int size = task_event->event.header.size; | ||
3082 | struct task_struct *task = task_event->task; | ||
3083 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | ||
3084 | |||
3085 | if (ret) | ||
3086 | return; | ||
3087 | |||
3088 | task_event->event.pid = perf_counter_pid(counter, task); | ||
3089 | task_event->event.ppid = perf_counter_pid(counter, current); | ||
3090 | |||
3091 | task_event->event.tid = perf_counter_tid(counter, task); | ||
3092 | task_event->event.ptid = perf_counter_tid(counter, current); | ||
3093 | |||
3094 | perf_output_put(&handle, task_event->event); | ||
3095 | perf_output_end(&handle); | ||
3096 | } | ||
3097 | |||
3098 | static int perf_counter_task_match(struct perf_counter *counter) | ||
3099 | { | ||
3100 | if (counter->attr.comm || counter->attr.mmap || counter->attr.task) | ||
3101 | return 1; | ||
3102 | |||
3103 | return 0; | ||
3104 | } | ||
3105 | |||
3106 | static void perf_counter_task_ctx(struct perf_counter_context *ctx, | ||
3107 | struct perf_task_event *task_event) | ||
3108 | { | ||
3109 | struct perf_counter *counter; | ||
3110 | |||
3111 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3112 | return; | ||
3113 | |||
3114 | rcu_read_lock(); | ||
3115 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | ||
3116 | if (perf_counter_task_match(counter)) | ||
3117 | perf_counter_task_output(counter, task_event); | ||
3118 | } | ||
3119 | rcu_read_unlock(); | ||
3120 | } | ||
3121 | |||
3122 | static void perf_counter_task_event(struct perf_task_event *task_event) | ||
3123 | { | ||
3124 | struct perf_cpu_context *cpuctx; | ||
3125 | struct perf_counter_context *ctx = task_event->task_ctx; | ||
3126 | |||
3127 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
3128 | perf_counter_task_ctx(&cpuctx->ctx, task_event); | ||
3129 | put_cpu_var(perf_cpu_context); | ||
3130 | |||
3131 | rcu_read_lock(); | ||
3132 | if (!ctx) | ||
3133 | ctx = rcu_dereference(task_event->task->perf_counter_ctxp); | ||
3134 | if (ctx) | ||
3135 | perf_counter_task_ctx(ctx, task_event); | ||
3136 | rcu_read_unlock(); | ||
3137 | } | ||
3138 | |||
3139 | static void perf_counter_task(struct task_struct *task, | ||
3140 | struct perf_counter_context *task_ctx, | ||
3141 | int new) | ||
3142 | { | ||
3143 | struct perf_task_event task_event; | ||
3144 | |||
3145 | if (!atomic_read(&nr_comm_counters) && | ||
3146 | !atomic_read(&nr_mmap_counters) && | ||
3147 | !atomic_read(&nr_task_counters)) | ||
3148 | return; | ||
3149 | |||
3150 | task_event = (struct perf_task_event){ | ||
3151 | .task = task, | ||
3152 | .task_ctx = task_ctx, | ||
3153 | .event = { | ||
3154 | .header = { | ||
3155 | .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, | ||
3156 | .misc = 0, | ||
3157 | .size = sizeof(task_event.event), | ||
3158 | }, | ||
3159 | /* .pid */ | ||
3160 | /* .ppid */ | ||
3161 | /* .tid */ | ||
3162 | /* .ptid */ | ||
3163 | }, | ||
3164 | }; | ||
3165 | |||
3166 | perf_counter_task_event(&task_event); | ||
3167 | } | ||
3168 | |||
3169 | void perf_counter_fork(struct task_struct *task) | ||
3170 | { | ||
3171 | perf_counter_task(task, NULL, 1); | ||
3172 | } | ||
3173 | |||
3174 | /* | ||
3175 | * comm tracking | ||
3176 | */ | ||
3177 | |||
3178 | struct perf_comm_event { | ||
3179 | struct task_struct *task; | ||
3180 | char *comm; | ||
3181 | int comm_size; | ||
3182 | |||
3183 | struct { | ||
3184 | struct perf_event_header header; | ||
3185 | |||
3186 | u32 pid; | ||
3187 | u32 tid; | ||
3188 | } event; | ||
3189 | }; | ||
3190 | |||
3191 | static void perf_counter_comm_output(struct perf_counter *counter, | ||
3192 | struct perf_comm_event *comm_event) | ||
3193 | { | ||
3194 | struct perf_output_handle handle; | ||
3195 | int size = comm_event->event.header.size; | ||
3196 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | ||
3197 | |||
3198 | if (ret) | ||
3199 | return; | ||
3200 | |||
3201 | comm_event->event.pid = perf_counter_pid(counter, comm_event->task); | ||
3202 | comm_event->event.tid = perf_counter_tid(counter, comm_event->task); | ||
3203 | |||
3204 | perf_output_put(&handle, comm_event->event); | ||
3205 | perf_output_copy(&handle, comm_event->comm, | ||
3206 | comm_event->comm_size); | ||
3207 | perf_output_end(&handle); | ||
3208 | } | ||
3209 | |||
3210 | static int perf_counter_comm_match(struct perf_counter *counter) | ||
3211 | { | ||
3212 | if (counter->attr.comm) | ||
3213 | return 1; | ||
3214 | |||
3215 | return 0; | ||
3216 | } | ||
3217 | |||
3218 | static void perf_counter_comm_ctx(struct perf_counter_context *ctx, | ||
3219 | struct perf_comm_event *comm_event) | ||
3220 | { | ||
3221 | struct perf_counter *counter; | ||
3222 | |||
3223 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3224 | return; | ||
3225 | |||
3226 | rcu_read_lock(); | ||
3227 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | ||
3228 | if (perf_counter_comm_match(counter)) | ||
3229 | perf_counter_comm_output(counter, comm_event); | ||
3230 | } | ||
3231 | rcu_read_unlock(); | ||
3232 | } | ||
3233 | |||
3234 | static void perf_counter_comm_event(struct perf_comm_event *comm_event) | ||
3235 | { | ||
3236 | struct perf_cpu_context *cpuctx; | ||
3237 | struct perf_counter_context *ctx; | ||
3238 | unsigned int size; | ||
3239 | char comm[TASK_COMM_LEN]; | ||
3240 | |||
3241 | memset(comm, 0, sizeof(comm)); | ||
3242 | strncpy(comm, comm_event->task->comm, sizeof(comm)); | ||
3243 | size = ALIGN(strlen(comm)+1, sizeof(u64)); | ||
3244 | |||
3245 | comm_event->comm = comm; | ||
3246 | comm_event->comm_size = size; | ||
3247 | |||
3248 | comm_event->event.header.size = sizeof(comm_event->event) + size; | ||
3249 | |||
3250 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
3251 | perf_counter_comm_ctx(&cpuctx->ctx, comm_event); | ||
3252 | put_cpu_var(perf_cpu_context); | ||
3253 | |||
3254 | rcu_read_lock(); | ||
3255 | /* | ||
3256 | * doesn't really matter which of the child contexts the | ||
3257 | * event ends up in. | ||
3258 | */ | ||
3259 | ctx = rcu_dereference(current->perf_counter_ctxp); | ||
3260 | if (ctx) | ||
3261 | perf_counter_comm_ctx(ctx, comm_event); | ||
3262 | rcu_read_unlock(); | ||
3263 | } | ||
3264 | |||
3265 | void perf_counter_comm(struct task_struct *task) | ||
3266 | { | ||
3267 | struct perf_comm_event comm_event; | ||
3268 | |||
3269 | if (task->perf_counter_ctxp) | ||
3270 | perf_counter_enable_on_exec(task); | ||
3271 | |||
3272 | if (!atomic_read(&nr_comm_counters)) | ||
3273 | return; | ||
3274 | |||
3275 | comm_event = (struct perf_comm_event){ | ||
3276 | .task = task, | ||
3277 | /* .comm */ | ||
3278 | /* .comm_size */ | ||
3279 | .event = { | ||
3280 | .header = { | ||
3281 | .type = PERF_EVENT_COMM, | ||
3282 | .misc = 0, | ||
3283 | /* .size */ | ||
3284 | }, | ||
3285 | /* .pid */ | ||
3286 | /* .tid */ | ||
3287 | }, | ||
3288 | }; | ||
3289 | |||
3290 | perf_counter_comm_event(&comm_event); | ||
3291 | } | ||
3292 | |||
3293 | /* | ||
3294 | * mmap tracking | ||
3295 | */ | ||
3296 | |||
3297 | struct perf_mmap_event { | ||
3298 | struct vm_area_struct *vma; | ||
3299 | |||
3300 | const char *file_name; | ||
3301 | int file_size; | ||
3302 | |||
3303 | struct { | ||
3304 | struct perf_event_header header; | ||
3305 | |||
3306 | u32 pid; | ||
3307 | u32 tid; | ||
3308 | u64 start; | ||
3309 | u64 len; | ||
3310 | u64 pgoff; | ||
3311 | } event; | ||
3312 | }; | ||
3313 | |||
3314 | static void perf_counter_mmap_output(struct perf_counter *counter, | ||
3315 | struct perf_mmap_event *mmap_event) | ||
3316 | { | ||
3317 | struct perf_output_handle handle; | ||
3318 | int size = mmap_event->event.header.size; | ||
3319 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | ||
3320 | |||
3321 | if (ret) | ||
3322 | return; | ||
3323 | |||
3324 | mmap_event->event.pid = perf_counter_pid(counter, current); | ||
3325 | mmap_event->event.tid = perf_counter_tid(counter, current); | ||
3326 | |||
3327 | perf_output_put(&handle, mmap_event->event); | ||
3328 | perf_output_copy(&handle, mmap_event->file_name, | ||
3329 | mmap_event->file_size); | ||
3330 | perf_output_end(&handle); | ||
3331 | } | ||
3332 | |||
3333 | static int perf_counter_mmap_match(struct perf_counter *counter, | ||
3334 | struct perf_mmap_event *mmap_event) | ||
3335 | { | ||
3336 | if (counter->attr.mmap) | ||
3337 | return 1; | ||
3338 | |||
3339 | return 0; | ||
3340 | } | ||
3341 | |||
3342 | static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, | ||
3343 | struct perf_mmap_event *mmap_event) | ||
3344 | { | ||
3345 | struct perf_counter *counter; | ||
3346 | |||
3347 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3348 | return; | ||
3349 | |||
3350 | rcu_read_lock(); | ||
3351 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | ||
3352 | if (perf_counter_mmap_match(counter, mmap_event)) | ||
3353 | perf_counter_mmap_output(counter, mmap_event); | ||
3354 | } | ||
3355 | rcu_read_unlock(); | ||
3356 | } | ||
3357 | |||
3358 | static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) | ||
3359 | { | ||
3360 | struct perf_cpu_context *cpuctx; | ||
3361 | struct perf_counter_context *ctx; | ||
3362 | struct vm_area_struct *vma = mmap_event->vma; | ||
3363 | struct file *file = vma->vm_file; | ||
3364 | unsigned int size; | ||
3365 | char tmp[16]; | ||
3366 | char *buf = NULL; | ||
3367 | const char *name; | ||
3368 | |||
3369 | memset(tmp, 0, sizeof(tmp)); | ||
3370 | |||
3371 | if (file) { | ||
3372 | /* | ||
3373 | * d_path works from the end of the buffer backwards, so we | ||
3374 | * need to add enough zero bytes after the string to handle | ||
3375 | * the 64bit alignment we do later. | ||
3376 | */ | ||
3377 | buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); | ||
3378 | if (!buf) { | ||
3379 | name = strncpy(tmp, "//enomem", sizeof(tmp)); | ||
3380 | goto got_name; | ||
3381 | } | ||
3382 | name = d_path(&file->f_path, buf, PATH_MAX); | ||
3383 | if (IS_ERR(name)) { | ||
3384 | name = strncpy(tmp, "//toolong", sizeof(tmp)); | ||
3385 | goto got_name; | ||
3386 | } | ||
3387 | } else { | ||
3388 | if (arch_vma_name(mmap_event->vma)) { | ||
3389 | name = strncpy(tmp, arch_vma_name(mmap_event->vma), | ||
3390 | sizeof(tmp)); | ||
3391 | goto got_name; | ||
3392 | } | ||
3393 | |||
3394 | if (!vma->vm_mm) { | ||
3395 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); | ||
3396 | goto got_name; | ||
3397 | } | ||
3398 | |||
3399 | name = strncpy(tmp, "//anon", sizeof(tmp)); | ||
3400 | goto got_name; | ||
3401 | } | ||
3402 | |||
3403 | got_name: | ||
3404 | size = ALIGN(strlen(name)+1, sizeof(u64)); | ||
3405 | |||
3406 | mmap_event->file_name = name; | ||
3407 | mmap_event->file_size = size; | ||
3408 | |||
3409 | mmap_event->event.header.size = sizeof(mmap_event->event) + size; | ||
3410 | |||
3411 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
3412 | perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); | ||
3413 | put_cpu_var(perf_cpu_context); | ||
3414 | |||
3415 | rcu_read_lock(); | ||
3416 | /* | ||
3417 | * doesn't really matter which of the child contexts the | ||
3418 | * event ends up in. | ||
3419 | */ | ||
3420 | ctx = rcu_dereference(current->perf_counter_ctxp); | ||
3421 | if (ctx) | ||
3422 | perf_counter_mmap_ctx(ctx, mmap_event); | ||
3423 | rcu_read_unlock(); | ||
3424 | |||
3425 | kfree(buf); | ||
3426 | } | ||
3427 | |||
3428 | void __perf_counter_mmap(struct vm_area_struct *vma) | ||
3429 | { | ||
3430 | struct perf_mmap_event mmap_event; | ||
3431 | |||
3432 | if (!atomic_read(&nr_mmap_counters)) | ||
3433 | return; | ||
3434 | |||
3435 | mmap_event = (struct perf_mmap_event){ | ||
3436 | .vma = vma, | ||
3437 | /* .file_name */ | ||
3438 | /* .file_size */ | ||
3439 | .event = { | ||
3440 | .header = { | ||
3441 | .type = PERF_EVENT_MMAP, | ||
3442 | .misc = 0, | ||
3443 | /* .size */ | ||
3444 | }, | ||
3445 | /* .pid */ | ||
3446 | /* .tid */ | ||
3447 | .start = vma->vm_start, | ||
3448 | .len = vma->vm_end - vma->vm_start, | ||
3449 | .pgoff = vma->vm_pgoff, | ||
3450 | }, | ||
3451 | }; | ||
3452 | |||
3453 | perf_counter_mmap_event(&mmap_event); | ||
3454 | } | ||
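/*
 * Likewise, a reader-side sketch (illustration only) of the PERF_EVENT_MMAP
 * record: the fields mirror the anonymous struct in struct perf_mmap_event
 * above, followed by the mapped file name (or "//anon", "[vdso]", ...)
 * padded to a u64 boundary.
 */
struct mmap_record {
	struct perf_event_header	header;	/* .type == PERF_EVENT_MMAP */
	u32				pid;
	u32				tid;
	u64				start;	/* vma->vm_start */
	u64				len;	/* vma->vm_end - vma->vm_start */
	u64				pgoff;	/* vma->vm_pgoff */
	char				filename[];
};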
3455 | |||
3456 | /* | ||
3457 | * IRQ throttle logging | ||
3458 | */ | ||
3459 | |||
3460 | static void perf_log_throttle(struct perf_counter *counter, int enable) | ||
3461 | { | ||
3462 | struct perf_output_handle handle; | ||
3463 | int ret; | ||
3464 | |||
3465 | struct { | ||
3466 | struct perf_event_header header; | ||
3467 | u64 time; | ||
3468 | u64 id; | ||
3469 | u64 stream_id; | ||
3470 | } throttle_event = { | ||
3471 | .header = { | ||
3472 | .type = PERF_EVENT_THROTTLE, | ||
3473 | .misc = 0, | ||
3474 | .size = sizeof(throttle_event), | ||
3475 | }, | ||
3476 | .time = sched_clock(), | ||
3477 | .id = primary_counter_id(counter), | ||
3478 | .stream_id = counter->id, | ||
3479 | }; | ||
3480 | |||
3481 | if (enable) | ||
3482 | throttle_event.header.type = PERF_EVENT_UNTHROTTLE; | ||
3483 | |||
3484 | ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); | ||
3485 | if (ret) | ||
3486 | return; | ||
3487 | |||
3488 | perf_output_put(&handle, throttle_event); | ||
3489 | perf_output_end(&handle); | ||
3490 | } | ||
3491 | |||
3492 | /* | ||
3493 | * Generic counter overflow handling, sampling. | ||
3494 | */ | ||
3495 | |||
3496 | int perf_counter_overflow(struct perf_counter *counter, int nmi, | ||
3497 | struct perf_sample_data *data) | ||
3498 | { | ||
3499 | int events = atomic_read(&counter->event_limit); | ||
3500 | int throttle = counter->pmu->unthrottle != NULL; | ||
3501 | struct hw_perf_counter *hwc = &counter->hw; | ||
3502 | int ret = 0; | ||
3503 | |||
3504 | if (!throttle) { | ||
3505 | hwc->interrupts++; | ||
3506 | } else { | ||
3507 | if (hwc->interrupts != MAX_INTERRUPTS) { | ||
3508 | hwc->interrupts++; | ||
3509 | if (HZ * hwc->interrupts > | ||
3510 | (u64)sysctl_perf_counter_sample_rate) { | ||
3511 | hwc->interrupts = MAX_INTERRUPTS; | ||
3512 | perf_log_throttle(counter, 0); | ||
3513 | ret = 1; | ||
3514 | } | ||
3515 | } else { | ||
3516 | /* | ||
3517 | * Keep re-disabling the counter even though on the previous | ||
3518 | * pass we disabled it - just in case we raced with a | ||
3519 | * sched-in and the counter got enabled again: | ||
3520 | */ | ||
3521 | ret = 1; | ||
3522 | } | ||
3523 | } | ||
3524 | |||
3525 | if (counter->attr.freq) { | ||
3526 | u64 now = sched_clock(); | ||
3527 | s64 delta = now - hwc->freq_stamp; | ||
3528 | |||
3529 | hwc->freq_stamp = now; | ||
3530 | |||
3531 | if (delta > 0 && delta < TICK_NSEC) | ||
3532 | perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); | ||
3533 | } | ||
3534 | |||
3535 | /* | ||
3536 | * XXX event_limit might not quite work as expected on inherited | ||
3537 | * counters | ||
3538 | */ | ||
3539 | |||
3540 | counter->pending_kill = POLL_IN; | ||
3541 | if (events && atomic_dec_and_test(&counter->event_limit)) { | ||
3542 | ret = 1; | ||
3543 | counter->pending_kill = POLL_HUP; | ||
3544 | if (nmi) { | ||
3545 | counter->pending_disable = 1; | ||
3546 | perf_pending_queue(&counter->pending, | ||
3547 | perf_pending_counter); | ||
3548 | } else | ||
3549 | perf_counter_disable(counter); | ||
3550 | } | ||
3551 | |||
3552 | perf_counter_output(counter, nmi, data); | ||
3553 | return ret; | ||
3554 | } | ||
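/*
 * Worked example of the throttle test above, with illustrative numbers:
 * assuming HZ == 1000 and sysctl_perf_counter_sample_rate == 100000, the
 * condition HZ * hwc->interrupts > sample_rate first trips once a counter
 * has taken more than 100000 / 1000 == 100 interrupts within the current
 * tick; the counter is then parked at MAX_INTERRUPTS, a PERF_EVENT_THROTTLE
 * record is logged, and further overflows are refused until it is
 * unthrottled again on a later tick.
 */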
3555 | |||
3556 | /* | ||
3557 | * Generic software counter infrastructure | ||
3558 | */ | ||
3559 | |||
3560 | /* | ||
3561 | * We directly increment counter->count and keep a second value in | ||
3562 | * counter->hw.period_left to count intervals. This period counter | ||
3563 | * is kept in the range [-sample_period, 0] so that we can use the | ||
3564 | * sign as trigger. | ||
3565 | */ | ||
3566 | |||
3567 | static u64 perf_swcounter_set_period(struct perf_counter *counter) | ||
3568 | { | ||
3569 | struct hw_perf_counter *hwc = &counter->hw; | ||
3570 | u64 period = hwc->last_period; | ||
3571 | u64 nr, offset; | ||
3572 | s64 old, val; | ||
3573 | |||
3574 | hwc->last_period = hwc->sample_period; | ||
3575 | |||
3576 | again: | ||
3577 | old = val = atomic64_read(&hwc->period_left); | ||
3578 | if (val < 0) | ||
3579 | return 0; | ||
3580 | |||
3581 | nr = div64_u64(period + val, period); | ||
3582 | offset = nr * period; | ||
3583 | val -= offset; | ||
3584 | if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) | ||
3585 | goto again; | ||
3586 | |||
3587 | return nr; | ||
3588 | } | ||
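/*
 * Worked example of the arithmetic above, with illustrative numbers: with
 * hwc->sample_period == 100, period_left normally sits in [-100, 0].  If
 * perf_swcounter_add() has pushed it up to +250, this function computes
 * nr = div64_u64(100 + 250, 100) == 3 and rewinds period_left to
 * 250 - 3 * 100 == -50: three overflows are reported and the next one is
 * due after another 50 events.
 */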
3589 | |||
3590 | static void perf_swcounter_overflow(struct perf_counter *counter, | ||
3591 | int nmi, struct perf_sample_data *data) | ||
3592 | { | ||
3593 | struct hw_perf_counter *hwc = &counter->hw; | ||
3594 | u64 overflow; | ||
3595 | |||
3596 | data->period = counter->hw.last_period; | ||
3597 | overflow = perf_swcounter_set_period(counter); | ||
3598 | |||
3599 | if (hwc->interrupts == MAX_INTERRUPTS) | ||
3600 | return; | ||
3601 | |||
3602 | for (; overflow; overflow--) { | ||
3603 | if (perf_counter_overflow(counter, nmi, data)) { | ||
3604 | /* | ||
3605 | * We inhibit the overflow from happening when | ||
3606 | * hwc->interrupts == MAX_INTERRUPTS. | ||
3607 | */ | ||
3608 | break; | ||
3609 | } | ||
3610 | } | ||
3611 | } | ||
3612 | |||
3613 | static void perf_swcounter_unthrottle(struct perf_counter *counter) | ||
3614 | { | ||
3615 | /* | ||
3616 | * Nothing to do, we already reset hwc->interrupts. | ||
3617 | */ | ||
3618 | } | ||
3619 | |||
3620 | static void perf_swcounter_add(struct perf_counter *counter, u64 nr, | ||
3621 | int nmi, struct perf_sample_data *data) | ||
3622 | { | ||
3623 | struct hw_perf_counter *hwc = &counter->hw; | ||
3624 | |||
3625 | atomic64_add(nr, &counter->count); | ||
3626 | |||
3627 | if (!hwc->sample_period) | ||
3628 | return; | ||
3629 | |||
3630 | if (!data->regs) | ||
3631 | return; | ||
3632 | |||
3633 | if (!atomic64_add_negative(nr, &hwc->period_left)) | ||
3634 | perf_swcounter_overflow(counter, nmi, data); | ||
3635 | } | ||
3636 | |||
3637 | static int perf_swcounter_is_counting(struct perf_counter *counter) | ||
3638 | { | ||
3639 | /* | ||
3640 | * The counter is active, we're good! | ||
3641 | */ | ||
3642 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | ||
3643 | return 1; | ||
3644 | |||
3645 | /* | ||
3646 | * The counter is off/error, not counting. | ||
3647 | */ | ||
3648 | if (counter->state != PERF_COUNTER_STATE_INACTIVE) | ||
3649 | return 0; | ||
3650 | |||
3651 | /* | ||
3652 | * The counter is inactive; if the context is active, we are | ||
3653 | * part of a group that didn't make it onto the 'pmu' and are | ||
3654 | * therefore not counting. | ||
3655 | */ | ||
3656 | if (counter->ctx->is_active) | ||
3657 | return 0; | ||
3658 | |||
3659 | /* | ||
3660 | * We're inactive and the context is too; this means the | ||
3661 | * task is scheduled out and we're counting events that happen | ||
3662 | * to us, like migration events. | ||
3663 | */ | ||
3664 | return 1; | ||
3665 | } | ||
3666 | |||
3667 | static int perf_swcounter_match(struct perf_counter *counter, | ||
3668 | enum perf_type_id type, | ||
3669 | u32 event, struct pt_regs *regs) | ||
3670 | { | ||
3671 | if (!perf_swcounter_is_counting(counter)) | ||
3672 | return 0; | ||
3673 | |||
3674 | if (counter->attr.type != type) | ||
3675 | return 0; | ||
3676 | if (counter->attr.config != event) | ||
3677 | return 0; | ||
3678 | |||
3679 | if (regs) { | ||
3680 | if (counter->attr.exclude_user && user_mode(regs)) | ||
3681 | return 0; | ||
3682 | |||
3683 | if (counter->attr.exclude_kernel && !user_mode(regs)) | ||
3684 | return 0; | ||
3685 | } | ||
3686 | |||
3687 | return 1; | ||
3688 | } | ||
3689 | |||
3690 | static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, | ||
3691 | enum perf_type_id type, | ||
3692 | u32 event, u64 nr, int nmi, | ||
3693 | struct perf_sample_data *data) | ||
3694 | { | ||
3695 | struct perf_counter *counter; | ||
3696 | |||
3697 | if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) | ||
3698 | return; | ||
3699 | |||
3700 | rcu_read_lock(); | ||
3701 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | ||
3702 | if (perf_swcounter_match(counter, type, event, data->regs)) | ||
3703 | perf_swcounter_add(counter, nr, nmi, data); | ||
3704 | } | ||
3705 | rcu_read_unlock(); | ||
3706 | } | ||
3707 | |||
3708 | static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) | ||
3709 | { | ||
3710 | if (in_nmi()) | ||
3711 | return &cpuctx->recursion[3]; | ||
3712 | |||
3713 | if (in_irq()) | ||
3714 | return &cpuctx->recursion[2]; | ||
3715 | |||
3716 | if (in_softirq()) | ||
3717 | return &cpuctx->recursion[1]; | ||
3718 | |||
3719 | return &cpuctx->recursion[0]; | ||
3720 | } | ||
3721 | |||
3722 | static void do_perf_swcounter_event(enum perf_type_id type, u32 event, | ||
3723 | u64 nr, int nmi, | ||
3724 | struct perf_sample_data *data) | ||
3725 | { | ||
3726 | struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); | ||
3727 | int *recursion = perf_swcounter_recursion_context(cpuctx); | ||
3728 | struct perf_counter_context *ctx; | ||
3729 | |||
3730 | if (*recursion) | ||
3731 | goto out; | ||
3732 | |||
3733 | (*recursion)++; | ||
3734 | barrier(); | ||
3735 | |||
3736 | perf_swcounter_ctx_event(&cpuctx->ctx, type, event, | ||
3737 | nr, nmi, data); | ||
3738 | rcu_read_lock(); | ||
3739 | /* | ||
3740 | * doesn't really matter which of the child contexts the | ||
3741 | * event ends up in. | ||
3742 | */ | ||
3743 | ctx = rcu_dereference(current->perf_counter_ctxp); | ||
3744 | if (ctx) | ||
3745 | perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data); | ||
3746 | rcu_read_unlock(); | ||
3747 | |||
3748 | barrier(); | ||
3749 | (*recursion)--; | ||
3750 | |||
3751 | out: | ||
3752 | put_cpu_var(perf_cpu_context); | ||
3753 | } | ||
3754 | |||
3755 | void __perf_swcounter_event(u32 event, u64 nr, int nmi, | ||
3756 | struct pt_regs *regs, u64 addr) | ||
3757 | { | ||
3758 | struct perf_sample_data data = { | ||
3759 | .regs = regs, | ||
3760 | .addr = addr, | ||
3761 | }; | ||
3762 | |||
3763 | do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data); | ||
3764 | } | ||
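/*
 * A hypothetical call site (sketch only, not from this file): a fault
 * handler could feed the software page-fault counter by passing the event
 * id, an increment of one, nmi == 0, the interrupted registers and the
 * faulting address:
 */
static inline void count_page_fault_example(struct pt_regs *regs, unsigned long address)
{
	__perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
}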
3765 | |||
3766 | static void perf_swcounter_read(struct perf_counter *counter) | ||
3767 | { | ||
3768 | } | ||
3769 | |||
3770 | static int perf_swcounter_enable(struct perf_counter *counter) | ||
3771 | { | ||
3772 | struct hw_perf_counter *hwc = &counter->hw; | ||
3773 | |||
3774 | if (hwc->sample_period) { | ||
3775 | hwc->last_period = hwc->sample_period; | ||
3776 | perf_swcounter_set_period(counter); | ||
3777 | } | ||
3778 | return 0; | ||
3779 | } | ||
3780 | |||
3781 | static void perf_swcounter_disable(struct perf_counter *counter) | ||
3782 | { | ||
3783 | } | ||
3784 | |||
3785 | static const struct pmu perf_ops_generic = { | ||
3786 | .enable = perf_swcounter_enable, | ||
3787 | .disable = perf_swcounter_disable, | ||
3788 | .read = perf_swcounter_read, | ||
3789 | .unthrottle = perf_swcounter_unthrottle, | ||
3790 | }; | ||
3791 | |||
3792 | /* | ||
3793 | * hrtimer based swcounter callback | ||
3794 | */ | ||
3795 | |||
3796 | static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) | ||
3797 | { | ||
3798 | enum hrtimer_restart ret = HRTIMER_RESTART; | ||
3799 | struct perf_sample_data data; | ||
3800 | struct perf_counter *counter; | ||
3801 | u64 period; | ||
3802 | |||
3803 | counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); | ||
3804 | counter->pmu->read(counter); | ||
3805 | |||
3806 | data.addr = 0; | ||
3807 | data.regs = get_irq_regs(); | ||
3808 | /* | ||
3809 | * In case we exclude kernel IPs or are somehow not in interrupt | ||
3810 | * context, provide the next best thing, the user IP. | ||
3811 | */ | ||
3812 | if ((counter->attr.exclude_kernel || !data.regs) && | ||
3813 | !counter->attr.exclude_user) | ||
3814 | data.regs = task_pt_regs(current); | ||
3815 | |||
3816 | if (data.regs) { | ||
3817 | if (perf_counter_overflow(counter, 0, &data)) | ||
3818 | ret = HRTIMER_NORESTART; | ||
3819 | } | ||
3820 | |||
3821 | period = max_t(u64, 10000, counter->hw.sample_period); | ||
3822 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | ||
3823 | |||
3824 | return ret; | ||
3825 | } | ||
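/*
 * Note on the clamp above: a requested sample_period below 10000ns is
 * raised to 10000ns here (and in the enable paths below), i.e. the hrtimer
 * never fires faster than roughly 100 kHz, regardless of how small a
 * period the user asked for.
 */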
3826 | |||
3827 | /* | ||
3828 | * Software counter: cpu wall time clock | ||
3829 | */ | ||
3830 | |||
3831 | static void cpu_clock_perf_counter_update(struct perf_counter *counter) | ||
3832 | { | ||
3833 | int cpu = raw_smp_processor_id(); | ||
3834 | s64 prev; | ||
3835 | u64 now; | ||
3836 | |||
3837 | now = cpu_clock(cpu); | ||
3838 | prev = atomic64_read(&counter->hw.prev_count); | ||
3839 | atomic64_set(&counter->hw.prev_count, now); | ||
3840 | atomic64_add(now - prev, &counter->count); | ||
3841 | } | ||
3842 | |||
3843 | static int cpu_clock_perf_counter_enable(struct perf_counter *counter) | ||
3844 | { | ||
3845 | struct hw_perf_counter *hwc = &counter->hw; | ||
3846 | int cpu = raw_smp_processor_id(); | ||
3847 | |||
3848 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | ||
3849 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
3850 | hwc->hrtimer.function = perf_swcounter_hrtimer; | ||
3851 | if (hwc->sample_period) { | ||
3852 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
3853 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
3854 | ns_to_ktime(period), 0, | ||
3855 | HRTIMER_MODE_REL, 0); | ||
3856 | } | ||
3857 | |||
3858 | return 0; | ||
3859 | } | ||
3860 | |||
3861 | static void cpu_clock_perf_counter_disable(struct perf_counter *counter) | ||
3862 | { | ||
3863 | if (counter->hw.sample_period) | ||
3864 | hrtimer_cancel(&counter->hw.hrtimer); | ||
3865 | cpu_clock_perf_counter_update(counter); | ||
3866 | } | ||
3867 | |||
3868 | static void cpu_clock_perf_counter_read(struct perf_counter *counter) | ||
3869 | { | ||
3870 | cpu_clock_perf_counter_update(counter); | ||
3871 | } | ||
3872 | |||
3873 | static const struct pmu perf_ops_cpu_clock = { | ||
3874 | .enable = cpu_clock_perf_counter_enable, | ||
3875 | .disable = cpu_clock_perf_counter_disable, | ||
3876 | .read = cpu_clock_perf_counter_read, | ||
3877 | }; | ||
3878 | |||
3879 | /* | ||
3880 | * Software counter: task time clock | ||
3881 | */ | ||
3882 | |||
3883 | static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) | ||
3884 | { | ||
3885 | u64 prev; | ||
3886 | s64 delta; | ||
3887 | |||
3888 | prev = atomic64_xchg(&counter->hw.prev_count, now); | ||
3889 | delta = now - prev; | ||
3890 | atomic64_add(delta, &counter->count); | ||
3891 | } | ||
3892 | |||
3893 | static int task_clock_perf_counter_enable(struct perf_counter *counter) | ||
3894 | { | ||
3895 | struct hw_perf_counter *hwc = &counter->hw; | ||
3896 | u64 now; | ||
3897 | |||
3898 | now = counter->ctx->time; | ||
3899 | |||
3900 | atomic64_set(&hwc->prev_count, now); | ||
3901 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
3902 | hwc->hrtimer.function = perf_swcounter_hrtimer; | ||
3903 | if (hwc->sample_period) { | ||
3904 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
3905 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
3906 | ns_to_ktime(period), 0, | ||
3907 | HRTIMER_MODE_REL, 0); | ||
3908 | } | ||
3909 | |||
3910 | return 0; | ||
3911 | } | ||
3912 | |||
3913 | static void task_clock_perf_counter_disable(struct perf_counter *counter) | ||
3914 | { | ||
3915 | if (counter->hw.sample_period) | ||
3916 | hrtimer_cancel(&counter->hw.hrtimer); | ||
3917 | task_clock_perf_counter_update(counter, counter->ctx->time); | ||
3918 | |||
3919 | } | ||
3920 | |||
3921 | static void task_clock_perf_counter_read(struct perf_counter *counter) | ||
3922 | { | ||
3923 | u64 time; | ||
3924 | |||
3925 | if (!in_nmi()) { | ||
3926 | update_context_time(counter->ctx); | ||
3927 | time = counter->ctx->time; | ||
3928 | } else { | ||
3929 | u64 now = perf_clock(); | ||
3930 | u64 delta = now - counter->ctx->timestamp; | ||
3931 | time = counter->ctx->time + delta; | ||
3932 | } | ||
3933 | |||
3934 | task_clock_perf_counter_update(counter, time); | ||
3935 | } | ||
3936 | |||
3937 | static const struct pmu perf_ops_task_clock = { | ||
3938 | .enable = task_clock_perf_counter_enable, | ||
3939 | .disable = task_clock_perf_counter_disable, | ||
3940 | .read = task_clock_perf_counter_read, | ||
3941 | }; | ||
3942 | |||
3943 | #ifdef CONFIG_EVENT_PROFILE | ||
3944 | void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, | ||
3945 | int entry_size) | ||
3946 | { | ||
3947 | struct perf_raw_record raw = { | ||
3948 | .size = entry_size, | ||
3949 | .data = record, | ||
3950 | }; | ||
3951 | |||
3952 | struct perf_sample_data data = { | ||
3953 | .regs = get_irq_regs(), | ||
3954 | .addr = addr, | ||
3955 | .raw = &raw, | ||
3956 | }; | ||
3957 | |||
3958 | if (!data.regs) | ||
3959 | data.regs = task_pt_regs(current); | ||
3960 | |||
3961 | do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data); | ||
3962 | } | ||
3963 | EXPORT_SYMBOL_GPL(perf_tpcounter_event); | ||
3964 | |||
3965 | extern int ftrace_profile_enable(int); | ||
3966 | extern void ftrace_profile_disable(int); | ||
3967 | |||
3968 | static void tp_perf_counter_destroy(struct perf_counter *counter) | ||
3969 | { | ||
3970 | ftrace_profile_disable(counter->attr.config); | ||
3971 | } | ||
3972 | |||
3973 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) | ||
3974 | { | ||
3975 | /* | ||
3976 | * Raw tracepoint data is a severe data leak; only allow root to | ||
3977 | * have these. | ||
3978 | */ | ||
3979 | if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && | ||
3980 | perf_paranoid_tracepoint_raw() && | ||
3981 | !capable(CAP_SYS_ADMIN)) | ||
3982 | return ERR_PTR(-EPERM); | ||
3983 | |||
3984 | if (ftrace_profile_enable(counter->attr.config)) | ||
3985 | return NULL; | ||
3986 | |||
3987 | counter->destroy = tp_perf_counter_destroy; | ||
3988 | |||
3989 | return &perf_ops_generic; | ||
3990 | } | ||
3991 | #else | ||
3992 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) | ||
3993 | { | ||
3994 | return NULL; | ||
3995 | } | ||
3996 | #endif | ||
3997 | |||
3998 | atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; | ||
3999 | |||
4000 | static void sw_perf_counter_destroy(struct perf_counter *counter) | ||
4001 | { | ||
4002 | u64 event = counter->attr.config; | ||
4003 | |||
4004 | WARN_ON(counter->parent); | ||
4005 | |||
4006 | atomic_dec(&perf_swcounter_enabled[event]); | ||
4007 | } | ||
4008 | |||
4009 | static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) | ||
4010 | { | ||
4011 | const struct pmu *pmu = NULL; | ||
4012 | u64 event = counter->attr.config; | ||
4013 | |||
4014 | /* | ||
4015 | * Software counters (currently) can't in general distinguish | ||
4016 | * between user, kernel and hypervisor events. | ||
4017 | * However, context switches and cpu migrations are considered | ||
4018 | * to be kernel events, and page faults are never hypervisor | ||
4019 | * events. | ||
4020 | */ | ||
4021 | switch (event) { | ||
4022 | case PERF_COUNT_SW_CPU_CLOCK: | ||
4023 | pmu = &perf_ops_cpu_clock; | ||
4024 | |||
4025 | break; | ||
4026 | case PERF_COUNT_SW_TASK_CLOCK: | ||
4027 | /* | ||
4028 | * If the user instantiates this as a per-cpu counter, | ||
4029 | * use the cpu_clock counter instead. | ||
4030 | */ | ||
4031 | if (counter->ctx->task) | ||
4032 | pmu = &perf_ops_task_clock; | ||
4033 | else | ||
4034 | pmu = &perf_ops_cpu_clock; | ||
4035 | |||
4036 | break; | ||
4037 | case PERF_COUNT_SW_PAGE_FAULTS: | ||
4038 | case PERF_COUNT_SW_PAGE_FAULTS_MIN: | ||
4039 | case PERF_COUNT_SW_PAGE_FAULTS_MAJ: | ||
4040 | case PERF_COUNT_SW_CONTEXT_SWITCHES: | ||
4041 | case PERF_COUNT_SW_CPU_MIGRATIONS: | ||
4042 | if (!counter->parent) { | ||
4043 | atomic_inc(&perf_swcounter_enabled[event]); | ||
4044 | counter->destroy = sw_perf_counter_destroy; | ||
4045 | } | ||
4046 | pmu = &perf_ops_generic; | ||
4047 | break; | ||
4048 | } | ||
4049 | |||
4050 | return pmu; | ||
4051 | } | ||
4052 | |||
4053 | /* | ||
4054 | * Allocate and initialize a counter structure | ||
4055 | */ | ||
4056 | static struct perf_counter * | ||
4057 | perf_counter_alloc(struct perf_counter_attr *attr, | ||
4058 | int cpu, | ||
4059 | struct perf_counter_context *ctx, | ||
4060 | struct perf_counter *group_leader, | ||
4061 | struct perf_counter *parent_counter, | ||
4062 | gfp_t gfpflags) | ||
4063 | { | ||
4064 | const struct pmu *pmu; | ||
4065 | struct perf_counter *counter; | ||
4066 | struct hw_perf_counter *hwc; | ||
4067 | long err; | ||
4068 | |||
4069 | counter = kzalloc(sizeof(*counter), gfpflags); | ||
4070 | if (!counter) | ||
4071 | return ERR_PTR(-ENOMEM); | ||
4072 | |||
4073 | /* | ||
4074 | * Single counters are their own group leaders, with an | ||
4075 | * empty sibling list: | ||
4076 | */ | ||
4077 | if (!group_leader) | ||
4078 | group_leader = counter; | ||
4079 | |||
4080 | mutex_init(&counter->child_mutex); | ||
4081 | INIT_LIST_HEAD(&counter->child_list); | ||
4082 | |||
4083 | INIT_LIST_HEAD(&counter->list_entry); | ||
4084 | INIT_LIST_HEAD(&counter->event_entry); | ||
4085 | INIT_LIST_HEAD(&counter->sibling_list); | ||
4086 | init_waitqueue_head(&counter->waitq); | ||
4087 | |||
4088 | mutex_init(&counter->mmap_mutex); | ||
4089 | |||
4090 | counter->cpu = cpu; | ||
4091 | counter->attr = *attr; | ||
4092 | counter->group_leader = group_leader; | ||
4093 | counter->pmu = NULL; | ||
4094 | counter->ctx = ctx; | ||
4095 | counter->oncpu = -1; | ||
4096 | |||
4097 | counter->parent = parent_counter; | ||
4098 | |||
4099 | counter->ns = get_pid_ns(current->nsproxy->pid_ns); | ||
4100 | counter->id = atomic64_inc_return(&perf_counter_id); | ||
4101 | |||
4102 | counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
4103 | |||
4104 | if (attr->disabled) | ||
4105 | counter->state = PERF_COUNTER_STATE_OFF; | ||
4106 | |||
4107 | pmu = NULL; | ||
4108 | |||
4109 | hwc = &counter->hw; | ||
4110 | hwc->sample_period = attr->sample_period; | ||
4111 | if (attr->freq && attr->sample_freq) | ||
4112 | hwc->sample_period = 1; | ||
4113 | hwc->last_period = hwc->sample_period; | ||
4114 | |||
4115 | atomic64_set(&hwc->period_left, hwc->sample_period); | ||
4116 | |||
4117 | /* | ||
4118 | * we currently do not support PERF_FORMAT_GROUP on inherited counters | ||
4119 | */ | ||
4120 | if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) | ||
4121 | goto done; | ||
4122 | |||
4123 | switch (attr->type) { | ||
4124 | case PERF_TYPE_RAW: | ||
4125 | case PERF_TYPE_HARDWARE: | ||
4126 | case PERF_TYPE_HW_CACHE: | ||
4127 | pmu = hw_perf_counter_init(counter); | ||
4128 | break; | ||
4129 | |||
4130 | case PERF_TYPE_SOFTWARE: | ||
4131 | pmu = sw_perf_counter_init(counter); | ||
4132 | break; | ||
4133 | |||
4134 | case PERF_TYPE_TRACEPOINT: | ||
4135 | pmu = tp_perf_counter_init(counter); | ||
4136 | break; | ||
4137 | |||
4138 | default: | ||
4139 | break; | ||
4140 | } | ||
4141 | done: | ||
4142 | err = 0; | ||
4143 | if (!pmu) | ||
4144 | err = -EINVAL; | ||
4145 | else if (IS_ERR(pmu)) | ||
4146 | err = PTR_ERR(pmu); | ||
4147 | |||
4148 | if (err) { | ||
4149 | if (counter->ns) | ||
4150 | put_pid_ns(counter->ns); | ||
4151 | kfree(counter); | ||
4152 | return ERR_PTR(err); | ||
4153 | } | ||
4154 | |||
4155 | counter->pmu = pmu; | ||
4156 | |||
4157 | if (!counter->parent) { | ||
4158 | atomic_inc(&nr_counters); | ||
4159 | if (counter->attr.mmap) | ||
4160 | atomic_inc(&nr_mmap_counters); | ||
4161 | if (counter->attr.comm) | ||
4162 | atomic_inc(&nr_comm_counters); | ||
4163 | if (counter->attr.task) | ||
4164 | atomic_inc(&nr_task_counters); | ||
4165 | } | ||
4166 | |||
4167 | return counter; | ||
4168 | } | ||
4169 | |||
4170 | static int perf_copy_attr(struct perf_counter_attr __user *uattr, | ||
4171 | struct perf_counter_attr *attr) | ||
4172 | { | ||
4173 | int ret; | ||
4174 | u32 size; | ||
4175 | |||
4176 | if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) | ||
4177 | return -EFAULT; | ||
4178 | |||
4179 | /* | ||
4180 | * zero the full structure, so that a short copy leaves the rest zeroed. | ||
4181 | */ | ||
4182 | memset(attr, 0, sizeof(*attr)); | ||
4183 | |||
4184 | ret = get_user(size, &uattr->size); | ||
4185 | if (ret) | ||
4186 | return ret; | ||
4187 | |||
4188 | if (size > PAGE_SIZE) /* silly large */ | ||
4189 | goto err_size; | ||
4190 | |||
4191 | if (!size) /* abi compat */ | ||
4192 | size = PERF_ATTR_SIZE_VER0; | ||
4193 | |||
4194 | if (size < PERF_ATTR_SIZE_VER0) | ||
4195 | goto err_size; | ||
4196 | |||
4197 | /* | ||
4198 | * If we're handed a bigger struct than we know of, | ||
4199 | * ensure all the unknown bits are 0. | ||
4200 | */ | ||
4201 | if (size > sizeof(*attr)) { | ||
4202 | unsigned long val; | ||
4203 | unsigned long __user *addr; | ||
4204 | unsigned long __user *end; | ||
4205 | |||
4206 | addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr), | ||
4207 | sizeof(unsigned long)); | ||
4208 | end = PTR_ALIGN((void __user *)uattr + size, | ||
4209 | sizeof(unsigned long)); | ||
4210 | |||
4211 | for (; addr < end; addr += sizeof(unsigned long)) { | ||
4212 | ret = get_user(val, addr); | ||
4213 | if (ret) | ||
4214 | return ret; | ||
4215 | if (val) | ||
4216 | goto err_size; | ||
4217 | } | ||
4218 | } | ||
4219 | |||
4220 | ret = copy_from_user(attr, uattr, size); | ||
4221 | if (ret) | ||
4222 | return -EFAULT; | ||
4223 | |||
4224 | /* | ||
4225 | * If the type is known, the corresponding type-specific init | ||
4226 | * will verify attr->config. | ||
4227 | */ | ||
4228 | if (attr->type >= PERF_TYPE_MAX) | ||
4229 | return -EINVAL; | ||
4230 | |||
4231 | if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) | ||
4232 | return -EINVAL; | ||
4233 | |||
4234 | if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) | ||
4235 | return -EINVAL; | ||
4236 | |||
4237 | if (attr->read_format & ~(PERF_FORMAT_MAX-1)) | ||
4238 | return -EINVAL; | ||
4239 | |||
4240 | out: | ||
4241 | return ret; | ||
4242 | |||
4243 | err_size: | ||
4244 | put_user(sizeof(*attr), &uattr->size); | ||
4245 | ret = -E2BIG; | ||
4246 | goto out; | ||
4247 | } | ||
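/*
 * Userspace-side sketch of the size handshake above (illustration only): a
 * binary built against a newer header may pass a larger struct, but every
 * byte beyond what this kernel knows about must be zero, otherwise the copy
 * fails with -E2BIG and uattr->size is rewritten to the size the kernel
 * does support.  Keeping the whole struct zeroed and setting attr.size is
 * therefore the safe pattern:
 */
#if 0	/* userspace illustration */
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));		/* unknown tail bytes stay zero */
	attr.size   = sizeof(attr);		/* tell the kernel how much we passed */
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
#endif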
4248 | |||
4249 | int perf_counter_set_output(struct perf_counter *counter, int output_fd) | ||
4250 | { | ||
4251 | struct perf_counter *output_counter = NULL; | ||
4252 | struct file *output_file = NULL; | ||
4253 | struct perf_counter *old_output; | ||
4254 | int fput_needed = 0; | ||
4255 | int ret = -EINVAL; | ||
4256 | |||
4257 | if (!output_fd) | ||
4258 | goto set; | ||
4259 | |||
4260 | output_file = fget_light(output_fd, &fput_needed); | ||
4261 | if (!output_file) | ||
4262 | return -EBADF; | ||
4263 | |||
4264 | if (output_file->f_op != &perf_fops) | ||
4265 | goto out; | ||
4266 | |||
4267 | output_counter = output_file->private_data; | ||
4268 | |||
4269 | /* Don't chain output fds */ | ||
4270 | if (output_counter->output) | ||
4271 | goto out; | ||
4272 | |||
4273 | /* Don't set an output fd when we already have an output channel */ | ||
4274 | if (counter->data) | ||
4275 | goto out; | ||
4276 | |||
4277 | atomic_long_inc(&output_file->f_count); | ||
4278 | |||
4279 | set: | ||
4280 | mutex_lock(&counter->mmap_mutex); | ||
4281 | old_output = counter->output; | ||
4282 | rcu_assign_pointer(counter->output, output_counter); | ||
4283 | mutex_unlock(&counter->mmap_mutex); | ||
4284 | |||
4285 | if (old_output) { | ||
4286 | /* | ||
4287 | * we need to make sure no existing perf_output_*() | ||
4288 | * is still referencing this counter. | ||
4289 | */ | ||
4290 | synchronize_rcu(); | ||
4291 | fput(old_output->filp); | ||
4292 | } | ||
4293 | |||
4294 | ret = 0; | ||
4295 | out: | ||
4296 | fput_light(output_file, fput_needed); | ||
4297 | return ret; | ||
4298 | } | ||
4299 | |||
4300 | /** | ||
4301 | * sys_perf_counter_open - open a performance counter, associate it to a task/cpu | ||
4302 | * | ||
4303 | * @attr_uptr: event type attributes for monitoring/sampling | ||
4304 | * @pid: target pid | ||
4305 | * @cpu: target cpu | ||
4306 | * @group_fd: group leader counter fd | ||
4307 | */ | ||
4308 | SYSCALL_DEFINE5(perf_counter_open, | ||
4309 | struct perf_counter_attr __user *, attr_uptr, | ||
4310 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) | ||
4311 | { | ||
4312 | struct perf_counter *counter, *group_leader; | ||
4313 | struct perf_counter_attr attr; | ||
4314 | struct perf_counter_context *ctx; | ||
4315 | struct file *counter_file = NULL; | ||
4316 | struct file *group_file = NULL; | ||
4317 | int fput_needed = 0; | ||
4318 | int fput_needed2 = 0; | ||
4319 | int err; | ||
4320 | |||
4321 | /* for future expandability... */ | ||
4322 | if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT)) | ||
4323 | return -EINVAL; | ||
4324 | |||
4325 | err = perf_copy_attr(attr_uptr, &attr); | ||
4326 | if (err) | ||
4327 | return err; | ||
4328 | |||
4329 | if (!attr.exclude_kernel) { | ||
4330 | if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) | ||
4331 | return -EACCES; | ||
4332 | } | ||
4333 | |||
4334 | if (attr.freq) { | ||
4335 | if (attr.sample_freq > sysctl_perf_counter_sample_rate) | ||
4336 | return -EINVAL; | ||
4337 | } | ||
4338 | |||
4339 | /* | ||
4340 | * Get the target context (task or percpu): | ||
4341 | */ | ||
4342 | ctx = find_get_context(pid, cpu); | ||
4343 | if (IS_ERR(ctx)) | ||
4344 | return PTR_ERR(ctx); | ||
4345 | |||
4346 | /* | ||
4347 | * Look up the group leader (we will attach this counter to it): | ||
4348 | */ | ||
4349 | group_leader = NULL; | ||
4350 | if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { | ||
4351 | err = -EINVAL; | ||
4352 | group_file = fget_light(group_fd, &fput_needed); | ||
4353 | if (!group_file) | ||
4354 | goto err_put_context; | ||
4355 | if (group_file->f_op != &perf_fops) | ||
4356 | goto err_put_context; | ||
4357 | |||
4358 | group_leader = group_file->private_data; | ||
4359 | /* | ||
4360 | * Do not allow a recursive hierarchy (this new sibling | ||
4361 | * becoming part of another group-sibling): | ||
4362 | */ | ||
4363 | if (group_leader->group_leader != group_leader) | ||
4364 | goto err_put_context; | ||
4365 | /* | ||
4366 | * Do not allow attaching to a group in a different | ||
4367 | * task or CPU context: | ||
4368 | */ | ||
4369 | if (group_leader->ctx != ctx) | ||
4370 | goto err_put_context; | ||
4371 | /* | ||
4372 | * Only a group leader can be exclusive or pinned | ||
4373 | */ | ||
4374 | if (attr.exclusive || attr.pinned) | ||
4375 | goto err_put_context; | ||
4376 | } | ||
4377 | |||
4378 | counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, | ||
4379 | NULL, GFP_KERNEL); | ||
4380 | err = PTR_ERR(counter); | ||
4381 | if (IS_ERR(counter)) | ||
4382 | goto err_put_context; | ||
4383 | |||
4384 | err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); | ||
4385 | if (err < 0) | ||
4386 | goto err_free_put_context; | ||
4387 | |||
4388 | counter_file = fget_light(err, &fput_needed2); | ||
4389 | if (!counter_file) | ||
4390 | goto err_free_put_context; | ||
4391 | |||
4392 | if (flags & PERF_FLAG_FD_OUTPUT) { | ||
4393 | err = perf_counter_set_output(counter, group_fd); | ||
4394 | if (err) | ||
4395 | goto err_fput_free_put_context; | ||
4396 | } | ||
4397 | |||
4398 | counter->filp = counter_file; | ||
4399 | WARN_ON_ONCE(ctx->parent_ctx); | ||
4400 | mutex_lock(&ctx->mutex); | ||
4401 | perf_install_in_context(ctx, counter, cpu); | ||
4402 | ++ctx->generation; | ||
4403 | mutex_unlock(&ctx->mutex); | ||
4404 | |||
4405 | counter->owner = current; | ||
4406 | get_task_struct(current); | ||
4407 | mutex_lock(¤t->perf_counter_mutex); | ||
4408 | list_add_tail(&counter->owner_entry, ¤t->perf_counter_list); | ||
4409 | mutex_unlock(¤t->perf_counter_mutex); | ||
4410 | |||
4411 | err_fput_free_put_context: | ||
4412 | fput_light(counter_file, fput_needed2); | ||
4413 | |||
4414 | err_free_put_context: | ||
4415 | if (err < 0) | ||
4416 | kfree(counter); | ||
4417 | |||
4418 | err_put_context: | ||
4419 | if (err < 0) | ||
4420 | put_ctx(ctx); | ||
4421 | |||
4422 | fput_light(group_file, fput_needed); | ||
4423 | |||
4424 | return err; | ||
4425 | } | ||
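/*
 * A minimal userspace usage sketch (illustration only), assuming the
 * architecture defines __NR_perf_counter_open: open a task-clock counter on
 * the calling task (pid 0), any CPU (-1), with no group leader (-1) and no
 * flags, then read back the accumulated count.
 */
#if 0	/* userspace illustration */
	struct perf_counter_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_TASK_CLOCK,
		.size	= sizeof(attr),
	};
	unsigned long long count;
	int fd;

	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0UL);
	if (fd >= 0) {
		read(fd, &count, sizeof(count));	/* task clock, in nanoseconds */
		close(fd);
	}
#endif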
4426 | |||
4427 | /* | ||
4428 | * inherit a counter from parent task to child task: | ||
4429 | */ | ||
4430 | static struct perf_counter * | ||
4431 | inherit_counter(struct perf_counter *parent_counter, | ||
4432 | struct task_struct *parent, | ||
4433 | struct perf_counter_context *parent_ctx, | ||
4434 | struct task_struct *child, | ||
4435 | struct perf_counter *group_leader, | ||
4436 | struct perf_counter_context *child_ctx) | ||
4437 | { | ||
4438 | struct perf_counter *child_counter; | ||
4439 | |||
4440 | /* | ||
4441 | * Instead of creating recursive hierarchies of counters, | ||
4442 | * we link inherited counters back to the original parent, | ||
4443 | * which is guaranteed to have a filp, which we use as the reference | ||
4444 | * count: | ||
4445 | */ | ||
4446 | if (parent_counter->parent) | ||
4447 | parent_counter = parent_counter->parent; | ||
4448 | |||
4449 | child_counter = perf_counter_alloc(&parent_counter->attr, | ||
4450 | parent_counter->cpu, child_ctx, | ||
4451 | group_leader, parent_counter, | ||
4452 | GFP_KERNEL); | ||
4453 | if (IS_ERR(child_counter)) | ||
4454 | return child_counter; | ||
4455 | get_ctx(child_ctx); | ||
4456 | |||
4457 | /* | ||
4458 | * Make the child state follow the state of the parent counter, | ||
4459 | * not its attr.disabled bit. We hold the parent's mutex, | ||
4460 | * so we won't race with perf_counter_{en, dis}able_family. | ||
4461 | */ | ||
4462 | if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) | ||
4463 | child_counter->state = PERF_COUNTER_STATE_INACTIVE; | ||
4464 | else | ||
4465 | child_counter->state = PERF_COUNTER_STATE_OFF; | ||
4466 | |||
4467 | if (parent_counter->attr.freq) | ||
4468 | child_counter->hw.sample_period = parent_counter->hw.sample_period; | ||
4469 | |||
4470 | /* | ||
4471 | * Link it up in the child's context: | ||
4472 | */ | ||
4473 | add_counter_to_ctx(child_counter, child_ctx); | ||
4474 | |||
4475 | /* | ||
4476 | * Get a reference to the parent filp - we will fput it | ||
4477 | * when the child counter exits. This is safe to do because | ||
4478 | * we are in the parent and we know that the filp still | ||
4479 | * exists and has a nonzero count: | ||
4480 | */ | ||
4481 | atomic_long_inc(&parent_counter->filp->f_count); | ||
4482 | |||
4483 | /* | ||
4484 | * Link this into the parent counter's child list | ||
4485 | */ | ||
4486 | WARN_ON_ONCE(parent_counter->ctx->parent_ctx); | ||
4487 | mutex_lock(&parent_counter->child_mutex); | ||
4488 | list_add_tail(&child_counter->child_list, &parent_counter->child_list); | ||
4489 | mutex_unlock(&parent_counter->child_mutex); | ||
4490 | |||
4491 | return child_counter; | ||
4492 | } | ||
4493 | |||
4494 | static int inherit_group(struct perf_counter *parent_counter, | ||
4495 | struct task_struct *parent, | ||
4496 | struct perf_counter_context *parent_ctx, | ||
4497 | struct task_struct *child, | ||
4498 | struct perf_counter_context *child_ctx) | ||
4499 | { | ||
4500 | struct perf_counter *leader; | ||
4501 | struct perf_counter *sub; | ||
4502 | struct perf_counter *child_ctr; | ||
4503 | |||
4504 | leader = inherit_counter(parent_counter, parent, parent_ctx, | ||
4505 | child, NULL, child_ctx); | ||
4506 | if (IS_ERR(leader)) | ||
4507 | return PTR_ERR(leader); | ||
4508 | list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { | ||
4509 | child_ctr = inherit_counter(sub, parent, parent_ctx, | ||
4510 | child, leader, child_ctx); | ||
4511 | if (IS_ERR(child_ctr)) | ||
4512 | return PTR_ERR(child_ctr); | ||
4513 | } | ||
4514 | return 0; | ||
4515 | } | ||
4516 | |||
4517 | static void sync_child_counter(struct perf_counter *child_counter, | ||
4518 | struct task_struct *child) | ||
4519 | { | ||
4520 | struct perf_counter *parent_counter = child_counter->parent; | ||
4521 | u64 child_val; | ||
4522 | |||
4523 | if (child_counter->attr.inherit_stat) | ||
4524 | perf_counter_read_event(child_counter, child); | ||
4525 | |||
4526 | child_val = atomic64_read(&child_counter->count); | ||
4527 | |||
4528 | /* | ||
4529 | * Add back the child's count to the parent's count: | ||
4530 | */ | ||
4531 | atomic64_add(child_val, &parent_counter->count); | ||
4532 | atomic64_add(child_counter->total_time_enabled, | ||
4533 | &parent_counter->child_total_time_enabled); | ||
4534 | atomic64_add(child_counter->total_time_running, | ||
4535 | &parent_counter->child_total_time_running); | ||
4536 | |||
4537 | /* | ||
4538 | * Remove this counter from the parent's list | ||
4539 | */ | ||
4540 | WARN_ON_ONCE(parent_counter->ctx->parent_ctx); | ||
4541 | mutex_lock(&parent_counter->child_mutex); | ||
4542 | list_del_init(&child_counter->child_list); | ||
4543 | mutex_unlock(&parent_counter->child_mutex); | ||
4544 | |||
4545 | /* | ||
4546 | * Release the parent counter, if this was the last | ||
4547 | * reference to it. | ||
4548 | */ | ||
4549 | fput(parent_counter->filp); | ||
4550 | } | ||
4551 | |||
4552 | static void | ||
4553 | __perf_counter_exit_task(struct perf_counter *child_counter, | ||
4554 | struct perf_counter_context *child_ctx, | ||
4555 | struct task_struct *child) | ||
4556 | { | ||
4557 | struct perf_counter *parent_counter; | ||
4558 | |||
4559 | update_counter_times(child_counter); | ||
4560 | perf_counter_remove_from_context(child_counter); | ||
4561 | |||
4562 | parent_counter = child_counter->parent; | ||
4563 | /* | ||
4564 | * It can happen that the parent exits first, and has counters | ||
4565 | * that are still around due to the child reference. These | ||
4566 | * counters need to be zapped - but otherwise linger. | ||
4567 | */ | ||
4568 | if (parent_counter) { | ||
4569 | sync_child_counter(child_counter, child); | ||
4570 | free_counter(child_counter); | ||
4571 | } | ||
4572 | } | ||
4573 | |||
4574 | /* | ||
4575 | * When a child task exits, feed back counter values to parent counters. | ||
4576 | */ | ||
4577 | void perf_counter_exit_task(struct task_struct *child) | ||
4578 | { | ||
4579 | struct perf_counter *child_counter, *tmp; | ||
4580 | struct perf_counter_context *child_ctx; | ||
4581 | unsigned long flags; | ||
4582 | |||
4583 | if (likely(!child->perf_counter_ctxp)) { | ||
4584 | perf_counter_task(child, NULL, 0); | ||
4585 | return; | ||
4586 | } | ||
4587 | |||
4588 | local_irq_save(flags); | ||
4589 | /* | ||
4590 | * We can't reschedule here because interrupts are disabled, | ||
4591 | * and either the child is current or it is a task that can't be | ||
4592 | * scheduled, so we are now safe from rescheduling changing | ||
4593 | * our context. | ||
4594 | */ | ||
4595 | child_ctx = child->perf_counter_ctxp; | ||
4596 | __perf_counter_task_sched_out(child_ctx); | ||
4597 | |||
4598 | /* | ||
4599 | * Take the context lock here so that if find_get_context is | ||
4600 | * reading child->perf_counter_ctxp, we wait until it has | ||
4601 | * incremented the context's refcount before we do put_ctx below. | ||
4602 | */ | ||
4603 | spin_lock(&child_ctx->lock); | ||
4604 | child->perf_counter_ctxp = NULL; | ||
4605 | /* | ||
4606 | * If this context is a clone, unclone it so it can't get | ||
4607 | * swapped to another process while we're removing all | ||
4608 | * the counters from it. | ||
4609 | */ | ||
4610 | unclone_ctx(child_ctx); | ||
4611 | spin_unlock_irqrestore(&child_ctx->lock, flags); | ||
4612 | |||
4613 | /* | ||
4614 | * Report the task dead after unscheduling the counters so that we | ||
4615 | * won't get any samples after PERF_EVENT_EXIT. We can however still | ||
4616 | * get a few PERF_EVENT_READ events. | ||
4617 | */ | ||
4618 | perf_counter_task(child, child_ctx, 0); | ||
4619 | |||
4620 | /* | ||
4621 | * We can recurse on the same lock type through: | ||
4622 | * | ||
4623 | * __perf_counter_exit_task() | ||
4624 | * sync_child_counter() | ||
4625 | * fput(parent_counter->filp) | ||
4626 | * perf_release() | ||
4627 | * mutex_lock(&ctx->mutex) | ||
4628 | * | ||
4629 | * But since it's the parent context it won't be the same instance. | ||
4630 | */ | ||
4631 | mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); | ||
4632 | |||
4633 | again: | ||
4634 | list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, | ||
4635 | list_entry) | ||
4636 | __perf_counter_exit_task(child_counter, child_ctx, child); | ||
4637 | |||
4638 | /* | ||
4639 | * If the last counter was a group counter, it will have appended all | ||
4640 | * its siblings to the list, but we obtained 'tmp' before that which | ||
4641 | * will still point to the list head terminating the iteration. | ||
4642 | */ | ||
4643 | if (!list_empty(&child_ctx->counter_list)) | ||
4644 | goto again; | ||
4645 | |||
4646 | mutex_unlock(&child_ctx->mutex); | ||
4647 | |||
4648 | put_ctx(child_ctx); | ||
4649 | } | ||
4650 | |||
4651 | /* | ||
4652 | * Free an unexposed, unused context, as created by inheritance in | ||
4653 | * perf_counter_init_task() below; used by fork() in case of failure. | ||
4654 | */ | ||
4655 | void perf_counter_free_task(struct task_struct *task) | ||
4656 | { | ||
4657 | struct perf_counter_context *ctx = task->perf_counter_ctxp; | ||
4658 | struct perf_counter *counter, *tmp; | ||
4659 | |||
4660 | if (!ctx) | ||
4661 | return; | ||
4662 | |||
4663 | mutex_lock(&ctx->mutex); | ||
4664 | again: | ||
4665 | list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) { | ||
4666 | struct perf_counter *parent = counter->parent; | ||
4667 | |||
4668 | if (WARN_ON_ONCE(!parent)) | ||
4669 | continue; | ||
4670 | |||
4671 | mutex_lock(&parent->child_mutex); | ||
4672 | list_del_init(&counter->child_list); | ||
4673 | mutex_unlock(&parent->child_mutex); | ||
4674 | |||
4675 | fput(parent->filp); | ||
4676 | |||
4677 | list_del_counter(counter, ctx); | ||
4678 | free_counter(counter); | ||
4679 | } | ||
4680 | |||
4681 | if (!list_empty(&ctx->counter_list)) | ||
4682 | goto again; | ||
4683 | |||
4684 | mutex_unlock(&ctx->mutex); | ||
4685 | |||
4686 | put_ctx(ctx); | ||
4687 | } | ||
4688 | |||
4689 | /* | ||
4690 | * Initialize the perf_counter context in task_struct | ||
4691 | */ | ||
4692 | int perf_counter_init_task(struct task_struct *child) | ||
4693 | { | ||
4694 | struct perf_counter_context *child_ctx, *parent_ctx; | ||
4695 | struct perf_counter_context *cloned_ctx; | ||
4696 | struct perf_counter *counter; | ||
4697 | struct task_struct *parent = current; | ||
4698 | int inherited_all = 1; | ||
4699 | int ret = 0; | ||
4700 | |||
4701 | child->perf_counter_ctxp = NULL; | ||
4702 | |||
4703 | mutex_init(&child->perf_counter_mutex); | ||
4704 | INIT_LIST_HEAD(&child->perf_counter_list); | ||
4705 | |||
4706 | if (likely(!parent->perf_counter_ctxp)) | ||
4707 | return 0; | ||
4708 | |||
4709 | /* | ||
4710 | * This is executed from the parent task context, so inherit | ||
4711 | * counters that have been marked for cloning. | ||
4712 | * First allocate and initialize a context for the child. | ||
4713 | */ | ||
4714 | |||
4715 | child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); | ||
4716 | if (!child_ctx) | ||
4717 | return -ENOMEM; | ||
4718 | |||
4719 | __perf_counter_init_context(child_ctx, child); | ||
4720 | child->perf_counter_ctxp = child_ctx; | ||
4721 | get_task_struct(child); | ||
4722 | |||
4723 | /* | ||
4724 | * If the parent's context is a clone, pin it so it won't get | ||
4725 | * swapped under us. | ||
4726 | */ | ||
4727 | parent_ctx = perf_pin_task_context(parent); | ||
4728 | |||
4729 | /* | ||
4730 | * No need to check if parent_ctx != NULL here; since we saw | ||
4731 | * it non-NULL earlier, the only reason for it to become NULL | ||
4732 | * is if we exit, and since we're currently in the middle of | ||
4733 | * a fork we can't be exiting at the same time. | ||
4734 | */ | ||
4735 | |||
4736 | /* | ||
4737 | * Lock the parent list. No need to lock the child - not PID | ||
4738 | * hashed yet and not running, so nobody can access it. | ||
4739 | */ | ||
4740 | mutex_lock(&parent_ctx->mutex); | ||
4741 | |||
4742 | /* | ||
4743 | * We don't have to disable NMIs - we are only looking at | ||
4744 | * the list, not manipulating it: | ||
4745 | */ | ||
4746 | list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { | ||
4747 | if (counter != counter->group_leader) | ||
4748 | continue; | ||
4749 | |||
4750 | if (!counter->attr.inherit) { | ||
4751 | inherited_all = 0; | ||
4752 | continue; | ||
4753 | } | ||
4754 | |||
4755 | ret = inherit_group(counter, parent, parent_ctx, | ||
4756 | child, child_ctx); | ||
4757 | if (ret) { | ||
4758 | inherited_all = 0; | ||
4759 | break; | ||
4760 | } | ||
4761 | } | ||
4762 | |||
4763 | if (inherited_all) { | ||
4764 | /* | ||
4765 | * Mark the child context as a clone of the parent | ||
4766 | * context, or of whatever the parent is a clone of. | ||
4767 | * Note that if the parent is a clone, it could get | ||
4768 | * uncloned at any point, but that doesn't matter | ||
4769 | * because the list of counters and the generation | ||
4770 | * count can't have changed since we took the mutex. | ||
4771 | */ | ||
4772 | cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); | ||
4773 | if (cloned_ctx) { | ||
4774 | child_ctx->parent_ctx = cloned_ctx; | ||
4775 | child_ctx->parent_gen = parent_ctx->parent_gen; | ||
4776 | } else { | ||
4777 | child_ctx->parent_ctx = parent_ctx; | ||
4778 | child_ctx->parent_gen = parent_ctx->generation; | ||
4779 | } | ||
4780 | get_ctx(child_ctx->parent_ctx); | ||
4781 | } | ||
4782 | |||
4783 | mutex_unlock(&parent_ctx->mutex); | ||
4784 | |||
4785 | perf_unpin_context(parent_ctx); | ||
4786 | |||
4787 | return ret; | ||
4788 | } | ||
4789 | |||
4790 | static void __cpuinit perf_counter_init_cpu(int cpu) | ||
4791 | { | ||
4792 | struct perf_cpu_context *cpuctx; | ||
4793 | |||
4794 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
4795 | __perf_counter_init_context(&cpuctx->ctx, NULL); | ||
4796 | |||
4797 | spin_lock(&perf_resource_lock); | ||
4798 | cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; | ||
4799 | spin_unlock(&perf_resource_lock); | ||
4800 | |||
4801 | hw_perf_counter_setup(cpu); | ||
4802 | } | ||
4803 | |||
4804 | #ifdef CONFIG_HOTPLUG_CPU | ||
4805 | static void __perf_counter_exit_cpu(void *info) | ||
4806 | { | ||
4807 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
4808 | struct perf_counter_context *ctx = &cpuctx->ctx; | ||
4809 | struct perf_counter *counter, *tmp; | ||
4810 | |||
4811 | list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) | ||
4812 | __perf_counter_remove_from_context(counter); | ||
4813 | } | ||
4814 | static void perf_counter_exit_cpu(int cpu) | ||
4815 | { | ||
4816 | struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
4817 | struct perf_counter_context *ctx = &cpuctx->ctx; | ||
4818 | |||
4819 | mutex_lock(&ctx->mutex); | ||
4820 | smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); | ||
4821 | mutex_unlock(&ctx->mutex); | ||
4822 | } | ||
4823 | #else | ||
4824 | static inline void perf_counter_exit_cpu(int cpu) { } | ||
4825 | #endif | ||
4826 | |||
4827 | static int __cpuinit | ||
4828 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | ||
4829 | { | ||
4830 | unsigned int cpu = (long)hcpu; | ||
4831 | |||
4832 | switch (action) { | ||
4833 | |||
4834 | case CPU_UP_PREPARE: | ||
4835 | case CPU_UP_PREPARE_FROZEN: | ||
4836 | perf_counter_init_cpu(cpu); | ||
4837 | break; | ||
4838 | |||
4839 | case CPU_ONLINE: | ||
4840 | case CPU_ONLINE_FROZEN: | ||
4841 | hw_perf_counter_setup_online(cpu); | ||
4842 | break; | ||
4843 | |||
4844 | case CPU_DOWN_PREPARE: | ||
4845 | case CPU_DOWN_PREPARE_FROZEN: | ||
4846 | perf_counter_exit_cpu(cpu); | ||
4847 | break; | ||
4848 | |||
4849 | default: | ||
4850 | break; | ||
4851 | } | ||
4852 | |||
4853 | return NOTIFY_OK; | ||
4854 | } | ||
4855 | |||
4856 | /* | ||
4857 | * This has to have a higher priority than migration_notifier in sched.c. | ||
4858 | */ | ||
4859 | static struct notifier_block __cpuinitdata perf_cpu_nb = { | ||
4860 | .notifier_call = perf_cpu_notify, | ||
4861 | .priority = 20, | ||
4862 | }; | ||
4863 | |||
4864 | void __init perf_counter_init(void) | ||
4865 | { | ||
4866 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, | ||
4867 | (void *)(long)smp_processor_id()); | ||
4868 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, | ||
4869 | (void *)(long)smp_processor_id()); | ||
4870 | register_cpu_notifier(&perf_cpu_nb); | ||
4871 | } | ||
4872 | |||
4873 | static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf) | ||
4874 | { | ||
4875 | return sprintf(buf, "%d\n", perf_reserved_percpu); | ||
4876 | } | ||
4877 | |||
4878 | static ssize_t | ||
4879 | perf_set_reserve_percpu(struct sysdev_class *class, | ||
4880 | const char *buf, | ||
4881 | size_t count) | ||
4882 | { | ||
4883 | struct perf_cpu_context *cpuctx; | ||
4884 | unsigned long val; | ||
4885 | int err, cpu, mpt; | ||
4886 | |||
4887 | err = strict_strtoul(buf, 10, &val); | ||
4888 | if (err) | ||
4889 | return err; | ||
4890 | if (val > perf_max_counters) | ||
4891 | return -EINVAL; | ||
4892 | |||
4893 | spin_lock(&perf_resource_lock); | ||
4894 | perf_reserved_percpu = val; | ||
4895 | for_each_online_cpu(cpu) { | ||
4896 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
4897 | spin_lock_irq(&cpuctx->ctx.lock); | ||
4898 | mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, | ||
4899 | perf_max_counters - perf_reserved_percpu); | ||
4900 | cpuctx->max_pertask = mpt; | ||
4901 | spin_unlock_irq(&cpuctx->ctx.lock); | ||
4902 | } | ||
4903 | spin_unlock(&perf_resource_lock); | ||
4904 | |||
4905 | return count; | ||
4906 | } | ||
4907 | |||
4908 | static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf) | ||
4909 | { | ||
4910 | return sprintf(buf, "%d\n", perf_overcommit); | ||
4911 | } | ||
4912 | |||
4913 | static ssize_t | ||
4914 | perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count) | ||
4915 | { | ||
4916 | unsigned long val; | ||
4917 | int err; | ||
4918 | |||
4919 | err = strict_strtoul(buf, 10, &val); | ||
4920 | if (err) | ||
4921 | return err; | ||
4922 | if (val > 1) | ||
4923 | return -EINVAL; | ||
4924 | |||
4925 | spin_lock(&perf_resource_lock); | ||
4926 | perf_overcommit = val; | ||
4927 | spin_unlock(&perf_resource_lock); | ||
4928 | |||
4929 | return count; | ||
4930 | } | ||
4931 | |||
4932 | static SYSDEV_CLASS_ATTR( | ||
4933 | reserve_percpu, | ||
4934 | 0644, | ||
4935 | perf_show_reserve_percpu, | ||
4936 | perf_set_reserve_percpu | ||
4937 | ); | ||
4938 | |||
4939 | static SYSDEV_CLASS_ATTR( | ||
4940 | overcommit, | ||
4941 | 0644, | ||
4942 | perf_show_overcommit, | ||
4943 | perf_set_overcommit | ||
4944 | ); | ||
4945 | |||
4946 | static struct attribute *perfclass_attrs[] = { | ||
4947 | &attr_reserve_percpu.attr, | ||
4948 | &attr_overcommit.attr, | ||
4949 | NULL | ||
4950 | }; | ||
4951 | |||
4952 | static struct attribute_group perfclass_attr_group = { | ||
4953 | .attrs = perfclass_attrs, | ||
4954 | .name = "perf_counters", | ||
4955 | }; | ||
4956 | |||
4957 | static int __init perf_counter_sysfs_init(void) | ||
4958 | { | ||
4959 | return sysfs_create_group(&cpu_sysdev_class.kset.kobj, | ||
4960 | &perfclass_attr_group); | ||
4961 | } | ||
4962 | device_initcall(perf_counter_sysfs_init); | ||