path: root/kernel/perf_event.c
author    Ingo Molnar <mingo@elte.hu>    2009-09-21 06:02:48 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-09-21 08:28:04 -0400
commit    cdd6c482c9ff9c55475ee7392ec8f672eddb7be6 (patch)
tree      81f98a3ab46c589792057fe2392c1e10f8ad7893 /kernel/perf_event.c
parent    dfc65094d0313cc48969fa60bcf33d693aeb05a7 (diff)
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!

In the past few months the perfcounters subsystem has grown out of its initial role of counting hardware events, and has become (and is becoming) a much broader generic event enumeration, reporting, logging, monitoring and analysis facility.

Naming its core object 'perf_counter' and naming the subsystem 'perfcounters' has become more and more of a misnomer. With pending code like hw-breakpoints support, the 'counter' name is less and less appropriate.

All in all, we've decided to rename the subsystem to 'performance events' and to propagate this rename through all fields, variables and API names (in an ABI-compatible fashion).

The word 'event' is also a bit shorter than 'counter', which makes it slightly more convenient to write and handle as well.

Thanks go to Stephane Eranian, who first observed this misnomer and suggested a rename.

User-space tooling and ABI compatibility are not affected - this patch should be function-invariant. (Also, defconfigs were not touched, to keep the size down.)

This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES

... to keep it as correct as possible. This script can also be used by anyone who has pending perfcounters patches - it converts a Linux kernel tree over to the new naming. We tried to time this change to the point where the amount of pending patches is smallest: the end of the merge window.

Namespace clashes were fixed up in a preparatory patch - and some stylistic fallout will be fixed up in a subsequent patch.

( NOTE: 'counters' are still the proper terminology when we deal with hardware registers - and these sed scripts are a bit over-eager in renaming them. I've undone some of that, but in case there's something left where 'counter' would be better than 'event' we can undo that on an individual basis instead of touching an otherwise nicely automated patch. )

Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--    kernel/perf_event.c    5000
1 files changed, 5000 insertions, 0 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
new file mode 100644
index 000000000000..6e8b99a04e1e
--- /dev/null
+++ b/kernel/perf_event.c
@@ -0,0 +1,5000 @@
1/*
2 * Performance event core code
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12#include <linux/fs.h>
13#include <linux/mm.h>
14#include <linux/cpu.h>
15#include <linux/smp.h>
16#include <linux/file.h>
17#include <linux/poll.h>
18#include <linux/sysfs.h>
19#include <linux/dcache.h>
20#include <linux/percpu.h>
21#include <linux/ptrace.h>
22#include <linux/vmstat.h>
23#include <linux/hardirq.h>
24#include <linux/rculist.h>
25#include <linux/uaccess.h>
26#include <linux/syscalls.h>
27#include <linux/anon_inodes.h>
28#include <linux/kernel_stat.h>
29#include <linux/perf_event.h>
30
31#include <asm/irq_regs.h>
32
33/*
34 * Each CPU has a list of per CPU events:
35 */
36DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38int perf_max_events __read_mostly = 1;
39static int perf_reserved_percpu __read_mostly;
40static int perf_overcommit __read_mostly = 1;
41
42static atomic_t nr_events __read_mostly;
43static atomic_t nr_mmap_events __read_mostly;
44static atomic_t nr_comm_events __read_mostly;
45static atomic_t nr_task_events __read_mostly;
46
47/*
48 * perf event paranoia level:
49 * -1 - not paranoid at all
50 * 0 - disallow raw tracepoint access for unpriv
51 * 1 - disallow cpu events for unpriv
52 * 2 - disallow kernel profiling for unpriv
53 */
54int sysctl_perf_event_paranoid __read_mostly = 1;
55
56static inline bool perf_paranoid_tracepoint_raw(void)
57{
58 return sysctl_perf_event_paranoid > -1;
59}
60
61static inline bool perf_paranoid_cpu(void)
62{
63 return sysctl_perf_event_paranoid > 0;
64}
65
66static inline bool perf_paranoid_kernel(void)
67{
68 return sysctl_perf_event_paranoid > 1;
69}
70
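/*
 * Illustrative sketch: how a privileged-operation check built on the
 * helpers above looks in practice. It mirrors the CAP_SYS_ADMIN test
 * used by find_get_context() further down in this file; the function
 * name itself is hypothetical and only shows the intended pattern.
 */
static inline int perf_cpu_event_allowed(void)
{
	/* paranoia level 1+: unprivileged users may not open CPU-wide events */
	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}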
71int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
72
73/*
74 * max perf event sample rate
75 */
76int sysctl_perf_event_sample_rate __read_mostly = 100000;
77
78static atomic64_t perf_event_id;
79
80/*
81 * Lock for (sysadmin-configurable) event reservations:
82 */
83static DEFINE_SPINLOCK(perf_resource_lock);
84
85/*
86 * Architecture provided APIs - weak aliases:
87 */
88extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
89{
90 return NULL;
91}
92
93void __weak hw_perf_disable(void) { barrier(); }
94void __weak hw_perf_enable(void) { barrier(); }
95
96void __weak hw_perf_event_setup(int cpu) { barrier(); }
97void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
98
99int __weak
100hw_perf_group_sched_in(struct perf_event *group_leader,
101 struct perf_cpu_context *cpuctx,
102 struct perf_event_context *ctx, int cpu)
103{
104 return 0;
105}
106
107void __weak perf_event_print_debug(void) { }
108
109static DEFINE_PER_CPU(int, perf_disable_count);
110
111void __perf_disable(void)
112{
113 __get_cpu_var(perf_disable_count)++;
114}
115
116bool __perf_enable(void)
117{
118 return !--__get_cpu_var(perf_disable_count);
119}
120
121void perf_disable(void)
122{
123 __perf_disable();
124 hw_perf_disable();
125}
126
127void perf_enable(void)
128{
129 if (__perf_enable())
130 hw_perf_enable();
131}
132
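/*
 * Illustrative sketch of how the pair above nests: perf_disable() and
 * perf_enable() sections may be nested freely, and the PMU is only
 * re-enabled when the outermost perf_enable() drops the per-cpu
 * perf_disable_count back to zero. The function below is hypothetical
 * and exists purely to show the calling pattern.
 */
static inline void perf_disable_nesting_sketch(void)
{
	perf_disable();		/* count 0 -> 1, hw_perf_disable() runs  */
	perf_disable();		/* count 1 -> 2, hardware stays disabled */

	/* ... touch PMU state here ... */

	perf_enable();		/* count 2 -> 1, hardware stays disabled */
	perf_enable();		/* count 1 -> 0, hw_perf_enable() runs   */
}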
133static void get_ctx(struct perf_event_context *ctx)
134{
135 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
136}
137
138static void free_ctx(struct rcu_head *head)
139{
140 struct perf_event_context *ctx;
141
142 ctx = container_of(head, struct perf_event_context, rcu_head);
143 kfree(ctx);
144}
145
146static void put_ctx(struct perf_event_context *ctx)
147{
148 if (atomic_dec_and_test(&ctx->refcount)) {
149 if (ctx->parent_ctx)
150 put_ctx(ctx->parent_ctx);
151 if (ctx->task)
152 put_task_struct(ctx->task);
153 call_rcu(&ctx->rcu_head, free_ctx);
154 }
155}
156
157static void unclone_ctx(struct perf_event_context *ctx)
158{
159 if (ctx->parent_ctx) {
160 put_ctx(ctx->parent_ctx);
161 ctx->parent_ctx = NULL;
162 }
163}
164
165/*
166 * If we inherit events we want to return the parent event id
167 * to userspace.
168 */
169static u64 primary_event_id(struct perf_event *event)
170{
171 u64 id = event->id;
172
173 if (event->parent)
174 id = event->parent->id;
175
176 return id;
177}
178
179/*
180 * Get the perf_event_context for a task and lock it.
181 * This has to cope with the fact that until it is locked,
182 * the context could get moved to another task.
183 */
184static struct perf_event_context *
185perf_lock_task_context(struct task_struct *task, unsigned long *flags)
186{
187 struct perf_event_context *ctx;
188
189 rcu_read_lock();
190 retry:
191 ctx = rcu_dereference(task->perf_event_ctxp);
192 if (ctx) {
193 /*
194 * If this context is a clone of another, it might
195 * get swapped for another underneath us by
196 * perf_event_task_sched_out, though the
197 * rcu_read_lock() protects us from any context
198 * getting freed. Lock the context and check if it
199 * got swapped before we could get the lock, and retry
200 * if so. If we locked the right context, then it
201 * can't get swapped on us any more.
202 */
203 spin_lock_irqsave(&ctx->lock, *flags);
204 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
205 spin_unlock_irqrestore(&ctx->lock, *flags);
206 goto retry;
207 }
208
209 if (!atomic_inc_not_zero(&ctx->refcount)) {
210 spin_unlock_irqrestore(&ctx->lock, *flags);
211 ctx = NULL;
212 }
213 }
214 rcu_read_unlock();
215 return ctx;
216}
217
218/*
219 * Get the context for a task and increment its pin_count so it
220 * can't get swapped to another task. This also increments its
221 * reference count so that the context can't get freed.
222 */
223static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
224{
225 struct perf_event_context *ctx;
226 unsigned long flags;
227
228 ctx = perf_lock_task_context(task, &flags);
229 if (ctx) {
230 ++ctx->pin_count;
231 spin_unlock_irqrestore(&ctx->lock, flags);
232 }
233 return ctx;
234}
235
236static void perf_unpin_context(struct perf_event_context *ctx)
237{
238 unsigned long flags;
239
240 spin_lock_irqsave(&ctx->lock, flags);
241 --ctx->pin_count;
242 spin_unlock_irqrestore(&ctx->lock, flags);
243 put_ctx(ctx);
244}
245
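/*
 * Illustrative sketch of the expected pin/unpin pairing: a caller that
 * needs a task's context to stay put (not swapped to another task, not
 * freed) pins it, works on it under ctx->mutex, then unpins it, which
 * also drops the reference taken by perf_pin_task_context(). The
 * function below is hypothetical and only demonstrates the pattern.
 */
static inline void perf_pinned_ctx_sketch(struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = perf_pin_task_context(task);
	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
	/* ... inspect or modify the pinned context here ... */
	mutex_unlock(&ctx->mutex);

	perf_unpin_context(ctx);
}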
246/*
247 * Add an event to the lists for its context.
248 * Must be called with ctx->mutex and ctx->lock held.
249 */
250static void
251list_add_event(struct perf_event *event, struct perf_event_context *ctx)
252{
253 struct perf_event *group_leader = event->group_leader;
254
255 /*
256 * Depending on whether it is a standalone or sibling event,
257 * add it straight to the context's event list, or to the group
258 * leader's sibling list:
259 */
260 if (group_leader == event)
261 list_add_tail(&event->group_entry, &ctx->group_list);
262 else {
263 list_add_tail(&event->group_entry, &group_leader->sibling_list);
264 group_leader->nr_siblings++;
265 }
266
267 list_add_rcu(&event->event_entry, &ctx->event_list);
268 ctx->nr_events++;
269 if (event->attr.inherit_stat)
270 ctx->nr_stat++;
271}
272
273/*
274 * Remove an event from the lists for its context.
275 * Must be called with ctx->mutex and ctx->lock held.
276 */
277static void
278list_del_event(struct perf_event *event, struct perf_event_context *ctx)
279{
280 struct perf_event *sibling, *tmp;
281
282 if (list_empty(&event->group_entry))
283 return;
284 ctx->nr_events--;
285 if (event->attr.inherit_stat)
286 ctx->nr_stat--;
287
288 list_del_init(&event->group_entry);
289 list_del_rcu(&event->event_entry);
290
291 if (event->group_leader != event)
292 event->group_leader->nr_siblings--;
293
294 /*
295 * If this was a group event with sibling events then
296 * upgrade the siblings to singleton events by adding them
297 * to the context list directly:
298 */
299 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
300
301 list_move_tail(&sibling->group_entry, &ctx->group_list);
302 sibling->group_leader = sibling;
303 }
304}
305
306static void
307event_sched_out(struct perf_event *event,
308 struct perf_cpu_context *cpuctx,
309 struct perf_event_context *ctx)
310{
311 if (event->state != PERF_EVENT_STATE_ACTIVE)
312 return;
313
314 event->state = PERF_EVENT_STATE_INACTIVE;
315 if (event->pending_disable) {
316 event->pending_disable = 0;
317 event->state = PERF_EVENT_STATE_OFF;
318 }
319 event->tstamp_stopped = ctx->time;
320 event->pmu->disable(event);
321 event->oncpu = -1;
322
323 if (!is_software_event(event))
324 cpuctx->active_oncpu--;
325 ctx->nr_active--;
326 if (event->attr.exclusive || !cpuctx->active_oncpu)
327 cpuctx->exclusive = 0;
328}
329
330static void
331group_sched_out(struct perf_event *group_event,
332 struct perf_cpu_context *cpuctx,
333 struct perf_event_context *ctx)
334{
335 struct perf_event *event;
336
337 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
338 return;
339
340 event_sched_out(group_event, cpuctx, ctx);
341
342 /*
343 * Schedule out siblings (if any):
344 */
345 list_for_each_entry(event, &group_event->sibling_list, group_entry)
346 event_sched_out(event, cpuctx, ctx);
347
348 if (group_event->attr.exclusive)
349 cpuctx->exclusive = 0;
350}
351
352/*
353 * Cross CPU call to remove a performance event
354 *
355 * We disable the event on the hardware level first. After that we
356 * remove it from the context list.
357 */
358static void __perf_event_remove_from_context(void *info)
359{
360 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
361 struct perf_event *event = info;
362 struct perf_event_context *ctx = event->ctx;
363
364 /*
365 * If this is a task context, we need to check whether it is
366 * the current task context of this cpu. If not it has been
367 * scheduled out before the smp call arrived.
368 */
369 if (ctx->task && cpuctx->task_ctx != ctx)
370 return;
371
372 spin_lock(&ctx->lock);
373 /*
374 * Protect the list operation against NMI by disabling the
375 * events on a global level.
376 */
377 perf_disable();
378
379 event_sched_out(event, cpuctx, ctx);
380
381 list_del_event(event, ctx);
382
383 if (!ctx->task) {
384 /*
385 * Allow more per task events with respect to the
386 * reservation:
387 */
388 cpuctx->max_pertask =
389 min(perf_max_events - ctx->nr_events,
390 perf_max_events - perf_reserved_percpu);
391 }
392
393 perf_enable();
394 spin_unlock(&ctx->lock);
395}
396
397
398/*
399 * Remove the event from a task's (or a CPU's) list of events.
400 *
401 * Must be called with ctx->mutex held.
402 *
403 * CPU events are removed with an smp call. For task events we only
404 * call when the task is on a CPU.
405 *
406 * If event->ctx is a cloned context, callers must make sure that
407 * every task struct that event->ctx->task could possibly point to
408 * remains valid. This is OK when called from perf_release since
409 * that only calls us on the top-level context, which can't be a clone.
410 * When called from perf_event_exit_task, it's OK because the
411 * context has been detached from its task.
412 */
413static void perf_event_remove_from_context(struct perf_event *event)
414{
415 struct perf_event_context *ctx = event->ctx;
416 struct task_struct *task = ctx->task;
417
418 if (!task) {
419 /*
420 * Per cpu events are removed via an smp call and
421 * the removal is always successful.
422 */
423 smp_call_function_single(event->cpu,
424 __perf_event_remove_from_context,
425 event, 1);
426 return;
427 }
428
429retry:
430 task_oncpu_function_call(task, __perf_event_remove_from_context,
431 event);
432
433 spin_lock_irq(&ctx->lock);
434 /*
435 * If the context is active we need to retry the smp call.
436 */
437 if (ctx->nr_active && !list_empty(&event->group_entry)) {
438 spin_unlock_irq(&ctx->lock);
439 goto retry;
440 }
441
442 /*
443 * The lock prevents this context from being scheduled in, so we
444 * can remove the event safely if the call above did not
445 * succeed.
446 */
447 if (!list_empty(&event->group_entry)) {
448 list_del_event(event, ctx);
449 }
450 spin_unlock_irq(&ctx->lock);
451}
452
453static inline u64 perf_clock(void)
454{
455 return cpu_clock(smp_processor_id());
456}
457
458/*
459 * Update the record of the current time in a context.
460 */
461static void update_context_time(struct perf_event_context *ctx)
462{
463 u64 now = perf_clock();
464
465 ctx->time += now - ctx->timestamp;
466 ctx->timestamp = now;
467}
468
469/*
470 * Update the total_time_enabled and total_time_running fields for an event.
471 */
472static void update_event_times(struct perf_event *event)
473{
474 struct perf_event_context *ctx = event->ctx;
475 u64 run_end;
476
477 if (event->state < PERF_EVENT_STATE_INACTIVE ||
478 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
479 return;
480
481 event->total_time_enabled = ctx->time - event->tstamp_enabled;
482
483 if (event->state == PERF_EVENT_STATE_INACTIVE)
484 run_end = event->tstamp_stopped;
485 else
486 run_end = ctx->time;
487
488 event->total_time_running = run_end - event->tstamp_running;
489}
490
491/*
492 * Update total_time_enabled and total_time_running for all events in a group.
493 */
494static void update_group_times(struct perf_event *leader)
495{
496 struct perf_event *event;
497
498 update_event_times(leader);
499 list_for_each_entry(event, &leader->sibling_list, group_entry)
500 update_event_times(event);
501}
502
503/*
504 * Cross CPU call to disable a performance event
505 */
506static void __perf_event_disable(void *info)
507{
508 struct perf_event *event = info;
509 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
510 struct perf_event_context *ctx = event->ctx;
511
512 /*
513 * If this is a per-task event, need to check whether this
514 * event's task is the current task on this cpu.
515 */
516 if (ctx->task && cpuctx->task_ctx != ctx)
517 return;
518
519 spin_lock(&ctx->lock);
520
521 /*
522 * If the event is on, turn it off.
523 * If it is in error state, leave it in error state.
524 */
525 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
526 update_context_time(ctx);
527 update_group_times(event);
528 if (event == event->group_leader)
529 group_sched_out(event, cpuctx, ctx);
530 else
531 event_sched_out(event, cpuctx, ctx);
532 event->state = PERF_EVENT_STATE_OFF;
533 }
534
535 spin_unlock(&ctx->lock);
536}
537
538/*
539 * Disable an event.
540 *
541 * If event->ctx is a cloned context, callers must make sure that
542 * every task struct that event->ctx->task could possibly point to
543 * remains valid. This condition is satisfied when called through
544 * perf_event_for_each_child or perf_event_for_each because they
545 * hold the top-level event's child_mutex, so any descendant that
546 * goes to exit will block in sync_child_event.
547 * When called from perf_pending_event it's OK because event->ctx
548 * is the current context on this CPU and preemption is disabled,
549 * hence we can't get into perf_event_task_sched_out for this context.
550 */
551static void perf_event_disable(struct perf_event *event)
552{
553 struct perf_event_context *ctx = event->ctx;
554 struct task_struct *task = ctx->task;
555
556 if (!task) {
557 /*
558 * Disable the event on the cpu that it's on
559 */
560 smp_call_function_single(event->cpu, __perf_event_disable,
561 event, 1);
562 return;
563 }
564
565 retry:
566 task_oncpu_function_call(task, __perf_event_disable, event);
567
568 spin_lock_irq(&ctx->lock);
569 /*
570 * If the event is still active, we need to retry the cross-call.
571 */
572 if (event->state == PERF_EVENT_STATE_ACTIVE) {
573 spin_unlock_irq(&ctx->lock);
574 goto retry;
575 }
576
577 /*
578 * Since we have the lock this context can't be scheduled
579 * in, so we can change the state safely.
580 */
581 if (event->state == PERF_EVENT_STATE_INACTIVE) {
582 update_group_times(event);
583 event->state = PERF_EVENT_STATE_OFF;
584 }
585
586 spin_unlock_irq(&ctx->lock);
587}
588
589static int
590event_sched_in(struct perf_event *event,
591 struct perf_cpu_context *cpuctx,
592 struct perf_event_context *ctx,
593 int cpu)
594{
595 if (event->state <= PERF_EVENT_STATE_OFF)
596 return 0;
597
598 event->state = PERF_EVENT_STATE_ACTIVE;
599 event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
600 /*
601 * The new state must be visible before we turn it on in the hardware:
602 */
603 smp_wmb();
604
605 if (event->pmu->enable(event)) {
606 event->state = PERF_EVENT_STATE_INACTIVE;
607 event->oncpu = -1;
608 return -EAGAIN;
609 }
610
611 event->tstamp_running += ctx->time - event->tstamp_stopped;
612
613 if (!is_software_event(event))
614 cpuctx->active_oncpu++;
615 ctx->nr_active++;
616
617 if (event->attr.exclusive)
618 cpuctx->exclusive = 1;
619
620 return 0;
621}
622
623static int
624group_sched_in(struct perf_event *group_event,
625 struct perf_cpu_context *cpuctx,
626 struct perf_event_context *ctx,
627 int cpu)
628{
629 struct perf_event *event, *partial_group;
630 int ret;
631
632 if (group_event->state == PERF_EVENT_STATE_OFF)
633 return 0;
634
635 ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
636 if (ret)
637 return ret < 0 ? ret : 0;
638
639 if (event_sched_in(group_event, cpuctx, ctx, cpu))
640 return -EAGAIN;
641
642 /*
643 * Schedule in siblings as one group (if any):
644 */
645 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
646 if (event_sched_in(event, cpuctx, ctx, cpu)) {
647 partial_group = event;
648 goto group_error;
649 }
650 }
651
652 return 0;
653
654group_error:
655 /*
656 * Groups can be scheduled in as one unit only, so undo any
657 * partial group before returning:
658 */
659 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
660 if (event == partial_group)
661 break;
662 event_sched_out(event, cpuctx, ctx);
663 }
664 event_sched_out(group_event, cpuctx, ctx);
665
666 return -EAGAIN;
667}
668
669/*
670 * Return 1 for a group consisting entirely of software events,
671 * 0 if the group contains any hardware events.
672 */
673static int is_software_only_group(struct perf_event *leader)
674{
675 struct perf_event *event;
676
677 if (!is_software_event(leader))
678 return 0;
679
680 list_for_each_entry(event, &leader->sibling_list, group_entry)
681 if (!is_software_event(event))
682 return 0;
683
684 return 1;
685}
686
687/*
688 * Work out whether we can put this event group on the CPU now.
689 */
690static int group_can_go_on(struct perf_event *event,
691 struct perf_cpu_context *cpuctx,
692 int can_add_hw)
693{
694 /*
695 * Groups consisting entirely of software events can always go on.
696 */
697 if (is_software_only_group(event))
698 return 1;
699 /*
700 * If an exclusive group is already on, no other hardware
701 * events can go on.
702 */
703 if (cpuctx->exclusive)
704 return 0;
705 /*
706 * If this group is exclusive and there are already
707 * events on the CPU, it can't go on.
708 */
709 if (event->attr.exclusive && cpuctx->active_oncpu)
710 return 0;
711 /*
712 * Otherwise, try to add it if all previous groups were able
713 * to go on.
714 */
715 return can_add_hw;
716}
717
718static void add_event_to_ctx(struct perf_event *event,
719 struct perf_event_context *ctx)
720{
721 list_add_event(event, ctx);
722 event->tstamp_enabled = ctx->time;
723 event->tstamp_running = ctx->time;
724 event->tstamp_stopped = ctx->time;
725}
726
727/*
728 * Cross CPU call to install and enable a performance event
729 *
730 * Must be called with ctx->mutex held
731 */
732static void __perf_install_in_context(void *info)
733{
734 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
735 struct perf_event *event = info;
736 struct perf_event_context *ctx = event->ctx;
737 struct perf_event *leader = event->group_leader;
738 int cpu = smp_processor_id();
739 int err;
740
741 /*
742 * If this is a task context, we need to check whether it is
743 * the current task context of this cpu. If not it has been
744 * scheduled out before the smp call arrived.
745 * Or possibly this is the right context but it isn't
746 * on this cpu because it had no events.
747 */
748 if (ctx->task && cpuctx->task_ctx != ctx) {
749 if (cpuctx->task_ctx || ctx->task != current)
750 return;
751 cpuctx->task_ctx = ctx;
752 }
753
754 spin_lock(&ctx->lock);
755 ctx->is_active = 1;
756 update_context_time(ctx);
757
758 /*
759 * Protect the list operation against NMI by disabling the
760 * events on a global level. NOP for non NMI based events.
761 */
762 perf_disable();
763
764 add_event_to_ctx(event, ctx);
765
766 /*
767 * Don't put the event on if it is disabled or if
768 * it is in a group and the group isn't on.
769 */
770 if (event->state != PERF_EVENT_STATE_INACTIVE ||
771 (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
772 goto unlock;
773
774 /*
775 * An exclusive event can't go on if there are already active
776 * hardware events, and no hardware event can go on if there
777 * is already an exclusive event on.
778 */
779 if (!group_can_go_on(event, cpuctx, 1))
780 err = -EEXIST;
781 else
782 err = event_sched_in(event, cpuctx, ctx, cpu);
783
784 if (err) {
785 /*
786 * This event couldn't go on. If it is in a group
787 * then we have to pull the whole group off.
788 * If the event group is pinned then put it in error state.
789 */
790 if (leader != event)
791 group_sched_out(leader, cpuctx, ctx);
792 if (leader->attr.pinned) {
793 update_group_times(leader);
794 leader->state = PERF_EVENT_STATE_ERROR;
795 }
796 }
797
798 if (!err && !ctx->task && cpuctx->max_pertask)
799 cpuctx->max_pertask--;
800
801 unlock:
802 perf_enable();
803
804 spin_unlock(&ctx->lock);
805}
806
807/*
808 * Attach a performance event to a context
809 *
810 * First we add the event to the list with the hardware enable bit
811 * in event->hw_config cleared.
812 *
813 * If the event is attached to a task which is on a CPU we use a smp
814 * call to enable it in the task context. The task might have been
815 * scheduled away, but we check this in the smp call again.
816 *
817 * Must be called with ctx->mutex held.
818 */
819static void
820perf_install_in_context(struct perf_event_context *ctx,
821 struct perf_event *event,
822 int cpu)
823{
824 struct task_struct *task = ctx->task;
825
826 if (!task) {
827 /*
828 * Per cpu events are installed via an smp call and
829 * the install is always successful.
830 */
831 smp_call_function_single(cpu, __perf_install_in_context,
832 event, 1);
833 return;
834 }
835
836retry:
837 task_oncpu_function_call(task, __perf_install_in_context,
838 event);
839
840 spin_lock_irq(&ctx->lock);
841 /*
842 * we need to retry the smp call.
843 */
844 if (ctx->is_active && list_empty(&event->group_entry)) {
845 spin_unlock_irq(&ctx->lock);
846 goto retry;
847 }
848
849 /*
850 * The lock prevents this context from being scheduled in, so we
851 * can add the event safely if the call above did not
852 * succeed.
853 */
854 if (list_empty(&event->group_entry))
855 add_event_to_ctx(event, ctx);
856 spin_unlock_irq(&ctx->lock);
857}
858
859/*
860 * Put an event into inactive state and update time fields.
861 * Enabling the leader of a group effectively enables all
862 * the group members that aren't explicitly disabled, so we
863 * have to update their ->tstamp_enabled also.
864 * Note: this works for group members as well as group leaders
865 * since the non-leader members' sibling_lists will be empty.
866 */
867static void __perf_event_mark_enabled(struct perf_event *event,
868 struct perf_event_context *ctx)
869{
870 struct perf_event *sub;
871
872 event->state = PERF_EVENT_STATE_INACTIVE;
873 event->tstamp_enabled = ctx->time - event->total_time_enabled;
874 list_for_each_entry(sub, &event->sibling_list, group_entry)
875 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
876 sub->tstamp_enabled =
877 ctx->time - sub->total_time_enabled;
878}
879
880/*
881 * Cross CPU call to enable a performance event
882 */
883static void __perf_event_enable(void *info)
884{
885 struct perf_event *event = info;
886 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
887 struct perf_event_context *ctx = event->ctx;
888 struct perf_event *leader = event->group_leader;
889 int err;
890
891 /*
892 * If this is a per-task event, need to check whether this
893 * event's task is the current task on this cpu.
894 */
895 if (ctx->task && cpuctx->task_ctx != ctx) {
896 if (cpuctx->task_ctx || ctx->task != current)
897 return;
898 cpuctx->task_ctx = ctx;
899 }
900
901 spin_lock(&ctx->lock);
902 ctx->is_active = 1;
903 update_context_time(ctx);
904
905 if (event->state >= PERF_EVENT_STATE_INACTIVE)
906 goto unlock;
907 __perf_event_mark_enabled(event, ctx);
908
909 /*
910 * If the event is in a group and isn't the group leader,
911 * then don't put it on unless the group is on.
912 */
913 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
914 goto unlock;
915
916 if (!group_can_go_on(event, cpuctx, 1)) {
917 err = -EEXIST;
918 } else {
919 perf_disable();
920 if (event == leader)
921 err = group_sched_in(event, cpuctx, ctx,
922 smp_processor_id());
923 else
924 err = event_sched_in(event, cpuctx, ctx,
925 smp_processor_id());
926 perf_enable();
927 }
928
929 if (err) {
930 /*
931 * If this event can't go on and it's part of a
932 * group, then the whole group has to come off.
933 */
934 if (leader != event)
935 group_sched_out(leader, cpuctx, ctx);
936 if (leader->attr.pinned) {
937 update_group_times(leader);
938 leader->state = PERF_EVENT_STATE_ERROR;
939 }
940 }
941
942 unlock:
943 spin_unlock(&ctx->lock);
944}
945
946/*
947 * Enable an event.
948 *
949 * If event->ctx is a cloned context, callers must make sure that
950 * every task struct that event->ctx->task could possibly point to
951 * remains valid. This condition is satisfied when called through
952 * perf_event_for_each_child or perf_event_for_each as described
953 * for perf_event_disable.
954 */
955static void perf_event_enable(struct perf_event *event)
956{
957 struct perf_event_context *ctx = event->ctx;
958 struct task_struct *task = ctx->task;
959
960 if (!task) {
961 /*
962 * Enable the event on the cpu that it's on
963 */
964 smp_call_function_single(event->cpu, __perf_event_enable,
965 event, 1);
966 return;
967 }
968
969 spin_lock_irq(&ctx->lock);
970 if (event->state >= PERF_EVENT_STATE_INACTIVE)
971 goto out;
972
973 /*
974 * If the event is in error state, clear that first.
975 * That way, if we see the event in error state below, we
976 * know that it has gone back into error state, as distinct
977 * from the task having been scheduled away before the
978 * cross-call arrived.
979 */
980 if (event->state == PERF_EVENT_STATE_ERROR)
981 event->state = PERF_EVENT_STATE_OFF;
982
983 retry:
984 spin_unlock_irq(&ctx->lock);
985 task_oncpu_function_call(task, __perf_event_enable, event);
986
987 spin_lock_irq(&ctx->lock);
988
989 /*
990 * If the context is active and the event is still off,
991 * we need to retry the cross-call.
992 */
993 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
994 goto retry;
995
996 /*
997 * Since we have the lock this context can't be scheduled
998 * in, so we can change the state safely.
999 */
1000 if (event->state == PERF_EVENT_STATE_OFF)
1001 __perf_event_mark_enabled(event, ctx);
1002
1003 out:
1004 spin_unlock_irq(&ctx->lock);
1005}
1006
1007static int perf_event_refresh(struct perf_event *event, int refresh)
1008{
1009 /*
1010 * not supported on inherited events
1011 */
1012 if (event->attr.inherit)
1013 return -EINVAL;
1014
1015 atomic_add(refresh, &event->event_limit);
1016 perf_event_enable(event);
1017
1018 return 0;
1019}
1020
1021void __perf_event_sched_out(struct perf_event_context *ctx,
1022 struct perf_cpu_context *cpuctx)
1023{
1024 struct perf_event *event;
1025
1026 spin_lock(&ctx->lock);
1027 ctx->is_active = 0;
1028 if (likely(!ctx->nr_events))
1029 goto out;
1030 update_context_time(ctx);
1031
1032 perf_disable();
1033 if (ctx->nr_active) {
1034 list_for_each_entry(event, &ctx->group_list, group_entry) {
1035 if (event != event->group_leader)
1036 event_sched_out(event, cpuctx, ctx);
1037 else
1038 group_sched_out(event, cpuctx, ctx);
1039 }
1040 }
1041 perf_enable();
1042 out:
1043 spin_unlock(&ctx->lock);
1044}
1045
1046/*
1047 * Test whether two contexts are equivalent, i.e. whether they
1048 * have both been cloned from the same version of the same context
1049 * and they both have the same number of enabled events.
1050 * If the number of enabled events is the same, then the set
1051 * of enabled events should be the same, because these are both
1052 * inherited contexts, therefore we can't access individual events
1053 * in them directly with an fd; we can only enable/disable all
1054 * events via prctl, or enable/disable all events in a family
1055 * via ioctl, which will have the same effect on both contexts.
1056 */
1057static int context_equiv(struct perf_event_context *ctx1,
1058 struct perf_event_context *ctx2)
1059{
1060 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1061 && ctx1->parent_gen == ctx2->parent_gen
1062 && !ctx1->pin_count && !ctx2->pin_count;
1063}
1064
1065static void __perf_event_read(void *event);
1066
1067static void __perf_event_sync_stat(struct perf_event *event,
1068 struct perf_event *next_event)
1069{
1070 u64 value;
1071
1072 if (!event->attr.inherit_stat)
1073 return;
1074
1075 /*
1076 * Update the event value, we cannot use perf_event_read()
1077 * because we're in the middle of a context switch and have IRQs
1078 * disabled, which upsets smp_call_function_single(), however
1079 * we know the event must be on the current CPU, therefore we
1080 * don't need to use it.
1081 */
1082 switch (event->state) {
1083 case PERF_EVENT_STATE_ACTIVE:
1084 __perf_event_read(event);
1085 break;
1086
1087 case PERF_EVENT_STATE_INACTIVE:
1088 update_event_times(event);
1089 break;
1090
1091 default:
1092 break;
1093 }
1094
1095 /*
1096 * In order to keep per-task stats reliable we need to flip the event
1097 * values when we flip the contexts.
1098 */
1099 value = atomic64_read(&next_event->count);
1100 value = atomic64_xchg(&event->count, value);
1101 atomic64_set(&next_event->count, value);
1102
1103 swap(event->total_time_enabled, next_event->total_time_enabled);
1104 swap(event->total_time_running, next_event->total_time_running);
1105
1106 /*
1107 * Since we swizzled the values, update the user visible data too.
1108 */
1109 perf_event_update_userpage(event);
1110 perf_event_update_userpage(next_event);
1111}
1112
1113#define list_next_entry(pos, member) \
1114 list_entry(pos->member.next, typeof(*pos), member)
1115
1116static void perf_event_sync_stat(struct perf_event_context *ctx,
1117 struct perf_event_context *next_ctx)
1118{
1119 struct perf_event *event, *next_event;
1120
1121 if (!ctx->nr_stat)
1122 return;
1123
1124 event = list_first_entry(&ctx->event_list,
1125 struct perf_event, event_entry);
1126
1127 next_event = list_first_entry(&next_ctx->event_list,
1128 struct perf_event, event_entry);
1129
1130 while (&event->event_entry != &ctx->event_list &&
1131 &next_event->event_entry != &next_ctx->event_list) {
1132
1133 __perf_event_sync_stat(event, next_event);
1134
1135 event = list_next_entry(event, event_entry);
1136 next_event = list_next_entry(next_event, event_entry);
1137 }
1138}
1139
1140/*
1141 * Called from scheduler to remove the events of the current task,
1142 * with interrupts disabled.
1143 *
1144 * We stop each event and update the event value in event->count.
1145 *
1146 * This does not protect us against NMI, but disable()
1147 * sets the disabled bit in the control field of event _before_
1148 * accessing the event control register. If an NMI hits, then it will
1149 * not restart the event.
1150 */
1151void perf_event_task_sched_out(struct task_struct *task,
1152 struct task_struct *next, int cpu)
1153{
1154 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1155 struct perf_event_context *ctx = task->perf_event_ctxp;
1156 struct perf_event_context *next_ctx;
1157 struct perf_event_context *parent;
1158 struct pt_regs *regs;
1159 int do_switch = 1;
1160
1161 regs = task_pt_regs(task);
1162 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1163
1164 if (likely(!ctx || !cpuctx->task_ctx))
1165 return;
1166
1167 update_context_time(ctx);
1168
1169 rcu_read_lock();
1170 parent = rcu_dereference(ctx->parent_ctx);
1171 next_ctx = next->perf_event_ctxp;
1172 if (parent && next_ctx &&
1173 rcu_dereference(next_ctx->parent_ctx) == parent) {
1174 /*
1175 * Looks like the two contexts are clones, so we might be
1176 * able to optimize the context switch. We lock both
1177 * contexts and check that they are clones under the
1178 * lock (including re-checking that neither has been
1179 * uncloned in the meantime). It doesn't matter which
1180 * order we take the locks because no other cpu could
1181 * be trying to lock both of these tasks.
1182 */
1183 spin_lock(&ctx->lock);
1184 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1185 if (context_equiv(ctx, next_ctx)) {
1186 /*
1187 * XXX do we need a memory barrier of sorts
1188 * wrt rcu_dereference() of perf_event_ctxp
1189 */
1190 task->perf_event_ctxp = next_ctx;
1191 next->perf_event_ctxp = ctx;
1192 ctx->task = next;
1193 next_ctx->task = task;
1194 do_switch = 0;
1195
1196 perf_event_sync_stat(ctx, next_ctx);
1197 }
1198 spin_unlock(&next_ctx->lock);
1199 spin_unlock(&ctx->lock);
1200 }
1201 rcu_read_unlock();
1202
1203 if (do_switch) {
1204 __perf_event_sched_out(ctx, cpuctx);
1205 cpuctx->task_ctx = NULL;
1206 }
1207}
1208
1209/*
1210 * Called with IRQs disabled
1211 */
1212static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1213{
1214 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1215
1216 if (!cpuctx->task_ctx)
1217 return;
1218
1219 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1220 return;
1221
1222 __perf_event_sched_out(ctx, cpuctx);
1223 cpuctx->task_ctx = NULL;
1224}
1225
1226/*
1227 * Called with IRQs disabled
1228 */
1229static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
1230{
1231 __perf_event_sched_out(&cpuctx->ctx, cpuctx);
1232}
1233
1234static void
1235__perf_event_sched_in(struct perf_event_context *ctx,
1236 struct perf_cpu_context *cpuctx, int cpu)
1237{
1238 struct perf_event *event;
1239 int can_add_hw = 1;
1240
1241 spin_lock(&ctx->lock);
1242 ctx->is_active = 1;
1243 if (likely(!ctx->nr_events))
1244 goto out;
1245
1246 ctx->timestamp = perf_clock();
1247
1248 perf_disable();
1249
1250 /*
1251 * First go through the list and put on any pinned groups
1252 * in order to give them the best chance of going on.
1253 */
1254 list_for_each_entry(event, &ctx->group_list, group_entry) {
1255 if (event->state <= PERF_EVENT_STATE_OFF ||
1256 !event->attr.pinned)
1257 continue;
1258 if (event->cpu != -1 && event->cpu != cpu)
1259 continue;
1260
1261 if (event != event->group_leader)
1262 event_sched_in(event, cpuctx, ctx, cpu);
1263 else {
1264 if (group_can_go_on(event, cpuctx, 1))
1265 group_sched_in(event, cpuctx, ctx, cpu);
1266 }
1267
1268 /*
1269 * If this pinned group hasn't been scheduled,
1270 * put it in error state.
1271 */
1272 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1273 update_group_times(event);
1274 event->state = PERF_EVENT_STATE_ERROR;
1275 }
1276 }
1277
1278 list_for_each_entry(event, &ctx->group_list, group_entry) {
1279 /*
1280 * Ignore events in OFF or ERROR state, and
1281 * ignore pinned events since we did them already.
1282 */
1283 if (event->state <= PERF_EVENT_STATE_OFF ||
1284 event->attr.pinned)
1285 continue;
1286
1287 /*
1288 * Listen to the 'cpu' scheduling filter constraint
1289 * of events:
1290 */
1291 if (event->cpu != -1 && event->cpu != cpu)
1292 continue;
1293
1294 if (event != event->group_leader) {
1295 if (event_sched_in(event, cpuctx, ctx, cpu))
1296 can_add_hw = 0;
1297 } else {
1298 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1299 if (group_sched_in(event, cpuctx, ctx, cpu))
1300 can_add_hw = 0;
1301 }
1302 }
1303 }
1304 perf_enable();
1305 out:
1306 spin_unlock(&ctx->lock);
1307}
1308
1309/*
1310 * Called from scheduler to add the events of the current task
1311 * with interrupts disabled.
1312 *
1313 * We restore the event value and then enable it.
1314 *
1315 * This does not protect us against NMI, but enable()
1316 * sets the enabled bit in the control field of event _before_
1317 * accessing the event control register. If an NMI hits, then it will
1318 * keep the event running.
1319 */
1320void perf_event_task_sched_in(struct task_struct *task, int cpu)
1321{
1322 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1323 struct perf_event_context *ctx = task->perf_event_ctxp;
1324
1325 if (likely(!ctx))
1326 return;
1327 if (cpuctx->task_ctx == ctx)
1328 return;
1329 __perf_event_sched_in(ctx, cpuctx, cpu);
1330 cpuctx->task_ctx = ctx;
1331}
1332
1333static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1334{
1335 struct perf_event_context *ctx = &cpuctx->ctx;
1336
1337 __perf_event_sched_in(ctx, cpuctx, cpu);
1338}
1339
1340#define MAX_INTERRUPTS (~0ULL)
1341
1342static void perf_log_throttle(struct perf_event *event, int enable);
1343
1344static void perf_adjust_period(struct perf_event *event, u64 events)
1345{
1346 struct hw_perf_event *hwc = &event->hw;
1347 u64 period, sample_period;
1348 s64 delta;
1349
1350 events *= hwc->sample_period;
1351 period = div64_u64(events, event->attr.sample_freq);
1352
1353 delta = (s64)(period - hwc->sample_period);
1354 delta = (delta + 7) / 8; /* low pass filter */
1355
1356 sample_period = hwc->sample_period + delta;
1357
1358 if (!sample_period)
1359 sample_period = 1;
1360
1361 hwc->sample_period = sample_period;
1362}
1363
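/*
 * Worked example for the adjustment above (all numbers hypothetical):
 * with attr.sample_freq = 1000 and a current sample_period of 100000,
 * suppose the last tick extrapolates to 2000 interrupts/sec:
 *
 *   events        = 2000 * 100000    = 200000000 events/sec
 *   period        = 200000000 / 1000 = 200000    (ideal period)
 *   delta         = 200000 - 100000  = 100000
 *   (delta + 7)/8 =                    12500     (low-pass filter)
 *   sample_period = 100000 + 12500   = 112500
 *
 * so each invocation moves the period roughly 1/8th of the way toward
 * the value that would yield the requested sample frequency.
 */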
1364static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1365{
1366 struct perf_event *event;
1367 struct hw_perf_event *hwc;
1368 u64 interrupts, freq;
1369
1370 spin_lock(&ctx->lock);
1371 list_for_each_entry(event, &ctx->group_list, group_entry) {
1372 if (event->state != PERF_EVENT_STATE_ACTIVE)
1373 continue;
1374
1375 hwc = &event->hw;
1376
1377 interrupts = hwc->interrupts;
1378 hwc->interrupts = 0;
1379
1380 /*
1381 * unthrottle events on the tick
1382 */
1383 if (interrupts == MAX_INTERRUPTS) {
1384 perf_log_throttle(event, 1);
1385 event->pmu->unthrottle(event);
1386 interrupts = 2*sysctl_perf_event_sample_rate/HZ;
1387 }
1388
1389 if (!event->attr.freq || !event->attr.sample_freq)
1390 continue;
1391
1392 /*
1393 * if the specified freq < HZ then we need to skip ticks
1394 */
1395 if (event->attr.sample_freq < HZ) {
1396 freq = event->attr.sample_freq;
1397
1398 hwc->freq_count += freq;
1399 hwc->freq_interrupts += interrupts;
1400
1401 if (hwc->freq_count < HZ)
1402 continue;
1403
1404 interrupts = hwc->freq_interrupts;
1405 hwc->freq_interrupts = 0;
1406 hwc->freq_count -= HZ;
1407 } else
1408 freq = HZ;
1409
1410 perf_adjust_period(event, freq * interrupts);
1411
1412 /*
1413 * In order to avoid being stalled by an (accidental) huge
1414 * sample period, force reset the sample period if we didn't
1415 * get any events in this freq period.
1416 */
1417 if (!interrupts) {
1418 perf_disable();
1419 event->pmu->disable(event);
1420 atomic64_set(&hwc->period_left, 0);
1421 event->pmu->enable(event);
1422 perf_enable();
1423 }
1424 }
1425 spin_unlock(&ctx->lock);
1426}
1427
1428/*
1429 * Round-robin a context's events:
1430 */
1431static void rotate_ctx(struct perf_event_context *ctx)
1432{
1433 struct perf_event *event;
1434
1435 if (!ctx->nr_events)
1436 return;
1437
1438 spin_lock(&ctx->lock);
1439 /*
1440 * Rotate the first entry last (works just fine for group events too):
1441 */
1442 perf_disable();
1443 list_for_each_entry(event, &ctx->group_list, group_entry) {
1444 list_move_tail(&event->group_entry, &ctx->group_list);
1445 break;
1446 }
1447 perf_enable();
1448
1449 spin_unlock(&ctx->lock);
1450}
1451
1452void perf_event_task_tick(struct task_struct *curr, int cpu)
1453{
1454 struct perf_cpu_context *cpuctx;
1455 struct perf_event_context *ctx;
1456
1457 if (!atomic_read(&nr_events))
1458 return;
1459
1460 cpuctx = &per_cpu(perf_cpu_context, cpu);
1461 ctx = curr->perf_event_ctxp;
1462
1463 perf_ctx_adjust_freq(&cpuctx->ctx);
1464 if (ctx)
1465 perf_ctx_adjust_freq(ctx);
1466
1467 perf_event_cpu_sched_out(cpuctx);
1468 if (ctx)
1469 __perf_event_task_sched_out(ctx);
1470
1471 rotate_ctx(&cpuctx->ctx);
1472 if (ctx)
1473 rotate_ctx(ctx);
1474
1475 perf_event_cpu_sched_in(cpuctx, cpu);
1476 if (ctx)
1477 perf_event_task_sched_in(curr, cpu);
1478}
1479
1480/*
1481 * Enable all of a task's events that have been marked enable-on-exec.
1482 * This expects task == current.
1483 */
1484static void perf_event_enable_on_exec(struct task_struct *task)
1485{
1486 struct perf_event_context *ctx;
1487 struct perf_event *event;
1488 unsigned long flags;
1489 int enabled = 0;
1490
1491 local_irq_save(flags);
1492 ctx = task->perf_event_ctxp;
1493 if (!ctx || !ctx->nr_events)
1494 goto out;
1495
1496 __perf_event_task_sched_out(ctx);
1497
1498 spin_lock(&ctx->lock);
1499
1500 list_for_each_entry(event, &ctx->group_list, group_entry) {
1501 if (!event->attr.enable_on_exec)
1502 continue;
1503 event->attr.enable_on_exec = 0;
1504 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1505 continue;
1506 __perf_event_mark_enabled(event, ctx);
1507 enabled = 1;
1508 }
1509
1510 /*
1511 * Unclone this context if we enabled any event.
1512 */
1513 if (enabled)
1514 unclone_ctx(ctx);
1515
1516 spin_unlock(&ctx->lock);
1517
1518 perf_event_task_sched_in(task, smp_processor_id());
1519 out:
1520 local_irq_restore(flags);
1521}
1522
1523/*
1524 * Cross CPU call to read the hardware event
1525 */
1526static void __perf_event_read(void *info)
1527{
1528 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1529 struct perf_event *event = info;
1530 struct perf_event_context *ctx = event->ctx;
1531 unsigned long flags;
1532
1533 /*
1534 * If this is a task context, we need to check whether it is
1535 * the current task context of this cpu. If not it has been
1536 * scheduled out before the smp call arrived. In that case
1537 * event->count would have been updated to a recent sample
1538 * when the event was scheduled out.
1539 */
1540 if (ctx->task && cpuctx->task_ctx != ctx)
1541 return;
1542
1543 local_irq_save(flags);
1544 if (ctx->is_active)
1545 update_context_time(ctx);
1546 event->pmu->read(event);
1547 update_event_times(event);
1548 local_irq_restore(flags);
1549}
1550
1551static u64 perf_event_read(struct perf_event *event)
1552{
1553 /*
1554 * If event is enabled and currently active on a CPU, update the
1555 * value in the event structure:
1556 */
1557 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1558 smp_call_function_single(event->oncpu,
1559 __perf_event_read, event, 1);
1560 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1561 update_event_times(event);
1562 }
1563
1564 return atomic64_read(&event->count);
1565}
1566
1567/*
1568 * Initialize the perf_event context in a task_struct:
1569 */
1570static void
1571__perf_event_init_context(struct perf_event_context *ctx,
1572 struct task_struct *task)
1573{
1574 memset(ctx, 0, sizeof(*ctx));
1575 spin_lock_init(&ctx->lock);
1576 mutex_init(&ctx->mutex);
1577 INIT_LIST_HEAD(&ctx->group_list);
1578 INIT_LIST_HEAD(&ctx->event_list);
1579 atomic_set(&ctx->refcount, 1);
1580 ctx->task = task;
1581}
1582
1583static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1584{
1585 struct perf_event_context *ctx;
1586 struct perf_cpu_context *cpuctx;
1587 struct task_struct *task;
1588 unsigned long flags;
1589 int err;
1590
1591 /*
1592 * If cpu is not a wildcard then this is a percpu event:
1593 */
1594 if (cpu != -1) {
1595 /* Must be root to operate on a CPU event: */
1596 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1597 return ERR_PTR(-EACCES);
1598
1599 if (cpu < 0 || cpu > num_possible_cpus())
1600 return ERR_PTR(-EINVAL);
1601
1602 /*
1603 * We could be clever and allow attaching an event to an
1604 * offline CPU and activate it when the CPU comes up, but
1605 * that's for later.
1606 */
1607 if (!cpu_isset(cpu, cpu_online_map))
1608 return ERR_PTR(-ENODEV);
1609
1610 cpuctx = &per_cpu(perf_cpu_context, cpu);
1611 ctx = &cpuctx->ctx;
1612 get_ctx(ctx);
1613
1614 return ctx;
1615 }
1616
1617 rcu_read_lock();
1618 if (!pid)
1619 task = current;
1620 else
1621 task = find_task_by_vpid(pid);
1622 if (task)
1623 get_task_struct(task);
1624 rcu_read_unlock();
1625
1626 if (!task)
1627 return ERR_PTR(-ESRCH);
1628
1629 /*
1630 * Can't attach events to a dying task.
1631 */
1632 err = -ESRCH;
1633 if (task->flags & PF_EXITING)
1634 goto errout;
1635
1636 /* Reuse ptrace permission checks for now. */
1637 err = -EACCES;
1638 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1639 goto errout;
1640
1641 retry:
1642 ctx = perf_lock_task_context(task, &flags);
1643 if (ctx) {
1644 unclone_ctx(ctx);
1645 spin_unlock_irqrestore(&ctx->lock, flags);
1646 }
1647
1648 if (!ctx) {
1649 ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
1650 err = -ENOMEM;
1651 if (!ctx)
1652 goto errout;
1653 __perf_event_init_context(ctx, task);
1654 get_ctx(ctx);
1655 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1656 /*
1657 * We raced with some other task; use
1658 * the context they set.
1659 */
1660 kfree(ctx);
1661 goto retry;
1662 }
1663 get_task_struct(task);
1664 }
1665
1666 put_task_struct(task);
1667 return ctx;
1668
1669 errout:
1670 put_task_struct(task);
1671 return ERR_PTR(err);
1672}
1673
1674static void free_event_rcu(struct rcu_head *head)
1675{
1676 struct perf_event *event;
1677
1678 event = container_of(head, struct perf_event, rcu_head);
1679 if (event->ns)
1680 put_pid_ns(event->ns);
1681 kfree(event);
1682}
1683
1684static void perf_pending_sync(struct perf_event *event);
1685
1686static void free_event(struct perf_event *event)
1687{
1688 perf_pending_sync(event);
1689
1690 if (!event->parent) {
1691 atomic_dec(&nr_events);
1692 if (event->attr.mmap)
1693 atomic_dec(&nr_mmap_events);
1694 if (event->attr.comm)
1695 atomic_dec(&nr_comm_events);
1696 if (event->attr.task)
1697 atomic_dec(&nr_task_events);
1698 }
1699
1700 if (event->output) {
1701 fput(event->output->filp);
1702 event->output = NULL;
1703 }
1704
1705 if (event->destroy)
1706 event->destroy(event);
1707
1708 put_ctx(event->ctx);
1709 call_rcu(&event->rcu_head, free_event_rcu);
1710}
1711
1712/*
1713 * Called when the last reference to the file is gone.
1714 */
1715static int perf_release(struct inode *inode, struct file *file)
1716{
1717 struct perf_event *event = file->private_data;
1718 struct perf_event_context *ctx = event->ctx;
1719
1720 file->private_data = NULL;
1721
1722 WARN_ON_ONCE(ctx->parent_ctx);
1723 mutex_lock(&ctx->mutex);
1724 perf_event_remove_from_context(event);
1725 mutex_unlock(&ctx->mutex);
1726
1727 mutex_lock(&event->owner->perf_event_mutex);
1728 list_del_init(&event->owner_entry);
1729 mutex_unlock(&event->owner->perf_event_mutex);
1730 put_task_struct(event->owner);
1731
1732 free_event(event);
1733
1734 return 0;
1735}
1736
1737static int perf_event_read_size(struct perf_event *event)
1738{
1739 int entry = sizeof(u64); /* value */
1740 int size = 0;
1741 int nr = 1;
1742
1743 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1744 size += sizeof(u64);
1745
1746 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1747 size += sizeof(u64);
1748
1749 if (event->attr.read_format & PERF_FORMAT_ID)
1750 entry += sizeof(u64);
1751
1752 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1753 nr += event->group_leader->nr_siblings;
1754 size += sizeof(u64);
1755 }
1756
1757 size += entry * nr;
1758
1759 return size;
1760}
1761
1762static u64 perf_event_read_value(struct perf_event *event)
1763{
1764 struct perf_event *child;
1765 u64 total = 0;
1766
1767 total += perf_event_read(event);
1768 list_for_each_entry(child, &event->child_list, child_list)
1769 total += perf_event_read(child);
1770
1771 return total;
1772}
1773
1774static int perf_event_read_entry(struct perf_event *event,
1775 u64 read_format, char __user *buf)
1776{
1777 int n = 0, count = 0;
1778 u64 values[2];
1779
1780 values[n++] = perf_event_read_value(event);
1781 if (read_format & PERF_FORMAT_ID)
1782 values[n++] = primary_event_id(event);
1783
1784 count = n * sizeof(u64);
1785
1786 if (copy_to_user(buf, values, count))
1787 return -EFAULT;
1788
1789 return count;
1790}
1791
1792static int perf_event_read_group(struct perf_event *event,
1793 u64 read_format, char __user *buf)
1794{
1795 struct perf_event *leader = event->group_leader, *sub;
1796 int n = 0, size = 0, err = -EFAULT;
1797 u64 values[3];
1798
1799 values[n++] = 1 + leader->nr_siblings;
1800 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1801 values[n++] = leader->total_time_enabled +
1802 atomic64_read(&leader->child_total_time_enabled);
1803 }
1804 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1805 values[n++] = leader->total_time_running +
1806 atomic64_read(&leader->child_total_time_running);
1807 }
1808
1809 size = n * sizeof(u64);
1810
1811 if (copy_to_user(buf, values, size))
1812 return -EFAULT;
1813
1814 err = perf_event_read_entry(leader, read_format, buf + size);
1815 if (err < 0)
1816 return err;
1817
1818 size += err;
1819
1820 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1821 err = perf_event_read_entry(sub, read_format,
1822 buf + size);
1823 if (err < 0)
1824 return err;
1825
1826 size += err;
1827 }
1828
1829 return size;
1830}
1831
1832static int perf_event_read_one(struct perf_event *event,
1833 u64 read_format, char __user *buf)
1834{
1835 u64 values[4];
1836 int n = 0;
1837
1838 values[n++] = perf_event_read_value(event);
1839 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1840 values[n++] = event->total_time_enabled +
1841 atomic64_read(&event->child_total_time_enabled);
1842 }
1843 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1844 values[n++] = event->total_time_running +
1845 atomic64_read(&event->child_total_time_running);
1846 }
1847 if (read_format & PERF_FORMAT_ID)
1848 values[n++] = primary_event_id(event);
1849
1850 if (copy_to_user(buf, values, n * sizeof(u64)))
1851 return -EFAULT;
1852
1853 return n * sizeof(u64);
1854}
1855
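/*
 * For reference, the layout that perf_event_read_group() and
 * perf_event_read_one() above return to userspace - all fields are u64
 * and the optional ones appear only when the matching read_format bit
 * is set:
 *
 *   PERF_FORMAT_GROUP set:
 *     nr
 *     time_enabled      (PERF_FORMAT_TOTAL_TIME_ENABLED)
 *     time_running      (PERF_FORMAT_TOTAL_TIME_RUNNING)
 *     { value, id }     once for the leader and once per sibling
 *                       (id only with PERF_FORMAT_ID)
 *
 *   PERF_FORMAT_GROUP clear:
 *     value
 *     time_enabled      (PERF_FORMAT_TOTAL_TIME_ENABLED)
 *     time_running      (PERF_FORMAT_TOTAL_TIME_RUNNING)
 *     id                (PERF_FORMAT_ID)
 *
 * A minimal userspace reader for the single-event case is sketched
 * below ('fd' is assumed to come from the perf_event_open syscall,
 * error handling omitted):
 *
 *   u64 buf[4];
 *   ssize_t n = read(fd, buf, sizeof(buf));
 *   u64 value = buf[0];   (later words are present only if requested)
 */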
1856/*
1857 * Read the performance event - simple non blocking version for now
1858 */
1859static ssize_t
1860perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
1861{
1862 u64 read_format = event->attr.read_format;
1863 int ret;
1864
1865 /*
1866 * Return end-of-file for a read on an event that is in
1867 * error state (i.e. because it was pinned but it couldn't be
1868 * scheduled on to the CPU at some point).
1869 */
1870 if (event->state == PERF_EVENT_STATE_ERROR)
1871 return 0;
1872
1873 if (count < perf_event_read_size(event))
1874 return -ENOSPC;
1875
1876 WARN_ON_ONCE(event->ctx->parent_ctx);
1877 mutex_lock(&event->child_mutex);
1878 if (read_format & PERF_FORMAT_GROUP)
1879 ret = perf_event_read_group(event, read_format, buf);
1880 else
1881 ret = perf_event_read_one(event, read_format, buf);
1882 mutex_unlock(&event->child_mutex);
1883
1884 return ret;
1885}
1886
1887static ssize_t
1888perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1889{
1890 struct perf_event *event = file->private_data;
1891
1892 return perf_read_hw(event, buf, count);
1893}
1894
1895static unsigned int perf_poll(struct file *file, poll_table *wait)
1896{
1897 struct perf_event *event = file->private_data;
1898 struct perf_mmap_data *data;
1899 unsigned int events = POLL_HUP;
1900
1901 rcu_read_lock();
1902 data = rcu_dereference(event->data);
1903 if (data)
1904 events = atomic_xchg(&data->poll, 0);
1905 rcu_read_unlock();
1906
1907 poll_wait(file, &event->waitq, wait);
1908
1909 return events;
1910}
1911
1912static void perf_event_reset(struct perf_event *event)
1913{
1914 (void)perf_event_read(event);
1915 atomic64_set(&event->count, 0);
1916 perf_event_update_userpage(event);
1917}
1918
1919/*
1920 * Holding the top-level event's child_mutex means that any
1921 * descendant process that has inherited this event will block
1922 * in sync_child_event if it goes to exit, thus satisfying the
1923 * task existence requirements of perf_event_enable/disable.
1924 */
1925static void perf_event_for_each_child(struct perf_event *event,
1926 void (*func)(struct perf_event *))
1927{
1928 struct perf_event *child;
1929
1930 WARN_ON_ONCE(event->ctx->parent_ctx);
1931 mutex_lock(&event->child_mutex);
1932 func(event);
1933 list_for_each_entry(child, &event->child_list, child_list)
1934 func(child);
1935 mutex_unlock(&event->child_mutex);
1936}
1937
1938static void perf_event_for_each(struct perf_event *event,
1939 void (*func)(struct perf_event *))
1940{
1941 struct perf_event_context *ctx = event->ctx;
1942 struct perf_event *sibling;
1943
1944 WARN_ON_ONCE(ctx->parent_ctx);
1945 mutex_lock(&ctx->mutex);
1946 event = event->group_leader;
1947
1948 perf_event_for_each_child(event, func);
1949 func(event);
1950 list_for_each_entry(sibling, &event->sibling_list, group_entry)
1951 perf_event_for_each_child(event, func);
1952 mutex_unlock(&ctx->mutex);
1953}
1954
1955static int perf_event_period(struct perf_event *event, u64 __user *arg)
1956{
1957 struct perf_event_context *ctx = event->ctx;
1958 unsigned long size;
1959 int ret = 0;
1960 u64 value;
1961
1962 if (!event->attr.sample_period)
1963 return -EINVAL;
1964
1965 size = copy_from_user(&value, arg, sizeof(value));
1966 if (size != sizeof(value))
1967 return -EFAULT;
1968
1969 if (!value)
1970 return -EINVAL;
1971
1972 spin_lock_irq(&ctx->lock);
1973 if (event->attr.freq) {
1974 if (value > sysctl_perf_event_sample_rate) {
1975 ret = -EINVAL;
1976 goto unlock;
1977 }
1978
1979 event->attr.sample_freq = value;
1980 } else {
1981 event->attr.sample_period = value;
1982 event->hw.sample_period = value;
1983 }
1984unlock:
1985 spin_unlock_irq(&ctx->lock);
1986
1987 return ret;
1988}
1989
1990int perf_event_set_output(struct perf_event *event, int output_fd);
1991
1992static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1993{
1994 struct perf_event *event = file->private_data;
1995 void (*func)(struct perf_event *);
1996 u32 flags = arg;
1997
1998 switch (cmd) {
1999 case PERF_EVENT_IOC_ENABLE:
2000 func = perf_event_enable;
2001 break;
2002 case PERF_EVENT_IOC_DISABLE:
2003 func = perf_event_disable;
2004 break;
2005 case PERF_EVENT_IOC_RESET:
2006 func = perf_event_reset;
2007 break;
2008
2009 case PERF_EVENT_IOC_REFRESH:
2010 return perf_event_refresh(event, arg);
2011
2012 case PERF_EVENT_IOC_PERIOD:
2013 return perf_event_period(event, (u64 __user *)arg);
2014
2015 case PERF_EVENT_IOC_SET_OUTPUT:
2016 return perf_event_set_output(event, arg);
2017
2018 default:
2019 return -ENOTTY;
2020 }
2021
2022 if (flags & PERF_IOC_FLAG_GROUP)
2023 perf_event_for_each(event, func);
2024 else
2025 perf_event_for_each_child(event, func);
2026
2027 return 0;
2028}
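
/*
 * For illustration, user-space typically drives the switch above via
 * ioctl() on the event fd, e.g.:
 *
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 * With PERF_IOC_FLAG_GROUP the operation is applied to the whole group
 * via perf_event_for_each(); without it only this event and its inherited
 * children are touched via perf_event_for_each_child().
 */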
2029
2030int perf_event_task_enable(void)
2031{
2032 struct perf_event *event;
2033
2034 mutex_lock(&current->perf_event_mutex);
2035 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2036 perf_event_for_each_child(event, perf_event_enable);
2037 mutex_unlock(&current->perf_event_mutex);
2038
2039 return 0;
2040}
2041
2042int perf_event_task_disable(void)
2043{
2044 struct perf_event *event;
2045
2046 mutex_lock(&current->perf_event_mutex);
2047 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2048 perf_event_for_each_child(event, perf_event_disable);
2049 mutex_unlock(&current->perf_event_mutex);
2050
2051 return 0;
2052}
2053
2054#ifndef PERF_EVENT_INDEX_OFFSET
2055# define PERF_EVENT_INDEX_OFFSET 0
2056#endif
2057
2058static int perf_event_index(struct perf_event *event)
2059{
2060 if (event->state != PERF_EVENT_STATE_ACTIVE)
2061 return 0;
2062
2063 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2064}
2065
2066/*
2067 * Callers need to ensure there can be no nesting of this function, otherwise
2068 * the seqlock logic goes bad. We cannot serialize this because the arch
2069 * code calls this from NMI context.
2070 */
2071void perf_event_update_userpage(struct perf_event *event)
2072{
2073 struct perf_event_mmap_page *userpg;
2074 struct perf_mmap_data *data;
2075
2076 rcu_read_lock();
2077 data = rcu_dereference(event->data);
2078 if (!data)
2079 goto unlock;
2080
2081 userpg = data->user_page;
2082
2083 /*
2084 * Disable preemption so as not to let the corresponding user-space
2085 * spin too long if we get preempted.
2086 */
2087 preempt_disable();
2088 ++userpg->lock;
2089 barrier();
2090 userpg->index = perf_event_index(event);
2091 userpg->offset = atomic64_read(&event->count);
2092 if (event->state == PERF_EVENT_STATE_ACTIVE)
2093 userpg->offset -= atomic64_read(&event->hw.prev_count);
2094
2095 userpg->time_enabled = event->total_time_enabled +
2096 atomic64_read(&event->child_total_time_enabled);
2097
2098 userpg->time_running = event->total_time_running +
2099 atomic64_read(&event->child_total_time_running);
2100
2101 barrier();
2102 ++userpg->lock;
2103 preempt_enable();
2104unlock:
2105 rcu_read_unlock();
2106}
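
/*
 * A sketch of the matching reader side, assuming user-space has mmap()ed
 * the buffer and pc points at the struct perf_event_mmap_page in the
 * first page. The two ->lock increments above act like a seqcount, so a
 * consistent snapshot can be taken roughly as follows (illustrative only):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		index   = pc->index;
 *		offset  = pc->offset;
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		barrier();
 *	} while (pc->lock != seq || (seq & 1));
 */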
2107
2108static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2109{
2110 struct perf_event *event = vma->vm_file->private_data;
2111 struct perf_mmap_data *data;
2112 int ret = VM_FAULT_SIGBUS;
2113
2114 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2115 if (vmf->pgoff == 0)
2116 ret = 0;
2117 return ret;
2118 }
2119
2120 rcu_read_lock();
2121 data = rcu_dereference(event->data);
2122 if (!data)
2123 goto unlock;
2124
2125 if (vmf->pgoff == 0) {
2126 vmf->page = virt_to_page(data->user_page);
2127 } else {
2128 int nr = vmf->pgoff - 1;
2129
2130 if ((unsigned)nr > data->nr_pages)
2131 goto unlock;
2132
2133 if (vmf->flags & FAULT_FLAG_WRITE)
2134 goto unlock;
2135
2136 vmf->page = virt_to_page(data->data_pages[nr]);
2137 }
2138
2139 get_page(vmf->page);
2140 vmf->page->mapping = vma->vm_file->f_mapping;
2141 vmf->page->index = vmf->pgoff;
2142
2143 ret = 0;
2144unlock:
2145 rcu_read_unlock();
2146
2147 return ret;
2148}
2149
2150static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2151{
2152 struct perf_mmap_data *data;
2153 unsigned long size;
2154 int i;
2155
2156 WARN_ON(atomic_read(&event->mmap_count));
2157
2158 size = sizeof(struct perf_mmap_data);
2159 size += nr_pages * sizeof(void *);
2160
2161 data = kzalloc(size, GFP_KERNEL);
2162 if (!data)
2163 goto fail;
2164
2165 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
2166 if (!data->user_page)
2167 goto fail_user_page;
2168
2169 for (i = 0; i < nr_pages; i++) {
2170 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
2171 if (!data->data_pages[i])
2172 goto fail_data_pages;
2173 }
2174
2175 data->nr_pages = nr_pages;
2176 atomic_set(&data->lock, -1);
2177
2178 if (event->attr.watermark) {
2179 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2180 event->attr.wakeup_watermark);
2181 }
2182 if (!data->watermark)
2183 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2184
2185 rcu_assign_pointer(event->data, data);
2186
2187 return 0;
2188
2189fail_data_pages:
2190 for (i--; i >= 0; i--)
2191 free_page((unsigned long)data->data_pages[i]);
2192
2193 free_page((unsigned long)data->user_page);
2194
2195fail_user_page:
2196 kfree(data);
2197
2198fail:
2199 return -ENOMEM;
2200}
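
/*
 * Resulting layout (sketch): data->user_page backs page offset 0 of the
 * mapping and holds the struct perf_event_mmap_page control data, while
 * data->data_pages[0..nr_pages-1] back page offsets 1..nr_pages and hold
 * the actual event records; perf_mmap_fault() above wires these pages
 * into the vma on demand.
 */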
2201
2202static void perf_mmap_free_page(unsigned long addr)
2203{
2204 struct page *page = virt_to_page((void *)addr);
2205
2206 page->mapping = NULL;
2207 __free_page(page);
2208}
2209
2210static void __perf_mmap_data_free(struct rcu_head *rcu_head)
2211{
2212 struct perf_mmap_data *data;
2213 int i;
2214
2215 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2216
2217 perf_mmap_free_page((unsigned long)data->user_page);
2218 for (i = 0; i < data->nr_pages; i++)
2219 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2220
2221 kfree(data);
2222}
2223
2224static void perf_mmap_data_free(struct perf_event *event)
2225{
2226 struct perf_mmap_data *data = event->data;
2227
2228 WARN_ON(atomic_read(&event->mmap_count));
2229
2230 rcu_assign_pointer(event->data, NULL);
2231 call_rcu(&data->rcu_head, __perf_mmap_data_free);
2232}
2233
2234static void perf_mmap_open(struct vm_area_struct *vma)
2235{
2236 struct perf_event *event = vma->vm_file->private_data;
2237
2238 atomic_inc(&event->mmap_count);
2239}
2240
2241static void perf_mmap_close(struct vm_area_struct *vma)
2242{
2243 struct perf_event *event = vma->vm_file->private_data;
2244
2245 WARN_ON_ONCE(event->ctx->parent_ctx);
2246 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2247 struct user_struct *user = current_user();
2248
2249 atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm);
2250 vma->vm_mm->locked_vm -= event->data->nr_locked;
2251 perf_mmap_data_free(event);
2252 mutex_unlock(&event->mmap_mutex);
2253 }
2254}
2255
2256static struct vm_operations_struct perf_mmap_vmops = {
2257 .open = perf_mmap_open,
2258 .close = perf_mmap_close,
2259 .fault = perf_mmap_fault,
2260 .page_mkwrite = perf_mmap_fault,
2261};
2262
2263static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2264{
2265 struct perf_event *event = file->private_data;
2266 unsigned long user_locked, user_lock_limit;
2267 struct user_struct *user = current_user();
2268 unsigned long locked, lock_limit;
2269 unsigned long vma_size;
2270 unsigned long nr_pages;
2271 long user_extra, extra;
2272 int ret = 0;
2273
2274 if (!(vma->vm_flags & VM_SHARED))
2275 return -EINVAL;
2276
2277 vma_size = vma->vm_end - vma->vm_start;
2278 nr_pages = (vma_size / PAGE_SIZE) - 1;
2279
2280 /*
2281	 * If we have data pages, ensure they're a power-of-two number so we
2282	 * can use bitmasks instead of modulo.
2283 */
2284 if (nr_pages != 0 && !is_power_of_2(nr_pages))
2285 return -EINVAL;
2286
2287 if (vma_size != PAGE_SIZE * (1 + nr_pages))
2288 return -EINVAL;
2289
2290 if (vma->vm_pgoff != 0)
2291 return -EINVAL;
2292
2293 WARN_ON_ONCE(event->ctx->parent_ctx);
2294 mutex_lock(&event->mmap_mutex);
2295 if (event->output) {
2296 ret = -EINVAL;
2297 goto unlock;
2298 }
2299
2300 if (atomic_inc_not_zero(&event->mmap_count)) {
2301 if (nr_pages != event->data->nr_pages)
2302 ret = -EINVAL;
2303 goto unlock;
2304 }
2305
2306 user_extra = nr_pages + 1;
2307 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2308
2309 /*
2310 * Increase the limit linearly with more CPUs:
2311 */
2312 user_lock_limit *= num_online_cpus();
2313
2314 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2315
2316 extra = 0;
2317 if (user_locked > user_lock_limit)
2318 extra = user_locked - user_lock_limit;
2319
2320 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2321 lock_limit >>= PAGE_SHIFT;
2322 locked = vma->vm_mm->locked_vm + extra;
2323
2324 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2325 !capable(CAP_IPC_LOCK)) {
2326 ret = -EPERM;
2327 goto unlock;
2328 }
2329
2330 WARN_ON(event->data);
2331 ret = perf_mmap_data_alloc(event, nr_pages);
2332 if (ret)
2333 goto unlock;
2334
2335 atomic_set(&event->mmap_count, 1);
2336 atomic_long_add(user_extra, &user->locked_vm);
2337 vma->vm_mm->locked_vm += extra;
2338 event->data->nr_locked = extra;
2339 if (vma->vm_flags & VM_WRITE)
2340 event->data->writable = 1;
2341
2342unlock:
2343 mutex_unlock(&event->mmap_mutex);
2344
2345 vma->vm_flags |= VM_RESERVED;
2346 vma->vm_ops = &perf_mmap_vmops;
2347
2348 return ret;
2349}
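
/*
 * For illustration, given the checks above a valid mapping consists of
 * one control page plus a power-of-two number of data pages, mapped
 * shared at offset 0, e.g. (user-space sketch):
 *
 *	len  = (1 + (1 << n)) * page_size;
 *	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Anything else - a non-zero offset, a data area that is not a power of
 * two pages, or a private mapping - is rejected with -EINVAL.
 */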
2350
2351static int perf_fasync(int fd, struct file *filp, int on)
2352{
2353 struct inode *inode = filp->f_path.dentry->d_inode;
2354 struct perf_event *event = filp->private_data;
2355 int retval;
2356
2357 mutex_lock(&inode->i_mutex);
2358 retval = fasync_helper(fd, filp, on, &event->fasync);
2359 mutex_unlock(&inode->i_mutex);
2360
2361 if (retval < 0)
2362 return retval;
2363
2364 return 0;
2365}
2366
2367static const struct file_operations perf_fops = {
2368 .release = perf_release,
2369 .read = perf_read,
2370 .poll = perf_poll,
2371 .unlocked_ioctl = perf_ioctl,
2372 .compat_ioctl = perf_ioctl,
2373 .mmap = perf_mmap,
2374 .fasync = perf_fasync,
2375};
2376
2377/*
2378 * Perf event wakeup
2379 *
2380 * If there's data, ensure we set the poll() state and publish everything
2381 * to user-space before waking everybody up.
2382 */
2383
2384void perf_event_wakeup(struct perf_event *event)
2385{
2386 wake_up_all(&event->waitq);
2387
2388 if (event->pending_kill) {
2389 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2390 event->pending_kill = 0;
2391 }
2392}
2393
2394/*
2395 * Pending wakeups
2396 *
2397 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2398 *
2399 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2400 * singly-linked list and use cmpxchg() to add entries locklessly.
2401 */
2402
2403static void perf_pending_event(struct perf_pending_entry *entry)
2404{
2405 struct perf_event *event = container_of(entry,
2406 struct perf_event, pending);
2407
2408 if (event->pending_disable) {
2409 event->pending_disable = 0;
2410 __perf_event_disable(event);
2411 }
2412
2413 if (event->pending_wakeup) {
2414 event->pending_wakeup = 0;
2415 perf_event_wakeup(event);
2416 }
2417}
2418
2419#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2420
2421static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2422 PENDING_TAIL,
2423};
2424
2425static void perf_pending_queue(struct perf_pending_entry *entry,
2426 void (*func)(struct perf_pending_entry *))
2427{
2428 struct perf_pending_entry **head;
2429
2430 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2431 return;
2432
2433 entry->func = func;
2434
2435 head = &get_cpu_var(perf_pending_head);
2436
2437 do {
2438 entry->next = *head;
2439 } while (cmpxchg(head, entry->next, entry) != entry->next);
2440
2441 set_perf_event_pending();
2442
2443 put_cpu_var(perf_pending_head);
2444}
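
/*
 * The result is a per-CPU LIFO list rooted at perf_pending_head and
 * terminated by the PENDING_TAIL sentinel rather than NULL, so that
 * entry->next == NULL can double as "not queued" - which is what the
 * cmpxchg() above and perf_not_pending() below test for.
 * __perf_pending_run() later detaches the whole list with xchg() and
 * walks it.
 */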
2445
2446static int __perf_pending_run(void)
2447{
2448 struct perf_pending_entry *list;
2449 int nr = 0;
2450
2451 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2452 while (list != PENDING_TAIL) {
2453 void (*func)(struct perf_pending_entry *);
2454 struct perf_pending_entry *entry = list;
2455
2456 list = list->next;
2457
2458 func = entry->func;
2459 entry->next = NULL;
2460 /*
2461 * Ensure we observe the unqueue before we issue the wakeup,
2462 * so that we won't be waiting forever.
2463 * -- see perf_not_pending().
2464 */
2465 smp_wmb();
2466
2467 func(entry);
2468 nr++;
2469 }
2470
2471 return nr;
2472}
2473
2474static inline int perf_not_pending(struct perf_event *event)
2475{
2476 /*
2477	 * If we flush on whatever CPU we happen to run on, there is a chance
2478	 * we don't need to wait.
2479 */
2480 get_cpu();
2481 __perf_pending_run();
2482 put_cpu();
2483
2484 /*
2485 * Ensure we see the proper queue state before going to sleep
2486	 * so that we do not miss the wakeup. -- see perf_pending_queue()
2487 */
2488 smp_rmb();
2489 return event->pending.next == NULL;
2490}
2491
2492static void perf_pending_sync(struct perf_event *event)
2493{
2494 wait_event(event->waitq, perf_not_pending(event));
2495}
2496
2497void perf_event_do_pending(void)
2498{
2499 __perf_pending_run();
2500}
2501
2502/*
2503 * Callchain support -- arch specific
2504 */
2505
2506__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2507{
2508 return NULL;
2509}
2510
2511/*
2512 * Output
2513 */
2514static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2515 unsigned long offset, unsigned long head)
2516{
2517 unsigned long mask;
2518
2519 if (!data->writable)
2520 return true;
2521
2522 mask = (data->nr_pages << PAGE_SHIFT) - 1;
2523
2524 offset = (offset - tail) & mask;
2525 head = (head - tail) & mask;
2526
2527 if ((int)(head - offset) < 0)
2528 return false;
2529
2530 return true;
2531}
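
/*
 * Worked example with made-up numbers: 4 data pages of 4096 bytes give
 * mask = 16383. With tail = 1000, offset = 15000 and a proposed
 * head = 17000, offset - tail = 14000 and head - tail = 16000 (mod 16384),
 * so (int)(head - offset) = 2000 >= 0 and the reservation fits. Once the
 * new head would move more than buffer-size bytes past tail, the wrapped
 * difference goes negative and we refuse to overwrite unread data.
 */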
2532
2533static void perf_output_wakeup(struct perf_output_handle *handle)
2534{
2535 atomic_set(&handle->data->poll, POLL_IN);
2536
2537 if (handle->nmi) {
2538 handle->event->pending_wakeup = 1;
2539 perf_pending_queue(&handle->event->pending,
2540 perf_pending_event);
2541 } else
2542 perf_event_wakeup(handle->event);
2543}
2544
2545/*
2546 * Curious locking construct.
2547 *
2548 * We need to ensure a later event doesn't publish a head when a former
2549 * event isn't done writing. However, since we need to deal with NMIs we
2550 * cannot fully serialize things.
2551 *
2552 * What we do is serialize between CPUs so we only have to deal with NMI
2553 * nesting on a single CPU.
2554 *
2555 * We only publish the head (and generate a wakeup) when the outer-most
2556 * event completes.
2557 */
2558static void perf_output_lock(struct perf_output_handle *handle)
2559{
2560 struct perf_mmap_data *data = handle->data;
2561 int cpu;
2562
2563 handle->locked = 0;
2564
2565 local_irq_save(handle->flags);
2566 cpu = smp_processor_id();
2567
2568 if (in_nmi() && atomic_read(&data->lock) == cpu)
2569 return;
2570
2571 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2572 cpu_relax();
2573
2574 handle->locked = 1;
2575}
2576
2577static void perf_output_unlock(struct perf_output_handle *handle)
2578{
2579 struct perf_mmap_data *data = handle->data;
2580 unsigned long head;
2581 int cpu;
2582
2583 data->done_head = data->head;
2584
2585 if (!handle->locked)
2586 goto out;
2587
2588again:
2589 /*
2590 * The xchg implies a full barrier that ensures all writes are done
2591 * before we publish the new head, matched by a rmb() in userspace when
2592 * reading this position.
2593 */
2594 while ((head = atomic_long_xchg(&data->done_head, 0)))
2595 data->user_page->data_head = head;
2596
2597 /*
2598 * NMI can happen here, which means we can miss a done_head update.
2599 */
2600
2601 cpu = atomic_xchg(&data->lock, -1);
2602 WARN_ON_ONCE(cpu != smp_processor_id());
2603
2604 /*
2605	 * Therefore we have to validate that we did not indeed miss one.
2606 */
2607 if (unlikely(atomic_long_read(&data->done_head))) {
2608 /*
2609 * Since we had it locked, we can lock it again.
2610 */
2611 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2612 cpu_relax();
2613
2614 goto again;
2615 }
2616
2617 if (atomic_xchg(&data->wakeup, 0))
2618 perf_output_wakeup(handle);
2619out:
2620 local_irq_restore(handle->flags);
2621}
2622
2623void perf_output_copy(struct perf_output_handle *handle,
2624 const void *buf, unsigned int len)
2625{
2626 unsigned int pages_mask;
2627 unsigned int offset;
2628 unsigned int size;
2629 void **pages;
2630
2631 offset = handle->offset;
2632 pages_mask = handle->data->nr_pages - 1;
2633 pages = handle->data->data_pages;
2634
2635 do {
2636 unsigned int page_offset;
2637 int nr;
2638
2639 nr = (offset >> PAGE_SHIFT) & pages_mask;
2640 page_offset = offset & (PAGE_SIZE - 1);
2641 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2642
2643 memcpy(pages[nr] + page_offset, buf, size);
2644
2645 len -= size;
2646 buf += size;
2647 offset += size;
2648 } while (len);
2649
2650 handle->offset = offset;
2651
2652 /*
2653 * Check we didn't copy past our reservation window, taking the
2654 * possible unsigned int wrap into account.
2655 */
2656 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2657}
2658
2659int perf_output_begin(struct perf_output_handle *handle,
2660 struct perf_event *event, unsigned int size,
2661 int nmi, int sample)
2662{
2663 struct perf_event *output_event;
2664 struct perf_mmap_data *data;
2665 unsigned long tail, offset, head;
2666 int have_lost;
2667 struct {
2668 struct perf_event_header header;
2669 u64 id;
2670 u64 lost;
2671 } lost_event;
2672
2673 rcu_read_lock();
2674 /*
2675 * For inherited events we send all the output towards the parent.
2676 */
2677 if (event->parent)
2678 event = event->parent;
2679
2680 output_event = rcu_dereference(event->output);
2681 if (output_event)
2682 event = output_event;
2683
2684 data = rcu_dereference(event->data);
2685 if (!data)
2686 goto out;
2687
2688 handle->data = data;
2689 handle->event = event;
2690 handle->nmi = nmi;
2691 handle->sample = sample;
2692
2693 if (!data->nr_pages)
2694 goto fail;
2695
2696 have_lost = atomic_read(&data->lost);
2697 if (have_lost)
2698 size += sizeof(lost_event);
2699
2700 perf_output_lock(handle);
2701
2702 do {
2703 /*
2704		 * Userspace could choose to issue an mb() before updating the
2705		 * tail pointer, so that all reads will be completed before the
2706		 * write is issued.
2707 */
2708 tail = ACCESS_ONCE(data->user_page->data_tail);
2709 smp_rmb();
2710 offset = head = atomic_long_read(&data->head);
2711 head += size;
2712 if (unlikely(!perf_output_space(data, tail, offset, head)))
2713 goto fail;
2714 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2715
2716 handle->offset = offset;
2717 handle->head = head;
2718
2719 if (head - tail > data->watermark)
2720 atomic_set(&data->wakeup, 1);
2721
2722 if (have_lost) {
2723 lost_event.header.type = PERF_RECORD_LOST;
2724 lost_event.header.misc = 0;
2725 lost_event.header.size = sizeof(lost_event);
2726 lost_event.id = event->id;
2727 lost_event.lost = atomic_xchg(&data->lost, 0);
2728
2729 perf_output_put(handle, lost_event);
2730 }
2731
2732 return 0;
2733
2734fail:
2735 atomic_inc(&data->lost);
2736 perf_output_unlock(handle);
2737out:
2738 rcu_read_unlock();
2739
2740 return -ENOSPC;
2741}
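
/*
 * The data_tail read above pairs with a consumer that only advances
 * data_tail once it is done reading, e.g. (user-space sketch, assuming
 * pc points at the mapped struct perf_event_mmap_page):
 *
 *	head = pc->data_head;
 *	rmb();
 *	... consume records in [pc->data_tail, head) ...
 *	mb();
 *	pc->data_tail = head;
 *
 * Note that the kernel only refuses to overwrite unread data when the
 * mapping is writable (see perf_output_space()); records dropped that
 * way are counted in data->lost and flushed as PERF_RECORD_LOST above.
 */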
2742
2743void perf_output_end(struct perf_output_handle *handle)
2744{
2745 struct perf_event *event = handle->event;
2746 struct perf_mmap_data *data = handle->data;
2747
2748 int wakeup_events = event->attr.wakeup_events;
2749
2750 if (handle->sample && wakeup_events) {
2751 int events = atomic_inc_return(&data->events);
2752 if (events >= wakeup_events) {
2753 atomic_sub(wakeup_events, &data->events);
2754 atomic_set(&data->wakeup, 1);
2755 }
2756 }
2757
2758 perf_output_unlock(handle);
2759 rcu_read_unlock();
2760}
2761
2762static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
2763{
2764 /*
2765 * only top level events have the pid namespace they were created in
2766 */
2767 if (event->parent)
2768 event = event->parent;
2769
2770 return task_tgid_nr_ns(p, event->ns);
2771}
2772
2773static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
2774{
2775 /*
2776 * only top level events have the pid namespace they were created in
2777 */
2778 if (event->parent)
2779 event = event->parent;
2780
2781 return task_pid_nr_ns(p, event->ns);
2782}
2783
2784static void perf_output_read_one(struct perf_output_handle *handle,
2785 struct perf_event *event)
2786{
2787 u64 read_format = event->attr.read_format;
2788 u64 values[4];
2789 int n = 0;
2790
2791 values[n++] = atomic64_read(&event->count);
2792 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2793 values[n++] = event->total_time_enabled +
2794 atomic64_read(&event->child_total_time_enabled);
2795 }
2796 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2797 values[n++] = event->total_time_running +
2798 atomic64_read(&event->child_total_time_running);
2799 }
2800 if (read_format & PERF_FORMAT_ID)
2801 values[n++] = primary_event_id(event);
2802
2803 perf_output_copy(handle, values, n * sizeof(u64));
2804}
2805
2806/*
2807 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
2808 */
2809static void perf_output_read_group(struct perf_output_handle *handle,
2810 struct perf_event *event)
2811{
2812 struct perf_event *leader = event->group_leader, *sub;
2813 u64 read_format = event->attr.read_format;
2814 u64 values[5];
2815 int n = 0;
2816
2817 values[n++] = 1 + leader->nr_siblings;
2818
2819 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2820 values[n++] = leader->total_time_enabled;
2821
2822 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2823 values[n++] = leader->total_time_running;
2824
2825 if (leader != event)
2826 leader->pmu->read(leader);
2827
2828 values[n++] = atomic64_read(&leader->count);
2829 if (read_format & PERF_FORMAT_ID)
2830 values[n++] = primary_event_id(leader);
2831
2832 perf_output_copy(handle, values, n * sizeof(u64));
2833
2834 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2835 n = 0;
2836
2837 if (sub != event)
2838 sub->pmu->read(sub);
2839
2840 values[n++] = atomic64_read(&sub->count);
2841 if (read_format & PERF_FORMAT_ID)
2842 values[n++] = primary_event_id(sub);
2843
2844 perf_output_copy(handle, values, n * sizeof(u64));
2845 }
2846}
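
/*
 * Layout sketch of what the two helpers above emit, assuming every
 * read_format bit is set (all fields are u64, order as coded above):
 *
 *	PERF_FORMAT_GROUP:  nr, time_enabled, time_running,
 *			    then { value, id } for the leader and for
 *			    each sibling
 *	otherwise:          value, time_enabled, time_running, id
 *
 * Consumers have to derive the record size from read_format, since unset
 * bits simply drop the corresponding fields.
 */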
2847
2848static void perf_output_read(struct perf_output_handle *handle,
2849 struct perf_event *event)
2850{
2851 if (event->attr.read_format & PERF_FORMAT_GROUP)
2852 perf_output_read_group(handle, event);
2853 else
2854 perf_output_read_one(handle, event);
2855}
2856
2857void perf_output_sample(struct perf_output_handle *handle,
2858 struct perf_event_header *header,
2859 struct perf_sample_data *data,
2860 struct perf_event *event)
2861{
2862 u64 sample_type = data->type;
2863
2864 perf_output_put(handle, *header);
2865
2866 if (sample_type & PERF_SAMPLE_IP)
2867 perf_output_put(handle, data->ip);
2868
2869 if (sample_type & PERF_SAMPLE_TID)
2870 perf_output_put(handle, data->tid_entry);
2871
2872 if (sample_type & PERF_SAMPLE_TIME)
2873 perf_output_put(handle, data->time);
2874
2875 if (sample_type & PERF_SAMPLE_ADDR)
2876 perf_output_put(handle, data->addr);
2877
2878 if (sample_type & PERF_SAMPLE_ID)
2879 perf_output_put(handle, data->id);
2880
2881 if (sample_type & PERF_SAMPLE_STREAM_ID)
2882 perf_output_put(handle, data->stream_id);
2883
2884 if (sample_type & PERF_SAMPLE_CPU)
2885 perf_output_put(handle, data->cpu_entry);
2886
2887 if (sample_type & PERF_SAMPLE_PERIOD)
2888 perf_output_put(handle, data->period);
2889
2890 if (sample_type & PERF_SAMPLE_READ)
2891 perf_output_read(handle, event);
2892
2893 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2894 if (data->callchain) {
2895 int size = 1;
2896
2897 if (data->callchain)
2898 size += data->callchain->nr;
2899
2900 size *= sizeof(u64);
2901
2902 perf_output_copy(handle, data->callchain, size);
2903 } else {
2904 u64 nr = 0;
2905 perf_output_put(handle, nr);
2906 }
2907 }
2908
2909 if (sample_type & PERF_SAMPLE_RAW) {
2910 if (data->raw) {
2911 perf_output_put(handle, data->raw->size);
2912 perf_output_copy(handle, data->raw->data,
2913 data->raw->size);
2914 } else {
2915 struct {
2916 u32 size;
2917 u32 data;
2918 } raw = {
2919 .size = sizeof(u32),
2920 .data = 0,
2921 };
2922 perf_output_put(handle, raw);
2923 }
2924 }
2925}
2926
2927void perf_prepare_sample(struct perf_event_header *header,
2928 struct perf_sample_data *data,
2929 struct perf_event *event,
2930 struct pt_regs *regs)
2931{
2932 u64 sample_type = event->attr.sample_type;
2933
2934 data->type = sample_type;
2935
2936 header->type = PERF_RECORD_SAMPLE;
2937 header->size = sizeof(*header);
2938
2939 header->misc = 0;
2940 header->misc |= perf_misc_flags(regs);
2941
2942 if (sample_type & PERF_SAMPLE_IP) {
2943 data->ip = perf_instruction_pointer(regs);
2944
2945 header->size += sizeof(data->ip);
2946 }
2947
2948 if (sample_type & PERF_SAMPLE_TID) {
2949 /* namespace issues */
2950 data->tid_entry.pid = perf_event_pid(event, current);
2951 data->tid_entry.tid = perf_event_tid(event, current);
2952
2953 header->size += sizeof(data->tid_entry);
2954 }
2955
2956 if (sample_type & PERF_SAMPLE_TIME) {
2957 data->time = perf_clock();
2958
2959 header->size += sizeof(data->time);
2960 }
2961
2962 if (sample_type & PERF_SAMPLE_ADDR)
2963 header->size += sizeof(data->addr);
2964
2965 if (sample_type & PERF_SAMPLE_ID) {
2966 data->id = primary_event_id(event);
2967
2968 header->size += sizeof(data->id);
2969 }
2970
2971 if (sample_type & PERF_SAMPLE_STREAM_ID) {
2972 data->stream_id = event->id;
2973
2974 header->size += sizeof(data->stream_id);
2975 }
2976
2977 if (sample_type & PERF_SAMPLE_CPU) {
2978 data->cpu_entry.cpu = raw_smp_processor_id();
2979 data->cpu_entry.reserved = 0;
2980
2981 header->size += sizeof(data->cpu_entry);
2982 }
2983
2984 if (sample_type & PERF_SAMPLE_PERIOD)
2985 header->size += sizeof(data->period);
2986
2987 if (sample_type & PERF_SAMPLE_READ)
2988 header->size += perf_event_read_size(event);
2989
2990 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2991 int size = 1;
2992
2993 data->callchain = perf_callchain(regs);
2994
2995 if (data->callchain)
2996 size += data->callchain->nr;
2997
2998 header->size += size * sizeof(u64);
2999 }
3000
3001 if (sample_type & PERF_SAMPLE_RAW) {
3002 int size = sizeof(u32);
3003
3004 if (data->raw)
3005 size += data->raw->size;
3006 else
3007 size += sizeof(u32);
3008
3009 WARN_ON_ONCE(size & (sizeof(u64)-1));
3010 header->size += size;
3011 }
3012}
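
/*
 * Example of the size accounting above, with illustrative numbers: for
 * sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD
 * the header starts at sizeof(struct perf_event_header) == 8 and grows by
 * 8 (ip) + 8 (tid_entry, two u32s) + 8 (period) to 32 bytes, which is
 * exactly what perf_output_sample() will write for such a record.
 */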
3013
3014static void perf_event_output(struct perf_event *event, int nmi,
3015 struct perf_sample_data *data,
3016 struct pt_regs *regs)
3017{
3018 struct perf_output_handle handle;
3019 struct perf_event_header header;
3020
3021 perf_prepare_sample(&header, data, event, regs);
3022
3023 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3024 return;
3025
3026 perf_output_sample(&handle, &header, data, event);
3027
3028 perf_output_end(&handle);
3029}
3030
3031/*
3032 * read event
3033 */
3034
3035struct perf_read_event {
3036 struct perf_event_header header;
3037
3038 u32 pid;
3039 u32 tid;
3040};
3041
3042static void
3043perf_event_read_event(struct perf_event *event,
3044 struct task_struct *task)
3045{
3046 struct perf_output_handle handle;
3047 struct perf_read_event read_event = {
3048 .header = {
3049 .type = PERF_RECORD_READ,
3050 .misc = 0,
3051 .size = sizeof(read_event) + perf_event_read_size(event),
3052 },
3053 .pid = perf_event_pid(event, task),
3054 .tid = perf_event_tid(event, task),
3055 };
3056 int ret;
3057
3058 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3059 if (ret)
3060 return;
3061
3062 perf_output_put(&handle, read_event);
3063 perf_output_read(&handle, event);
3064
3065 perf_output_end(&handle);
3066}
3067
3068/*
3069 * task tracking -- fork/exit
3070 *
3071 * enabled by: attr.comm | attr.mmap | attr.task
3072 */
3073
3074struct perf_task_event {
3075 struct task_struct *task;
3076 struct perf_event_context *task_ctx;
3077
3078 struct {
3079 struct perf_event_header header;
3080
3081 u32 pid;
3082 u32 ppid;
3083 u32 tid;
3084 u32 ptid;
3085 u64 time;
3086 } event_id;
3087};
3088
3089static void perf_event_task_output(struct perf_event *event,
3090 struct perf_task_event *task_event)
3091{
3092 struct perf_output_handle handle;
3093 int size;
3094 struct task_struct *task = task_event->task;
3095 int ret;
3096
3097 size = task_event->event_id.header.size;
3098 ret = perf_output_begin(&handle, event, size, 0, 0);
3099
3100 if (ret)
3101 return;
3102
3103 task_event->event_id.pid = perf_event_pid(event, task);
3104 task_event->event_id.ppid = perf_event_pid(event, current);
3105
3106 task_event->event_id.tid = perf_event_tid(event, task);
3107 task_event->event_id.ptid = perf_event_tid(event, current);
3108
3109 task_event->event_id.time = perf_clock();
3110
3111 perf_output_put(&handle, task_event->event_id);
3112
3113 perf_output_end(&handle);
3114}
3115
3116static int perf_event_task_match(struct perf_event *event)
3117{
3118 if (event->attr.comm || event->attr.mmap || event->attr.task)
3119 return 1;
3120
3121 return 0;
3122}
3123
3124static void perf_event_task_ctx(struct perf_event_context *ctx,
3125 struct perf_task_event *task_event)
3126{
3127 struct perf_event *event;
3128
3129 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3130 return;
3131
3132 rcu_read_lock();
3133 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3134 if (perf_event_task_match(event))
3135 perf_event_task_output(event, task_event);
3136 }
3137 rcu_read_unlock();
3138}
3139
3140static void perf_event_task_event(struct perf_task_event *task_event)
3141{
3142 struct perf_cpu_context *cpuctx;
3143 struct perf_event_context *ctx = task_event->task_ctx;
3144
3145 cpuctx = &get_cpu_var(perf_cpu_context);
3146 perf_event_task_ctx(&cpuctx->ctx, task_event);
3147 put_cpu_var(perf_cpu_context);
3148
3149 rcu_read_lock();
3150 if (!ctx)
3151 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
3152 if (ctx)
3153 perf_event_task_ctx(ctx, task_event);
3154 rcu_read_unlock();
3155}
3156
3157static void perf_event_task(struct task_struct *task,
3158 struct perf_event_context *task_ctx,
3159 int new)
3160{
3161 struct perf_task_event task_event;
3162
3163 if (!atomic_read(&nr_comm_events) &&
3164 !atomic_read(&nr_mmap_events) &&
3165 !atomic_read(&nr_task_events))
3166 return;
3167
3168 task_event = (struct perf_task_event){
3169 .task = task,
3170 .task_ctx = task_ctx,
3171 .event_id = {
3172 .header = {
3173 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3174 .misc = 0,
3175 .size = sizeof(task_event.event_id),
3176 },
3177 /* .pid */
3178 /* .ppid */
3179 /* .tid */
3180 /* .ptid */
3181 },
3182 };
3183
3184 perf_event_task_event(&task_event);
3185}
3186
3187void perf_event_fork(struct task_struct *task)
3188{
3189 perf_event_task(task, NULL, 1);
3190}
3191
3192/*
3193 * comm tracking
3194 */
3195
3196struct perf_comm_event {
3197 struct task_struct *task;
3198 char *comm;
3199 int comm_size;
3200
3201 struct {
3202 struct perf_event_header header;
3203
3204 u32 pid;
3205 u32 tid;
3206 } event_id;
3207};
3208
3209static void perf_event_comm_output(struct perf_event *event,
3210 struct perf_comm_event *comm_event)
3211{
3212 struct perf_output_handle handle;
3213 int size = comm_event->event_id.header.size;
3214 int ret = perf_output_begin(&handle, event, size, 0, 0);
3215
3216 if (ret)
3217 return;
3218
3219 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3220 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3221
3222 perf_output_put(&handle, comm_event->event_id);
3223 perf_output_copy(&handle, comm_event->comm,
3224 comm_event->comm_size);
3225 perf_output_end(&handle);
3226}
3227
3228static int perf_event_comm_match(struct perf_event *event)
3229{
3230 if (event->attr.comm)
3231 return 1;
3232
3233 return 0;
3234}
3235
3236static void perf_event_comm_ctx(struct perf_event_context *ctx,
3237 struct perf_comm_event *comm_event)
3238{
3239 struct perf_event *event;
3240
3241 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3242 return;
3243
3244 rcu_read_lock();
3245 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3246 if (perf_event_comm_match(event))
3247 perf_event_comm_output(event, comm_event);
3248 }
3249 rcu_read_unlock();
3250}
3251
3252static void perf_event_comm_event(struct perf_comm_event *comm_event)
3253{
3254 struct perf_cpu_context *cpuctx;
3255 struct perf_event_context *ctx;
3256 unsigned int size;
3257 char comm[TASK_COMM_LEN];
3258
3259 memset(comm, 0, sizeof(comm));
3260 strncpy(comm, comm_event->task->comm, sizeof(comm));
3261 size = ALIGN(strlen(comm)+1, sizeof(u64));
3262
3263 comm_event->comm = comm;
3264 comm_event->comm_size = size;
3265
3266 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3267
3268 cpuctx = &get_cpu_var(perf_cpu_context);
3269 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3270 put_cpu_var(perf_cpu_context);
3271
3272 rcu_read_lock();
3273 /*
3274	 * it doesn't really matter which of the child contexts the
3275	 * event ends up in.
3276 */
3277 ctx = rcu_dereference(current->perf_event_ctxp);
3278 if (ctx)
3279 perf_event_comm_ctx(ctx, comm_event);
3280 rcu_read_unlock();
3281}
3282
3283void perf_event_comm(struct task_struct *task)
3284{
3285 struct perf_comm_event comm_event;
3286
3287 if (task->perf_event_ctxp)
3288 perf_event_enable_on_exec(task);
3289
3290 if (!atomic_read(&nr_comm_events))
3291 return;
3292
3293 comm_event = (struct perf_comm_event){
3294 .task = task,
3295 /* .comm */
3296 /* .comm_size */
3297 .event_id = {
3298 .header = {
3299 .type = PERF_RECORD_COMM,
3300 .misc = 0,
3301 /* .size */
3302 },
3303 /* .pid */
3304 /* .tid */
3305 },
3306 };
3307
3308 perf_event_comm_event(&comm_event);
3309}
3310
3311/*
3312 * mmap tracking
3313 */
3314
3315struct perf_mmap_event {
3316 struct vm_area_struct *vma;
3317
3318 const char *file_name;
3319 int file_size;
3320
3321 struct {
3322 struct perf_event_header header;
3323
3324 u32 pid;
3325 u32 tid;
3326 u64 start;
3327 u64 len;
3328 u64 pgoff;
3329 } event_id;
3330};
3331
3332static void perf_event_mmap_output(struct perf_event *event,
3333 struct perf_mmap_event *mmap_event)
3334{
3335 struct perf_output_handle handle;
3336 int size = mmap_event->event_id.header.size;
3337 int ret = perf_output_begin(&handle, event, size, 0, 0);
3338
3339 if (ret)
3340 return;
3341
3342 mmap_event->event_id.pid = perf_event_pid(event, current);
3343 mmap_event->event_id.tid = perf_event_tid(event, current);
3344
3345 perf_output_put(&handle, mmap_event->event_id);
3346 perf_output_copy(&handle, mmap_event->file_name,
3347 mmap_event->file_size);
3348 perf_output_end(&handle);
3349}
3350
3351static int perf_event_mmap_match(struct perf_event *event,
3352 struct perf_mmap_event *mmap_event)
3353{
3354 if (event->attr.mmap)
3355 return 1;
3356
3357 return 0;
3358}
3359
3360static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3361 struct perf_mmap_event *mmap_event)
3362{
3363 struct perf_event *event;
3364
3365 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3366 return;
3367
3368 rcu_read_lock();
3369 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3370 if (perf_event_mmap_match(event, mmap_event))
3371 perf_event_mmap_output(event, mmap_event);
3372 }
3373 rcu_read_unlock();
3374}
3375
3376static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3377{
3378 struct perf_cpu_context *cpuctx;
3379 struct perf_event_context *ctx;
3380 struct vm_area_struct *vma = mmap_event->vma;
3381 struct file *file = vma->vm_file;
3382 unsigned int size;
3383 char tmp[16];
3384 char *buf = NULL;
3385 const char *name;
3386
3387 memset(tmp, 0, sizeof(tmp));
3388
3389 if (file) {
3390 /*
3391 * d_path works from the end of the buffer backwards, so we
3392 * need to add enough zero bytes after the string to handle
3393 * the 64bit alignment we do later.
3394 */
3395 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3396 if (!buf) {
3397 name = strncpy(tmp, "//enomem", sizeof(tmp));
3398 goto got_name;
3399 }
3400 name = d_path(&file->f_path, buf, PATH_MAX);
3401 if (IS_ERR(name)) {
3402 name = strncpy(tmp, "//toolong", sizeof(tmp));
3403 goto got_name;
3404 }
3405 } else {
3406 if (arch_vma_name(mmap_event->vma)) {
3407 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3408 sizeof(tmp));
3409 goto got_name;
3410 }
3411
3412 if (!vma->vm_mm) {
3413 name = strncpy(tmp, "[vdso]", sizeof(tmp));
3414 goto got_name;
3415 }
3416
3417 name = strncpy(tmp, "//anon", sizeof(tmp));
3418 goto got_name;
3419 }
3420
3421got_name:
3422 size = ALIGN(strlen(name)+1, sizeof(u64));
3423
3424 mmap_event->file_name = name;
3425 mmap_event->file_size = size;
3426
3427 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3428
3429 cpuctx = &get_cpu_var(perf_cpu_context);
3430 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3431 put_cpu_var(perf_cpu_context);
3432
3433 rcu_read_lock();
3434 /*
3435	 * it doesn't really matter which of the child contexts the
3436	 * event ends up in.
3437 */
3438 ctx = rcu_dereference(current->perf_event_ctxp);
3439 if (ctx)
3440 perf_event_mmap_ctx(ctx, mmap_event);
3441 rcu_read_unlock();
3442
3443 kfree(buf);
3444}
3445
3446void __perf_event_mmap(struct vm_area_struct *vma)
3447{
3448 struct perf_mmap_event mmap_event;
3449
3450 if (!atomic_read(&nr_mmap_events))
3451 return;
3452
3453 mmap_event = (struct perf_mmap_event){
3454 .vma = vma,
3455 /* .file_name */
3456 /* .file_size */
3457 .event_id = {
3458 .header = {
3459 .type = PERF_RECORD_MMAP,
3460 .misc = 0,
3461 /* .size */
3462 },
3463 /* .pid */
3464 /* .tid */
3465 .start = vma->vm_start,
3466 .len = vma->vm_end - vma->vm_start,
3467 .pgoff = vma->vm_pgoff,
3468 },
3469 };
3470
3471 perf_event_mmap_event(&mmap_event);
3472}
3473
3474/*
3475 * IRQ throttle logging
3476 */
3477
3478static void perf_log_throttle(struct perf_event *event, int enable)
3479{
3480 struct perf_output_handle handle;
3481 int ret;
3482
3483 struct {
3484 struct perf_event_header header;
3485 u64 time;
3486 u64 id;
3487 u64 stream_id;
3488 } throttle_event = {
3489 .header = {
3490 .type = PERF_RECORD_THROTTLE,
3491 .misc = 0,
3492 .size = sizeof(throttle_event),
3493 },
3494 .time = perf_clock(),
3495 .id = primary_event_id(event),
3496 .stream_id = event->id,
3497 };
3498
3499 if (enable)
3500 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3501
3502 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3503 if (ret)
3504 return;
3505
3506 perf_output_put(&handle, throttle_event);
3507 perf_output_end(&handle);
3508}
3509
3510/*
3511 * Generic event overflow handling, sampling.
3512 */
3513
3514static int __perf_event_overflow(struct perf_event *event, int nmi,
3515 int throttle, struct perf_sample_data *data,
3516 struct pt_regs *regs)
3517{
3518 int events = atomic_read(&event->event_limit);
3519 struct hw_perf_event *hwc = &event->hw;
3520 int ret = 0;
3521
3522 throttle = (throttle && event->pmu->unthrottle != NULL);
3523
3524 if (!throttle) {
3525 hwc->interrupts++;
3526 } else {
3527 if (hwc->interrupts != MAX_INTERRUPTS) {
3528 hwc->interrupts++;
3529 if (HZ * hwc->interrupts >
3530 (u64)sysctl_perf_event_sample_rate) {
3531 hwc->interrupts = MAX_INTERRUPTS;
3532 perf_log_throttle(event, 0);
3533 ret = 1;
3534 }
3535 } else {
3536 /*
3537			 * Keep re-disabling the event even though we disabled it
3538			 * on the previous pass - just in case we raced with a
3539			 * sched-in and the event got enabled again:
3540 */
3541 ret = 1;
3542 }
3543 }
3544
3545 if (event->attr.freq) {
3546 u64 now = perf_clock();
3547 s64 delta = now - hwc->freq_stamp;
3548
3549 hwc->freq_stamp = now;
3550
3551 if (delta > 0 && delta < TICK_NSEC)
3552 perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
3553 }
3554
3555 /*
3556 * XXX event_limit might not quite work as expected on inherited
3557 * events
3558 */
3559
3560 event->pending_kill = POLL_IN;
3561 if (events && atomic_dec_and_test(&event->event_limit)) {
3562 ret = 1;
3563 event->pending_kill = POLL_HUP;
3564 if (nmi) {
3565 event->pending_disable = 1;
3566 perf_pending_queue(&event->pending,
3567 perf_pending_event);
3568 } else
3569 perf_event_disable(event);
3570 }
3571
3572 perf_event_output(event, nmi, data, regs);
3573 return ret;
3574}
3575
3576int perf_event_overflow(struct perf_event *event, int nmi,
3577 struct perf_sample_data *data,
3578 struct pt_regs *regs)
3579{
3580 return __perf_event_overflow(event, nmi, 1, data, regs);
3581}
3582
3583/*
3584 * Generic software event infrastructure
3585 */
3586
3587/*
3588 * We directly increment event->count and keep a second value in
3589 * event->hw.period_left to count intervals. This period value
3590 * is kept in the range [-sample_period, 0] so that we can use the
3591 * sign as trigger.
3592 */
3593
3594static u64 perf_swevent_set_period(struct perf_event *event)
3595{
3596 struct hw_perf_event *hwc = &event->hw;
3597 u64 period = hwc->last_period;
3598 u64 nr, offset;
3599 s64 old, val;
3600
3601 hwc->last_period = hwc->sample_period;
3602
3603again:
3604 old = val = atomic64_read(&hwc->period_left);
3605 if (val < 0)
3606 return 0;
3607
3608 nr = div64_u64(period + val, period);
3609 offset = nr * period;
3610 val -= offset;
3611 if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
3612 goto again;
3613
3614 return nr;
3615}
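
/*
 * Worked example with made-up numbers: for a sample_period of 100, once
 * period_left has climbed to +30 (we ran 30 events past the period),
 * nr = (100 + 30) / 100 = 1, offset = 100 and period_left is moved back
 * to -70, i.e. into the [-sample_period, 0] range described above, while
 * the returned overflow count of 1 drives perf_swevent_overflow().
 */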
3616
3617static void perf_swevent_overflow(struct perf_event *event,
3618 int nmi, struct perf_sample_data *data,
3619 struct pt_regs *regs)
3620{
3621 struct hw_perf_event *hwc = &event->hw;
3622 int throttle = 0;
3623 u64 overflow;
3624
3625 data->period = event->hw.last_period;
3626 overflow = perf_swevent_set_period(event);
3627
3628 if (hwc->interrupts == MAX_INTERRUPTS)
3629 return;
3630
3631 for (; overflow; overflow--) {
3632 if (__perf_event_overflow(event, nmi, throttle,
3633 data, regs)) {
3634 /*
3635 * We inhibit the overflow from happening when
3636 * hwc->interrupts == MAX_INTERRUPTS.
3637 */
3638 break;
3639 }
3640 throttle = 1;
3641 }
3642}
3643
3644static void perf_swevent_unthrottle(struct perf_event *event)
3645{
3646 /*
3647 * Nothing to do, we already reset hwc->interrupts.
3648 */
3649}
3650
3651static void perf_swevent_add(struct perf_event *event, u64 nr,
3652 int nmi, struct perf_sample_data *data,
3653 struct pt_regs *regs)
3654{
3655 struct hw_perf_event *hwc = &event->hw;
3656
3657 atomic64_add(nr, &event->count);
3658
3659 if (!hwc->sample_period)
3660 return;
3661
3662 if (!regs)
3663 return;
3664
3665 if (!atomic64_add_negative(nr, &hwc->period_left))
3666 perf_swevent_overflow(event, nmi, data, regs);
3667}
3668
3669static int perf_swevent_is_counting(struct perf_event *event)
3670{
3671 /*
3672 * The event is active, we're good!
3673 */
3674 if (event->state == PERF_EVENT_STATE_ACTIVE)
3675 return 1;
3676
3677 /*
3678 * The event is off/error, not counting.
3679 */
3680 if (event->state != PERF_EVENT_STATE_INACTIVE)
3681 return 0;
3682
3683 /*
3684	 * The event is inactive; if the context is active
3685	 * we're part of a group that didn't make it onto the 'pmu',
3686	 * so we're not counting.
3687 */
3688 if (event->ctx->is_active)
3689 return 0;
3690
3691 /*
3692	 * We're inactive and the context is too; this means the
3693	 * task is scheduled out, so we're counting events that happen
3694	 * to us, like migration events.
3695 */
3696 return 1;
3697}
3698
3699static int perf_swevent_match(struct perf_event *event,
3700 enum perf_type_id type,
3701 u32 event_id, struct pt_regs *regs)
3702{
3703 if (!perf_swevent_is_counting(event))
3704 return 0;
3705
3706 if (event->attr.type != type)
3707 return 0;
3708 if (event->attr.config != event_id)
3709 return 0;
3710
3711 if (regs) {
3712 if (event->attr.exclude_user && user_mode(regs))
3713 return 0;
3714
3715 if (event->attr.exclude_kernel && !user_mode(regs))
3716 return 0;
3717 }
3718
3719 return 1;
3720}
3721
3722static void perf_swevent_ctx_event(struct perf_event_context *ctx,
3723 enum perf_type_id type,
3724 u32 event_id, u64 nr, int nmi,
3725 struct perf_sample_data *data,
3726 struct pt_regs *regs)
3727{
3728 struct perf_event *event;
3729
3730 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3731 return;
3732
3733 rcu_read_lock();
3734 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3735 if (perf_swevent_match(event, type, event_id, regs))
3736 perf_swevent_add(event, nr, nmi, data, regs);
3737 }
3738 rcu_read_unlock();
3739}
3740
3741static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
3742{
3743 if (in_nmi())
3744 return &cpuctx->recursion[3];
3745
3746 if (in_irq())
3747 return &cpuctx->recursion[2];
3748
3749 if (in_softirq())
3750 return &cpuctx->recursion[1];
3751
3752 return &cpuctx->recursion[0];
3753}
3754
3755static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
3756 u64 nr, int nmi,
3757 struct perf_sample_data *data,
3758 struct pt_regs *regs)
3759{
3760 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3761 int *recursion = perf_swevent_recursion_context(cpuctx);
3762 struct perf_event_context *ctx;
3763
3764 if (*recursion)
3765 goto out;
3766
3767 (*recursion)++;
3768 barrier();
3769
3770 perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
3771 nr, nmi, data, regs);
3772 rcu_read_lock();
3773 /*
3774	 * it doesn't really matter which of the child contexts the
3775	 * event ends up in.
3776 */
3777 ctx = rcu_dereference(current->perf_event_ctxp);
3778 if (ctx)
3779 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
3780 rcu_read_unlock();
3781
3782 barrier();
3783 (*recursion)--;
3784
3785out:
3786 put_cpu_var(perf_cpu_context);
3787}
3788
3789void __perf_sw_event(u32 event_id, u64 nr, int nmi,
3790 struct pt_regs *regs, u64 addr)
3791{
3792 struct perf_sample_data data = {
3793 .addr = addr,
3794 };
3795
3796 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
3797 &data, regs);
3798}
3799
3800static void perf_swevent_read(struct perf_event *event)
3801{
3802}
3803
3804static int perf_swevent_enable(struct perf_event *event)
3805{
3806 struct hw_perf_event *hwc = &event->hw;
3807
3808 if (hwc->sample_period) {
3809 hwc->last_period = hwc->sample_period;
3810 perf_swevent_set_period(event);
3811 }
3812 return 0;
3813}
3814
3815static void perf_swevent_disable(struct perf_event *event)
3816{
3817}
3818
3819static const struct pmu perf_ops_generic = {
3820 .enable = perf_swevent_enable,
3821 .disable = perf_swevent_disable,
3822 .read = perf_swevent_read,
3823 .unthrottle = perf_swevent_unthrottle,
3824};
3825
3826/*
3827 * hrtimer based swevent callback
3828 */
3829
3830static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3831{
3832 enum hrtimer_restart ret = HRTIMER_RESTART;
3833 struct perf_sample_data data;
3834 struct pt_regs *regs;
3835 struct perf_event *event;
3836 u64 period;
3837
3838 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
3839 event->pmu->read(event);
3840
3841 data.addr = 0;
3842 regs = get_irq_regs();
3843 /*
3844	 * If we exclude kernel IPs or are somehow not in interrupt
3845	 * context, provide the next best thing: the user IP.
3846 */
3847 if ((event->attr.exclude_kernel || !regs) &&
3848 !event->attr.exclude_user)
3849 regs = task_pt_regs(current);
3850
3851 if (regs) {
3852 if (perf_event_overflow(event, 0, &data, regs))
3853 ret = HRTIMER_NORESTART;
3854 }
3855
3856 period = max_t(u64, 10000, event->hw.sample_period);
3857 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3858
3859 return ret;
3860}
3861
3862/*
3863 * Software event: cpu wall time clock
3864 */
3865
3866static void cpu_clock_perf_event_update(struct perf_event *event)
3867{
3868 int cpu = raw_smp_processor_id();
3869 s64 prev;
3870 u64 now;
3871
3872 now = cpu_clock(cpu);
3873 prev = atomic64_read(&event->hw.prev_count);
3874 atomic64_set(&event->hw.prev_count, now);
3875 atomic64_add(now - prev, &event->count);
3876}
3877
3878static int cpu_clock_perf_event_enable(struct perf_event *event)
3879{
3880 struct hw_perf_event *hwc = &event->hw;
3881 int cpu = raw_smp_processor_id();
3882
3883 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3884 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3885 hwc->hrtimer.function = perf_swevent_hrtimer;
3886 if (hwc->sample_period) {
3887 u64 period = max_t(u64, 10000, hwc->sample_period);
3888 __hrtimer_start_range_ns(&hwc->hrtimer,
3889 ns_to_ktime(period), 0,
3890 HRTIMER_MODE_REL, 0);
3891 }
3892
3893 return 0;
3894}
3895
3896static void cpu_clock_perf_event_disable(struct perf_event *event)
3897{
3898 if (event->hw.sample_period)
3899 hrtimer_cancel(&event->hw.hrtimer);
3900 cpu_clock_perf_event_update(event);
3901}
3902
3903static void cpu_clock_perf_event_read(struct perf_event *event)
3904{
3905 cpu_clock_perf_event_update(event);
3906}
3907
3908static const struct pmu perf_ops_cpu_clock = {
3909 .enable = cpu_clock_perf_event_enable,
3910 .disable = cpu_clock_perf_event_disable,
3911 .read = cpu_clock_perf_event_read,
3912};
3913
3914/*
3915 * Software event: task time clock
3916 */
3917
3918static void task_clock_perf_event_update(struct perf_event *event, u64 now)
3919{
3920 u64 prev;
3921 s64 delta;
3922
3923 prev = atomic64_xchg(&event->hw.prev_count, now);
3924 delta = now - prev;
3925 atomic64_add(delta, &event->count);
3926}
3927
3928static int task_clock_perf_event_enable(struct perf_event *event)
3929{
3930 struct hw_perf_event *hwc = &event->hw;
3931 u64 now;
3932
3933 now = event->ctx->time;
3934
3935 atomic64_set(&hwc->prev_count, now);
3936 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3937 hwc->hrtimer.function = perf_swevent_hrtimer;
3938 if (hwc->sample_period) {
3939 u64 period = max_t(u64, 10000, hwc->sample_period);
3940 __hrtimer_start_range_ns(&hwc->hrtimer,
3941 ns_to_ktime(period), 0,
3942 HRTIMER_MODE_REL, 0);
3943 }
3944
3945 return 0;
3946}
3947
3948static void task_clock_perf_event_disable(struct perf_event *event)
3949{
3950 if (event->hw.sample_period)
3951 hrtimer_cancel(&event->hw.hrtimer);
3952 task_clock_perf_event_update(event, event->ctx->time);
3953
3954}
3955
3956static void task_clock_perf_event_read(struct perf_event *event)
3957{
3958 u64 time;
3959
3960 if (!in_nmi()) {
3961 update_context_time(event->ctx);
3962 time = event->ctx->time;
3963 } else {
3964 u64 now = perf_clock();
3965 u64 delta = now - event->ctx->timestamp;
3966 time = event->ctx->time + delta;
3967 }
3968
3969 task_clock_perf_event_update(event, time);
3970}
3971
3972static const struct pmu perf_ops_task_clock = {
3973 .enable = task_clock_perf_event_enable,
3974 .disable = task_clock_perf_event_disable,
3975 .read = task_clock_perf_event_read,
3976};
3977
3978#ifdef CONFIG_EVENT_PROFILE
3979void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
3980 int entry_size)
3981{
3982 struct perf_raw_record raw = {
3983 .size = entry_size,
3984 .data = record,
3985 };
3986
3987 struct perf_sample_data data = {
3988 .addr = addr,
3989 .raw = &raw,
3990 };
3991
3992 struct pt_regs *regs = get_irq_regs();
3993
3994 if (!regs)
3995 regs = task_pt_regs(current);
3996
3997 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
3998 &data, regs);
3999}
4000EXPORT_SYMBOL_GPL(perf_tp_event);
4001
4002extern int ftrace_profile_enable(int);
4003extern void ftrace_profile_disable(int);
4004
4005static void tp_perf_event_destroy(struct perf_event *event)
4006{
4007 ftrace_profile_disable(event->attr.config);
4008}
4009
4010static const struct pmu *tp_perf_event_init(struct perf_event *event)
4011{
4012 /*
4013	 * Raw tracepoint data is a severe data leak; only allow root to
4014	 * have these.
4015 */
4016 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4017 perf_paranoid_tracepoint_raw() &&
4018 !capable(CAP_SYS_ADMIN))
4019 return ERR_PTR(-EPERM);
4020
4021 if (ftrace_profile_enable(event->attr.config))
4022 return NULL;
4023
4024 event->destroy = tp_perf_event_destroy;
4025
4026 return &perf_ops_generic;
4027}
4028#else
4029static const struct pmu *tp_perf_event_init(struct perf_event *event)
4030{
4031 return NULL;
4032}
4033#endif
4034
4035atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4036
4037static void sw_perf_event_destroy(struct perf_event *event)
4038{
4039 u64 event_id = event->attr.config;
4040
4041 WARN_ON(event->parent);
4042
4043 atomic_dec(&perf_swevent_enabled[event_id]);
4044}
4045
4046static const struct pmu *sw_perf_event_init(struct perf_event *event)
4047{
4048 const struct pmu *pmu = NULL;
4049 u64 event_id = event->attr.config;
4050
4051 /*
4052 * Software events (currently) can't in general distinguish
4053 * between user, kernel and hypervisor events.
4054 * However, context switches and cpu migrations are considered
4055 * to be kernel events, and page faults are never hypervisor
4056 * events.
4057 */
4058 switch (event_id) {
4059 case PERF_COUNT_SW_CPU_CLOCK:
4060 pmu = &perf_ops_cpu_clock;
4061
4062 break;
4063 case PERF_COUNT_SW_TASK_CLOCK:
4064 /*
4065 * If the user instantiates this as a per-cpu event,
4066 * use the cpu_clock event instead.
4067 */
4068 if (event->ctx->task)
4069 pmu = &perf_ops_task_clock;
4070 else
4071 pmu = &perf_ops_cpu_clock;
4072
4073 break;
4074 case PERF_COUNT_SW_PAGE_FAULTS:
4075 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
4076 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4077 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4078 case PERF_COUNT_SW_CPU_MIGRATIONS:
4079 if (!event->parent) {
4080 atomic_inc(&perf_swevent_enabled[event_id]);
4081 event->destroy = sw_perf_event_destroy;
4082 }
4083 pmu = &perf_ops_generic;
4084 break;
4085 }
4086
4087 return pmu;
4088}
4089
4090/*
4091 * Allocate and initialize an event structure
4092 */
4093static struct perf_event *
4094perf_event_alloc(struct perf_event_attr *attr,
4095 int cpu,
4096 struct perf_event_context *ctx,
4097 struct perf_event *group_leader,
4098 struct perf_event *parent_event,
4099 gfp_t gfpflags)
4100{
4101 const struct pmu *pmu;
4102 struct perf_event *event;
4103 struct hw_perf_event *hwc;
4104 long err;
4105
4106 event = kzalloc(sizeof(*event), gfpflags);
4107 if (!event)
4108 return ERR_PTR(-ENOMEM);
4109
4110 /*
4111 * Single events are their own group leaders, with an
4112 * empty sibling list:
4113 */
4114 if (!group_leader)
4115 group_leader = event;
4116
4117 mutex_init(&event->child_mutex);
4118 INIT_LIST_HEAD(&event->child_list);
4119
4120 INIT_LIST_HEAD(&event->group_entry);
4121 INIT_LIST_HEAD(&event->event_entry);
4122 INIT_LIST_HEAD(&event->sibling_list);
4123 init_waitqueue_head(&event->waitq);
4124
4125 mutex_init(&event->mmap_mutex);
4126
4127 event->cpu = cpu;
4128 event->attr = *attr;
4129 event->group_leader = group_leader;
4130 event->pmu = NULL;
4131 event->ctx = ctx;
4132 event->oncpu = -1;
4133
4134 event->parent = parent_event;
4135
4136 event->ns = get_pid_ns(current->nsproxy->pid_ns);
4137 event->id = atomic64_inc_return(&perf_event_id);
4138
4139 event->state = PERF_EVENT_STATE_INACTIVE;
4140
4141 if (attr->disabled)
4142 event->state = PERF_EVENT_STATE_OFF;
4143
4144 pmu = NULL;
4145
4146 hwc = &event->hw;
4147 hwc->sample_period = attr->sample_period;
4148 if (attr->freq && attr->sample_freq)
4149 hwc->sample_period = 1;
4150 hwc->last_period = hwc->sample_period;
4151
4152 atomic64_set(&hwc->period_left, hwc->sample_period);
4153
4154 /*
4155 * we currently do not support PERF_FORMAT_GROUP on inherited events
4156 */
4157 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4158 goto done;
4159
4160 switch (attr->type) {
4161 case PERF_TYPE_RAW:
4162 case PERF_TYPE_HARDWARE:
4163 case PERF_TYPE_HW_CACHE:
4164 pmu = hw_perf_event_init(event);
4165 break;
4166
4167 case PERF_TYPE_SOFTWARE:
4168 pmu = sw_perf_event_init(event);
4169 break;
4170
4171 case PERF_TYPE_TRACEPOINT:
4172 pmu = tp_perf_event_init(event);
4173 break;
4174
4175 default:
4176 break;
4177 }
4178done:
4179 err = 0;
4180 if (!pmu)
4181 err = -EINVAL;
4182 else if (IS_ERR(pmu))
4183 err = PTR_ERR(pmu);
4184
4185 if (err) {
4186 if (event->ns)
4187 put_pid_ns(event->ns);
4188 kfree(event);
4189 return ERR_PTR(err);
4190 }
4191
4192 event->pmu = pmu;
4193
4194 if (!event->parent) {
4195 atomic_inc(&nr_events);
4196 if (event->attr.mmap)
4197 atomic_inc(&nr_mmap_events);
4198 if (event->attr.comm)
4199 atomic_inc(&nr_comm_events);
4200 if (event->attr.task)
4201 atomic_inc(&nr_task_events);
4202 }
4203
4204 return event;
4205}
4206
4207static int perf_copy_attr(struct perf_event_attr __user *uattr,
4208 struct perf_event_attr *attr)
4209{
4210 u32 size;
4211 int ret;
4212
4213 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
4214 return -EFAULT;
4215
4216 /*
4217 * zero the full structure, so that a short copy leaves the rest zeroed.
4218 */
4219 memset(attr, 0, sizeof(*attr));
4220
4221 ret = get_user(size, &uattr->size);
4222 if (ret)
4223 return ret;
4224
4225 if (size > PAGE_SIZE) /* silly large */
4226 goto err_size;
4227
4228 if (!size) /* abi compat */
4229 size = PERF_ATTR_SIZE_VER0;
4230
4231 if (size < PERF_ATTR_SIZE_VER0)
4232 goto err_size;
4233
4234 /*
4235 * If we're handed a bigger struct than we know of,
4236 * ensure all the unknown bits are 0 - i.e. new
4237 * user-space does not rely on any kernel feature
4238 * extensions we don't know about yet.
4239 */
4240 if (size > sizeof(*attr)) {
4241 unsigned char __user *addr;
4242 unsigned char __user *end;
4243 unsigned char val;
4244
4245 addr = (void __user *)uattr + sizeof(*attr);
4246 end = (void __user *)uattr + size;
4247
4248 for (; addr < end; addr++) {
4249 ret = get_user(val, addr);
4250 if (ret)
4251 return ret;
4252 if (val)
4253 goto err_size;
4254 }
4255 size = sizeof(*attr);
4256 }
4257
4258 ret = copy_from_user(attr, uattr, size);
4259 if (ret)
4260 return -EFAULT;
4261
4262 /*
4263 * If the type is known, the type-specific initialization will
4264 * verify the attr->config.
4265 */
4266 if (attr->type >= PERF_TYPE_MAX)
4267 return -EINVAL;
4268
4269 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
4270 return -EINVAL;
4271
4272 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
4273 return -EINVAL;
4274
4275 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
4276 return -EINVAL;
4277
4278out:
4279 return ret;
4280
4281err_size:
4282 put_user(sizeof(*attr), &uattr->size);
4283 ret = -E2BIG;
4284 goto out;
4285}
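/*
 * Illustrative sketch (not part of this file): the forward-compatible way
 * for user space to fill in perf_event_attr so that the size checks above
 * succeed - zero the whole struct and set .size to the size it was
 * compiled against:
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *
 * An older kernel then sees size > sizeof(*attr) and accepts it only if
 * the bytes it does not know about are all zero; a newer kernel
 * zero-extends a smaller struct via the memset() above.
 */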
4286
4287int perf_event_set_output(struct perf_event *event, int output_fd)
4288{
4289 struct perf_event *output_event = NULL;
4290 struct file *output_file = NULL;
4291 struct perf_event *old_output;
4292 int fput_needed = 0;
4293 int ret = -EINVAL;
4294
4295 if (!output_fd)
4296 goto set;
4297
4298 output_file = fget_light(output_fd, &fput_needed);
4299 if (!output_file)
4300 return -EBADF;
4301
4302 if (output_file->f_op != &perf_fops)
4303 goto out;
4304
4305 output_event = output_file->private_data;
4306
4307 /* Don't chain output fds */
4308 if (output_event->output)
4309 goto out;
4310
4311 /* Don't set an output fd when we already have an output channel */
4312 if (event->data)
4313 goto out;
4314
4315 atomic_long_inc(&output_file->f_count);
4316
4317set:
4318 mutex_lock(&event->mmap_mutex);
4319 old_output = event->output;
4320 rcu_assign_pointer(event->output, output_event);
4321 mutex_unlock(&event->mmap_mutex);
4322
4323 if (old_output) {
4324 /*
4325 * we need to make sure no existing perf_output_*()
4326 * is still referencing this event.
4327 */
4328 synchronize_rcu();
4329 fput(old_output->filp);
4330 }
4331
4332 ret = 0;
4333out:
4334 fput_light(output_file, fput_needed);
4335 return ret;
4336}
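/*
 * Illustrative sketch (not part of this file): one way user space reaches
 * the function above is PERF_FLAG_FD_OUTPUT on sys_perf_event_open()
 * below, which redirects the new event's output into the ring buffer of
 * the event passed as group_fd - e.g., assuming 'attr' is set up as for a
 * normal event and a raw syscall() wrapper is used:
 *
 *	int leader = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, leader,
 *			 PERF_FLAG_FD_OUTPUT);
 */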
4337
4338/**
4339 * sys_perf_event_open - open a performance event, associate it to a task/cpu
4340 *
4341 * @attr_uptr: event type attributes for monitoring/sampling
4342 * @pid: target pid
4343 * @cpu: target cpu
4344 * @group_fd: group leader event fd
 * @flags: perf event open flags (PERF_FLAG_FD_NO_GROUP, PERF_FLAG_FD_OUTPUT)
4345 */
4346SYSCALL_DEFINE5(perf_event_open,
4347 struct perf_event_attr __user *, attr_uptr,
4348 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4349{
4350 struct perf_event *event, *group_leader;
4351 struct perf_event_attr attr;
4352 struct perf_event_context *ctx;
4353 struct file *event_file = NULL;
4354 struct file *group_file = NULL;
4355 int fput_needed = 0;
4356 int fput_needed2 = 0;
4357 int err;
4358
4359 /* for future expandability... */
4360 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
4361 return -EINVAL;
4362
4363 err = perf_copy_attr(attr_uptr, &attr);
4364 if (err)
4365 return err;
4366
4367 if (!attr.exclude_kernel) {
4368 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4369 return -EACCES;
4370 }
4371
4372 if (attr.freq) {
4373 if (attr.sample_freq > sysctl_perf_event_sample_rate)
4374 return -EINVAL;
4375 }
4376
4377 /*
4378 * Get the target context (task or percpu):
4379 */
4380 ctx = find_get_context(pid, cpu);
4381 if (IS_ERR(ctx))
4382 return PTR_ERR(ctx);
4383
4384 /*
4385 * Look up the group leader (we will attach this event to it):
4386 */
4387 group_leader = NULL;
4388 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
4389 err = -EINVAL;
4390 group_file = fget_light(group_fd, &fput_needed);
4391 if (!group_file)
4392 goto err_put_context;
4393 if (group_file->f_op != &perf_fops)
4394 goto err_put_context;
4395
4396 group_leader = group_file->private_data;
4397 /*
4398 * Do not allow a recursive hierarchy (this new sibling
4399 * becoming part of another group-sibling):
4400 */
4401 if (group_leader->group_leader != group_leader)
4402 goto err_put_context;
4403 /*
4404 * Do not allow attaching to a group in a different
4405 * task or CPU context:
4406 */
4407 if (group_leader->ctx != ctx)
4408 goto err_put_context;
4409 /*
4410 * Only a group leader can be exclusive or pinned
4411 */
4412 if (attr.exclusive || attr.pinned)
4413 goto err_put_context;
4414 }
4415
4416 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
4417 NULL, GFP_KERNEL);
4418 err = PTR_ERR(event);
4419 if (IS_ERR(event))
4420 goto err_put_context;
4421
4422 err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
4423 if (err < 0)
4424 goto err_free_put_context;
4425
4426 event_file = fget_light(err, &fput_needed2);
4427 if (!event_file)
4428 goto err_free_put_context;
4429
4430 if (flags & PERF_FLAG_FD_OUTPUT) {
4431 err = perf_event_set_output(event, group_fd);
4432 if (err)
4433 goto err_fput_free_put_context;
4434 }
4435
4436 event->filp = event_file;
4437 WARN_ON_ONCE(ctx->parent_ctx);
4438 mutex_lock(&ctx->mutex);
4439 perf_install_in_context(ctx, event, cpu);
4440 ++ctx->generation;
4441 mutex_unlock(&ctx->mutex);
4442
4443 event->owner = current;
4444 get_task_struct(current);
4445 mutex_lock(&current->perf_event_mutex);
4446 list_add_tail(&event->owner_entry, &current->perf_event_list);
4447 mutex_unlock(&current->perf_event_mutex);
4448
4449err_fput_free_put_context:
4450 fput_light(event_file, fput_needed2);
4451
4452err_free_put_context:
4453 if (err < 0)
4454 kfree(event);
4455
4456err_put_context:
4457 if (err < 0)
4458 put_ctx(ctx);
4459
4460 fput_light(group_file, fput_needed);
4461
4462 return err;
4463}
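/*
 * Illustrative sketch (not part of this file): minimal user-space use of
 * the syscall above - count instructions in the calling task and read the
 * result back. Assumes <linux/perf_event.h>, <sys/syscall.h>,
 * <sys/ioctl.h>, <string.h> and <unistd.h>; there is typically no libc
 * wrapper, so a raw syscall() is used:
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size		= sizeof(attr);
 *	attr.type		= PERF_TYPE_HARDWARE;
 *	attr.config		= PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled		= 1;
 *	attr.exclude_kernel	= 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	... run the workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */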
4464
4465/*
4466 * inherit an event from parent task to child task:
4467 */
4468static struct perf_event *
4469inherit_event(struct perf_event *parent_event,
4470 struct task_struct *parent,
4471 struct perf_event_context *parent_ctx,
4472 struct task_struct *child,
4473 struct perf_event *group_leader,
4474 struct perf_event_context *child_ctx)
4475{
4476 struct perf_event *child_event;
4477
4478 /*
4479 * Instead of creating recursive hierarchies of events,
4480 * we link inherited events back to the original parent,
4481 * which is guaranteed to have a filp that we use as the reference
4482 * count:
4483 */
4484 if (parent_event->parent)
4485 parent_event = parent_event->parent;
4486
4487 child_event = perf_event_alloc(&parent_event->attr,
4488 parent_event->cpu, child_ctx,
4489 group_leader, parent_event,
4490 GFP_KERNEL);
4491 if (IS_ERR(child_event))
4492 return child_event;
4493 get_ctx(child_ctx);
4494
4495 /*
4496 * Make the child state follow the state of the parent event,
4497 * not its attr.disabled bit. We hold the parent's mutex,
4498 * so we won't race with perf_event_{en, dis}able_family.
4499 */
4500 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
4501 child_event->state = PERF_EVENT_STATE_INACTIVE;
4502 else
4503 child_event->state = PERF_EVENT_STATE_OFF;
4504
4505 if (parent_event->attr.freq)
4506 child_event->hw.sample_period = parent_event->hw.sample_period;
4507
4508 /*
4509 * Link it up in the child's context:
4510 */
4511 add_event_to_ctx(child_event, child_ctx);
4512
4513 /*
4514 * Get a reference to the parent filp - we will fput it
4515 * when the child event exits. This is safe to do because
4516 * we are in the parent and we know that the filp still
4517 * exists and has a nonzero count:
4518 */
4519 atomic_long_inc(&parent_event->filp->f_count);
4520
4521 /*
4522 * Link this into the parent event's child list
4523 */
4524 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4525 mutex_lock(&parent_event->child_mutex);
4526 list_add_tail(&child_event->child_list, &parent_event->child_list);
4527 mutex_unlock(&parent_event->child_mutex);
4528
4529 return child_event;
4530}
4531
4532static int inherit_group(struct perf_event *parent_event,
4533 struct task_struct *parent,
4534 struct perf_event_context *parent_ctx,
4535 struct task_struct *child,
4536 struct perf_event_context *child_ctx)
4537{
4538 struct perf_event *leader;
4539 struct perf_event *sub;
4540 struct perf_event *child_ctr;
4541
4542 leader = inherit_event(parent_event, parent, parent_ctx,
4543 child, NULL, child_ctx);
4544 if (IS_ERR(leader))
4545 return PTR_ERR(leader);
4546 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
4547 child_ctr = inherit_event(sub, parent, parent_ctx,
4548 child, leader, child_ctx);
4549 if (IS_ERR(child_ctr))
4550 return PTR_ERR(child_ctr);
4551 }
4552 return 0;
4553}
4554
4555static void sync_child_event(struct perf_event *child_event,
4556 struct task_struct *child)
4557{
4558 struct perf_event *parent_event = child_event->parent;
4559 u64 child_val;
4560
4561 if (child_event->attr.inherit_stat)
4562 perf_event_read_event(child_event, child);
4563
4564 child_val = atomic64_read(&child_event->count);
4565
4566 /*
4567 * Add back the child's count to the parent's count:
4568 */
4569 atomic64_add(child_val, &parent_event->count);
4570 atomic64_add(child_event->total_time_enabled,
4571 &parent_event->child_total_time_enabled);
4572 atomic64_add(child_event->total_time_running,
4573 &parent_event->child_total_time_running);
4574
4575 /*
4576 * Remove this event from the parent's list
4577 */
4578 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4579 mutex_lock(&parent_event->child_mutex);
4580 list_del_init(&child_event->child_list);
4581 mutex_unlock(&parent_event->child_mutex);
4582
4583 /*
4584 * Release the parent event, if this was the last
4585 * reference to it.
4586 */
4587 fput(parent_event->filp);
4588}
4589
4590static void
4591__perf_event_exit_task(struct perf_event *child_event,
4592 struct perf_event_context *child_ctx,
4593 struct task_struct *child)
4594{
4595 struct perf_event *parent_event;
4596
4597 update_event_times(child_event);
4598 perf_event_remove_from_context(child_event);
4599
4600 parent_event = child_event->parent;
4601 /*
4602 * It can happen that parent exits first, and has events
4603 * that are still around due to the child reference. These
4604 * events need to be zapped - but otherwise linger.
4605 */
4606 if (parent_event) {
4607 sync_child_event(child_event, child);
4608 free_event(child_event);
4609 }
4610}
4611
4612/*
4613 * When a child task exits, feed back event values to parent events.
4614 */
4615void perf_event_exit_task(struct task_struct *child)
4616{
4617 struct perf_event *child_event, *tmp;
4618 struct perf_event_context *child_ctx;
4619 unsigned long flags;
4620
4621 if (likely(!child->perf_event_ctxp)) {
4622 perf_event_task(child, NULL, 0);
4623 return;
4624 }
4625
4626 local_irq_save(flags);
4627 /*
4628 * We can't reschedule here because interrupts are disabled,
4629 * and either child is current or it is a task that can't be
4630 * scheduled, so we are now safe from rescheduling changing
4631 * our context.
4632 */
4633 child_ctx = child->perf_event_ctxp;
4634 __perf_event_task_sched_out(child_ctx);
4635
4636 /*
4637 * Take the context lock here so that if find_get_context is
4638 * reading child->perf_event_ctxp, we wait until it has
4639 * incremented the context's refcount before we do put_ctx below.
4640 */
4641 spin_lock(&child_ctx->lock);
4642 child->perf_event_ctxp = NULL;
4643 /*
4644 * If this context is a clone, unclone it so it can't get
4645 * swapped to another process while we're removing all
4646 * the events from it.
4647 */
4648 unclone_ctx(child_ctx);
4649 spin_unlock_irqrestore(&child_ctx->lock, flags);
4650
4651 /*
4652 * Report the task dead after unscheduling the events so that we
4653 * won't get any samples after PERF_RECORD_EXIT. We can however still
4654 * get a few PERF_RECORD_READ events.
4655 */
4656 perf_event_task(child, child_ctx, 0);
4657
4658 /*
4659 * We can recurse on the same lock type through:
4660 *
4661 * __perf_event_exit_task()
4662 * sync_child_event()
4663 * fput(parent_event->filp)
4664 * perf_release()
4665 * mutex_lock(&ctx->mutex)
4666 *
4667 * But since it's the parent context it won't be the same instance.
4668 */
4669 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
4670
4671again:
4672 list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list,
4673 group_entry)
4674 __perf_event_exit_task(child_event, child_ctx, child);
4675
4676 /*
4677 * If the last event was a group event, it will have appended all
4678 * its siblings to the list, but we obtained 'tmp' before that, so it
4679 * still points to the list head terminating the iteration.
4680 */
4681 if (!list_empty(&child_ctx->group_list))
4682 goto again;
4683
4684 mutex_unlock(&child_ctx->mutex);
4685
4686 put_ctx(child_ctx);
4687}
4688
4689/*
4690 * Free an unexposed, unused context, as created by inheritance in
4691 * perf_event_init_task() below; used by fork() in case of failure.
4692 */
4693void perf_event_free_task(struct task_struct *task)
4694{
4695 struct perf_event_context *ctx = task->perf_event_ctxp;
4696 struct perf_event *event, *tmp;
4697
4698 if (!ctx)
4699 return;
4700
4701 mutex_lock(&ctx->mutex);
4702again:
4703 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) {
4704 struct perf_event *parent = event->parent;
4705
4706 if (WARN_ON_ONCE(!parent))
4707 continue;
4708
4709 mutex_lock(&parent->child_mutex);
4710 list_del_init(&event->child_list);
4711 mutex_unlock(&parent->child_mutex);
4712
4713 fput(parent->filp);
4714
4715 list_del_event(event, ctx);
4716 free_event(event);
4717 }
4718
4719 if (!list_empty(&ctx->group_list))
4720 goto again;
4721
4722 mutex_unlock(&ctx->mutex);
4723
4724 put_ctx(ctx);
4725}
4726
4727/*
4728 * Initialize the perf_event context in task_struct
4729 */
4730int perf_event_init_task(struct task_struct *child)
4731{
4732 struct perf_event_context *child_ctx, *parent_ctx;
4733 struct perf_event_context *cloned_ctx;
4734 struct perf_event *event;
4735 struct task_struct *parent = current;
4736 int inherited_all = 1;
4737 int ret = 0;
4738
4739 child->perf_event_ctxp = NULL;
4740
4741 mutex_init(&child->perf_event_mutex);
4742 INIT_LIST_HEAD(&child->perf_event_list);
4743
4744 if (likely(!parent->perf_event_ctxp))
4745 return 0;
4746
4747 /*
4748 * This is executed from the parent task context, so inherit
4749 * events that have been marked for cloning.
4750 * First allocate and initialize a context for the child.
4751 */
4752
4753 child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4754 if (!child_ctx)
4755 return -ENOMEM;
4756
4757 __perf_event_init_context(child_ctx, child);
4758 child->perf_event_ctxp = child_ctx;
4759 get_task_struct(child);
4760
4761 /*
4762 * If the parent's context is a clone, pin it so it won't get
4763 * swapped under us.
4764 */
4765 parent_ctx = perf_pin_task_context(parent);
4766
4767 /*
4768 * No need to check if parent_ctx != NULL here; since we saw
4769 * it non-NULL earlier, the only reason for it to become NULL
4770 * is if we exit, and since we're currently in the middle of
4771 * a fork we can't be exiting at the same time.
4772 */
4773
4774 /*
4775 * Lock the parent list. No need to lock the child - not PID
4776 * hashed yet and not running, so nobody can access it.
4777 */
4778 mutex_lock(&parent_ctx->mutex);
4779
4780 /*
4781 * We don't have to disable NMIs - we are only looking at
4782 * the list, not manipulating it:
4783 */
4784 list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) {
4785 if (event != event->group_leader)
4786 continue;
4787
4788 if (!event->attr.inherit) {
4789 inherited_all = 0;
4790 continue;
4791 }
4792
4793 ret = inherit_group(event, parent, parent_ctx,
4794 child, child_ctx);
4795 if (ret) {
4796 inherited_all = 0;
4797 break;
4798 }
4799 }
4800
4801 if (inherited_all) {
4802 /*
4803 * Mark the child context as a clone of the parent
4804 * context, or of whatever the parent is a clone of.
4805 * Note that if the parent is a clone, it could get
4806 * uncloned at any point, but that doesn't matter
4807 * because the list of events and the generation
4808 * count can't have changed since we took the mutex.
4809 */
4810 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
4811 if (cloned_ctx) {
4812 child_ctx->parent_ctx = cloned_ctx;
4813 child_ctx->parent_gen = parent_ctx->parent_gen;
4814 } else {
4815 child_ctx->parent_ctx = parent_ctx;
4816 child_ctx->parent_gen = parent_ctx->generation;
4817 }
4818 get_ctx(child_ctx->parent_ctx);
4819 }
4820
4821 mutex_unlock(&parent_ctx->mutex);
4822
4823 perf_unpin_context(parent_ctx);
4824
4825 return ret;
4826}
4827
4828static void __cpuinit perf_event_init_cpu(int cpu)
4829{
4830 struct perf_cpu_context *cpuctx;
4831
4832 cpuctx = &per_cpu(perf_cpu_context, cpu);
4833 __perf_event_init_context(&cpuctx->ctx, NULL);
4834
4835 spin_lock(&perf_resource_lock);
4836 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
4837 spin_unlock(&perf_resource_lock);
4838
4839 hw_perf_event_setup(cpu);
4840}
4841
4842#ifdef CONFIG_HOTPLUG_CPU
4843static void __perf_event_exit_cpu(void *info)
4844{
4845 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4846 struct perf_event_context *ctx = &cpuctx->ctx;
4847 struct perf_event *event, *tmp;
4848
4849 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry)
4850 __perf_event_remove_from_context(event);
4851}
4852static void perf_event_exit_cpu(int cpu)
4853{
4854 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4855 struct perf_event_context *ctx = &cpuctx->ctx;
4856
4857 mutex_lock(&ctx->mutex);
4858 smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
4859 mutex_unlock(&ctx->mutex);
4860}
4861#else
4862static inline void perf_event_exit_cpu(int cpu) { }
4863#endif
4864
4865static int __cpuinit
4866perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4867{
4868 unsigned int cpu = (long)hcpu;
4869
4870 switch (action) {
4871
4872 case CPU_UP_PREPARE:
4873 case CPU_UP_PREPARE_FROZEN:
4874 perf_event_init_cpu(cpu);
4875 break;
4876
4877 case CPU_ONLINE:
4878 case CPU_ONLINE_FROZEN:
4879 hw_perf_event_setup_online(cpu);
4880 break;
4881
4882 case CPU_DOWN_PREPARE:
4883 case CPU_DOWN_PREPARE_FROZEN:
4884 perf_event_exit_cpu(cpu);
4885 break;
4886
4887 default:
4888 break;
4889 }
4890
4891 return NOTIFY_OK;
4892}
4893
4894/*
4895 * This has to have a higher priority than migration_notifier in sched.c.
4896 */
4897static struct notifier_block __cpuinitdata perf_cpu_nb = {
4898 .notifier_call = perf_cpu_notify,
4899 .priority = 20,
4900};
4901
4902void __init perf_event_init(void)
4903{
4904 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4905 (void *)(long)smp_processor_id());
4906 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
4907 (void *)(long)smp_processor_id());
4908 register_cpu_notifier(&perf_cpu_nb);
4909}
4910
4911static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4912{
4913 return sprintf(buf, "%d\n", perf_reserved_percpu);
4914}
4915
4916static ssize_t
4917perf_set_reserve_percpu(struct sysdev_class *class,
4918 const char *buf,
4919 size_t count)
4920{
4921 struct perf_cpu_context *cpuctx;
4922 unsigned long val;
4923 int err, cpu, mpt;
4924
4925 err = strict_strtoul(buf, 10, &val);
4926 if (err)
4927 return err;
4928 if (val > perf_max_events)
4929 return -EINVAL;
4930
4931 spin_lock(&perf_resource_lock);
4932 perf_reserved_percpu = val;
4933 for_each_online_cpu(cpu) {
4934 cpuctx = &per_cpu(perf_cpu_context, cpu);
4935 spin_lock_irq(&cpuctx->ctx.lock);
4936 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
4937 perf_max_events - perf_reserved_percpu);
4938 cpuctx->max_pertask = mpt;
4939 spin_unlock_irq(&cpuctx->ctx.lock);
4940 }
4941 spin_unlock(&perf_resource_lock);
4942
4943 return count;
4944}
4945
4946static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4947{
4948 return sprintf(buf, "%d\n", perf_overcommit);
4949}
4950
4951static ssize_t
4952perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4953{
4954 unsigned long val;
4955 int err;
4956
4957 err = strict_strtoul(buf, 10, &val);
4958 if (err)
4959 return err;
4960 if (val > 1)
4961 return -EINVAL;
4962
4963 spin_lock(&perf_resource_lock);
4964 perf_overcommit = val;
4965 spin_unlock(&perf_resource_lock);
4966
4967 return count;
4968}
4969
4970static SYSDEV_CLASS_ATTR(
4971 reserve_percpu,
4972 0644,
4973 perf_show_reserve_percpu,
4974 perf_set_reserve_percpu
4975 );
4976
4977static SYSDEV_CLASS_ATTR(
4978 overcommit,
4979 0644,
4980 perf_show_overcommit,
4981 perf_set_overcommit
4982 );
4983
4984static struct attribute *perfclass_attrs[] = {
4985 &attr_reserve_percpu.attr,
4986 &attr_overcommit.attr,
4987 NULL
4988};
4989
4990static struct attribute_group perfclass_attr_group = {
4991 .attrs = perfclass_attrs,
4992 .name = "perf_events",
4993};
4994
4995static int __init perf_event_sysfs_init(void)
4996{
4997 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4998 &perfclass_attr_group);
4999}
5000device_initcall(perf_event_sysfs_init);
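/*
 * The two attributes above hang off the cpu sysdev class, so with sysfs
 * mounted in the usual place they are expected to show up as
 * /sys/devices/system/cpu/perf_events/reserve_percpu and
 * /sys/devices/system/cpu/perf_events/overcommit; writes to them are
 * parsed by perf_set_reserve_percpu() and perf_set_overcommit() above.
 */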