path: root/include/linux/perf_counter.h
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--	include/linux/perf_counter.h	462
1 file changed, 48 insertions(+), 414 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 972f90d7a32..368bd70f1d2 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -1,5 +1,9 @@
 /*
- * Performance counters:
+ * NOTE: this file will be removed in a future kernel release, it is
+ * provided as a courtesy copy of user-space code that relies on the
+ * old (pre-rename) symbols and constants.
+ *
+ * Performance events:
  *
  * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
  * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
@@ -131,19 +135,19 @@ enum perf_counter_sample_format {
  * as specified by attr.read_format:
  *
  * struct read_format {
  *	{ u64		value;
  *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
  *	  { u64		time_running; } && PERF_FORMAT_RUNNING
  *	  { u64		id;           } && PERF_FORMAT_ID
  *	} && !PERF_FORMAT_GROUP
  *
  *	{ u64		nr;
  *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
  *	  { u64		time_running; } && PERF_FORMAT_RUNNING
  *	  { u64		value;
  *	    { u64	id;           } && PERF_FORMAT_ID
  *	  }		cntr[nr];
  *	} && PERF_FORMAT_GROUP
  * };
  */
 enum perf_counter_read_format {
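
As a worked illustration of the layout above, here is a minimal user-space sketch (our code, not part of the header) that decodes a read() of a single counter, i.e. the !PERF_FORMAT_GROUP case. The ENABLED/RUNNING shorthand in the comment corresponds to the PERF_FORMAT_TOTAL_TIME_ENABLED/_RUNNING bits of enum perf_counter_read_format; the helper name read_counter() is ours.

#include <stdint.h>
#include <unistd.h>
#include <linux/perf_counter.h>

/* Decode a read() of a single counter (!PERF_FORMAT_GROUP layout).
 * 'format' is the attr.read_format the counter was opened with. */
static int read_counter(int fd, uint64_t format, uint64_t *value,
			uint64_t *enabled, uint64_t *running, uint64_t *id)
{
	uint64_t buf[4];	/* worst case: value + enabled + running + id */
	size_t n = 0;

	if (read(fd, buf, sizeof(buf)) < 0)
		return -1;

	*value = buf[n++];				/* always present */
	if (format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		*enabled = buf[n++];
	if (format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		*running = buf[n++];
	if (format & PERF_FORMAT_ID)
		*id = buf[n++];
	return 0;
}
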
@@ -199,10 +203,14 @@ struct perf_counter_attr {
 				inherit_stat   :  1, /* per task counts       */
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
+				watermark      :  1, /* wakeup_watermark      */
 
-				__reserved_1   : 50;
+				__reserved_1   : 49;
 
-	__u32			wakeup_events;	/* wakeup every n events */
+	union {
+		__u32		wakeup_events;	  /* wakeup every n events */
+		__u32		wakeup_watermark; /* bytes before wakeup   */
+	};
 	__u32			__reserved_2;
 
 	__u64			__reserved_3;
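
The new watermark bit selects which member of the union is live: with watermark set, wakeup_watermark arms a byte-count threshold on the ring buffer; with it clear, wakeup_events keeps the old wake-every-n-events behaviour. A hedged sketch of opening a counter this way (the helper name and the threshold values are ours):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

/* Open a cycle counter that wakes poll()ers by buffer fill level
 * rather than by event count. Threshold values are arbitrary. */
static int open_cycles_counter(pid_t pid)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size             = sizeof(attr);
	attr.type             = PERF_TYPE_HARDWARE;
	attr.config           = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period    = 100000;
	attr.watermark        = 1;	/* union holds bytes, not events */
	attr.wakeup_watermark = 8192;	/* wake up after ~8 KB of data  */

	return syscall(__NR_perf_counter_open, &attr, pid, -1, -1, 0);
}
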
@@ -310,9 +318,9 @@ enum perf_event_type {
 
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u64				id;
 	 *	u64				lost;
 	 * };
 	 */
 	PERF_EVENT_LOST			= 2,
@@ -332,6 +340,7 @@ enum perf_event_type {
 	 *	struct perf_event_header	header;
 	 *	u32				pid, ppid;
 	 *	u32				tid, ptid;
+	 *	u64				time;
 	 * };
 	 */
 	PERF_EVENT_EXIT			= 4,
@@ -352,16 +361,17 @@ enum perf_event_type {
 	 *	struct perf_event_header	header;
 	 *	u32				pid, ppid;
 	 *	u32				tid, ptid;
+	 *	{ u64				time;     } && PERF_SAMPLE_TIME
 	 * };
 	 */
 	PERF_EVENT_FORK			= 7,
 
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u32				pid, tid;
 	 *
 	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_EVENT_READ			= 8,
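
With this change both exit records (above) and fork records carry a trailing timestamp; for PERF_EVENT_FORK the comment makes it conditional on PERF_SAMPLE_TIME. A consumer-side sketch of the resulting wire layout as read from the mmap()ed ring buffer (the struct name fork_exit_event is ours):

#include <linux/perf_counter.h>

struct fork_exit_event {
	struct perf_event_header header;	/* .type: PERF_EVENT_FORK or PERF_EVENT_EXIT */
	__u32 pid, ppid;
	__u32 tid, ptid;
	__u64 time;				/* the newly appended timestamp */
};
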
@@ -377,23 +387,23 @@ enum perf_event_type {
 	 *	{ u64			id;	  } && PERF_SAMPLE_ID
 	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
 	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *
 	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
 	 *
 	 *	{ u64			nr,
 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
 	 *
 	 *	#
 	 *	# The RAW record below is opaque data wrt the ABI
 	 *	#
 	 *	# That is, the ABI doesn't make any promises wrt to
 	 *	# the stability of its content, it may vary depending
 	 *	# on event, hardware, kernel version and phase of
 	 *	# the moon.
 	 *	#
 	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
 	 *	#
 	 *
 	 *	{ u32			size;
 	 *	  char			data[size]; } && PERF_SAMPLE_RAW
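
The callchain and raw-data tails are the only variable-length parts of a sample record, so they are the only parts that need explicit cursor arithmetic when walking the ring buffer. A sketch (our helper, assuming p already points just past the fixed-size fields selected by sample_type):

#include <string.h>
#include <linux/perf_counter.h>

/* Advance past the two variable-length sample tails. 'p' must already
 * point just beyond the fixed-size fields selected by sample_type. */
static const unsigned char *
skip_callchain_and_raw(const unsigned char *p, __u64 sample_type)
{
	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		__u64 nr;
		memcpy(&nr, p, sizeof(nr));
		p += sizeof(__u64) * (1 + nr);		/* nr + ips[nr] */
	}
	if (sample_type & PERF_SAMPLE_RAW) {
		__u32 size;
		memcpy(&size, p, sizeof(size));
		p += sizeof(__u32) + size;		/* size + opaque data[size] */
	}
	return p;
}
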
@@ -416,392 +426,16 @@ enum perf_callchain_context {
 	PERF_CONTEXT_MAX		= (__u64)-4095,
 };
 
 #define PERF_FLAG_FD_NO_GROUP	(1U << 0)
 #define PERF_FLAG_FD_OUTPUT	(1U << 1)
 
-#ifdef __KERNEL__
 /*
- * Kernel-internal data types and definitions:
- */
-
-#ifdef CONFIG_PERF_COUNTERS
-# include <asm/perf_counter.h>
-#endif
-
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/spinlock.h>
-#include <linux/hrtimer.h>
-#include <linux/fs.h>
-#include <linux/pid_namespace.h>
-#include <asm/atomic.h>
-
-#define PERF_MAX_STACK_DEPTH		255
-
-struct perf_callchain_entry {
-	__u64				nr;
-	__u64				ip[PERF_MAX_STACK_DEPTH];
-};
-
-struct perf_raw_record {
-	u32				size;
-	void				*data;
-};
-
-struct task_struct;
-
-/**
- * struct hw_perf_counter - performance counter hardware details:
- */
-struct hw_perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
-	union {
-		struct { /* hardware */
-			u64		config;
-			unsigned long	config_base;
-			unsigned long	counter_base;
-			int		idx;
-		};
-		union { /* software */
-			atomic64_t	count;
-			struct hrtimer	hrtimer;
-		};
-	};
-	atomic64_t			prev_count;
-	u64				sample_period;
-	u64				last_period;
-	atomic64_t			period_left;
-	u64				interrupts;
-
-	u64				freq_count;
-	u64				freq_interrupts;
-	u64				freq_stamp;
-#endif
-};
-
-struct perf_counter;
-
-/**
- * struct pmu - generic performance monitoring unit
- */
-struct pmu {
-	int (*enable)			(struct perf_counter *counter);
-	void (*disable)			(struct perf_counter *counter);
-	void (*read)			(struct perf_counter *counter);
-	void (*unthrottle)		(struct perf_counter *counter);
-};
-
-/**
- * enum perf_counter_active_state - the states of a counter
- */
-enum perf_counter_active_state {
-	PERF_COUNTER_STATE_ERROR	= -2,
-	PERF_COUNTER_STATE_OFF		= -1,
-	PERF_COUNTER_STATE_INACTIVE	=  0,
-	PERF_COUNTER_STATE_ACTIVE	=  1,
-};
-
-struct file;
-
-struct perf_mmap_data {
-	struct rcu_head			rcu_head;
-	int				nr_pages;	/* nr of data pages  */
-	int				writable;	/* are we writable   */
-	int				nr_locked;	/* nr pages mlocked  */
-
-	atomic_t			poll;		/* POLL_ for wakeups */
-	atomic_t			events;		/* event limit       */
-
-	atomic_long_t			head;		/* write position    */
-	atomic_long_t			done_head;	/* completed head    */
-
-	atomic_t			lock;		/* concurrent writes */
-	atomic_t			wakeup;		/* needs a wakeup    */
-	atomic_t			lost;		/* nr records lost   */
-
-	struct perf_counter_mmap_page	*user_page;
-	void				*data_pages[0];
-};
-
-struct perf_pending_entry {
-	struct perf_pending_entry *next;
-	void (*func)(struct perf_pending_entry *);
-};
-
-/**
- * struct perf_counter - performance counter kernel representation:
- */
-struct perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
-	struct list_head		list_entry;
-	struct list_head		event_entry;
-	struct list_head		sibling_list;
-	int				nr_siblings;
-	struct perf_counter		*group_leader;
-	struct perf_counter		*output;
-	const struct pmu		*pmu;
-
-	enum perf_counter_active_state	state;
-	atomic64_t			count;
-
-	/*
-	 * These are the total time in nanoseconds that the counter
-	 * has been enabled (i.e. eligible to run, and the task has
-	 * been scheduled in, if this is a per-task counter)
-	 * and running (scheduled onto the CPU), respectively.
-	 *
-	 * They are computed from tstamp_enabled, tstamp_running and
-	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
-	 */
-	u64				total_time_enabled;
-	u64				total_time_running;
-
-	/*
-	 * These are timestamps used for computing total_time_enabled
-	 * and total_time_running when the counter is in INACTIVE or
-	 * ACTIVE state, measured in nanoseconds from an arbitrary point
-	 * in time.
-	 * tstamp_enabled: the notional time when the counter was enabled
-	 * tstamp_running: the notional time when the counter was scheduled on
-	 * tstamp_stopped: in INACTIVE state, the notional time when the
-	 *	counter was scheduled off.
-	 */
-	u64				tstamp_enabled;
-	u64				tstamp_running;
-	u64				tstamp_stopped;
-
-	struct perf_counter_attr	attr;
-	struct hw_perf_counter		hw;
-
-	struct perf_counter_context	*ctx;
-	struct file			*filp;
-
-	/*
-	 * These accumulate total time (in nanoseconds) that children
-	 * counters have been enabled and running, respectively.
-	 */
-	atomic64_t			child_total_time_enabled;
-	atomic64_t			child_total_time_running;
-
-	/*
-	 * Protect attach/detach and child_list:
-	 */
-	struct mutex			child_mutex;
-	struct list_head		child_list;
-	struct perf_counter		*parent;
-
-	int				oncpu;
-	int				cpu;
-
-	struct list_head		owner_entry;
-	struct task_struct		*owner;
-
-	/* mmap bits */
-	struct mutex			mmap_mutex;
-	atomic_t			mmap_count;
-	struct perf_mmap_data		*data;
-
-	/* poll related */
-	wait_queue_head_t		waitq;
-	struct fasync_struct		*fasync;
-
-	/* delayed work for NMIs and such */
-	int				pending_wakeup;
-	int				pending_kill;
-	int				pending_disable;
-	struct perf_pending_entry	pending;
-
-	atomic_t			event_limit;
-
-	void (*destroy)(struct perf_counter *);
-	struct rcu_head			rcu_head;
-
-	struct pid_namespace		*ns;
-	u64				id;
-#endif
-};
-
-/**
- * struct perf_counter_context - counter context structure
- *
- * Used as a container for task counters and CPU counters as well:
- */
-struct perf_counter_context {
-	/*
-	 * Protect the states of the counters in the list,
-	 * nr_active, and the list:
-	 */
-	spinlock_t			lock;
-	/*
-	 * Protect the list of counters.  Locking either mutex or lock
-	 * is sufficient to ensure the list doesn't change; to change
-	 * the list you need to lock both the mutex and the spinlock.
-	 */
-	struct mutex			mutex;
-
-	struct list_head		counter_list;
-	struct list_head		event_list;
-	int				nr_counters;
-	int				nr_active;
-	int				is_active;
-	int				nr_stat;
-	atomic_t			refcount;
-	struct task_struct		*task;
-
-	/*
-	 * Context clock, runs when context enabled.
-	 */
-	u64				time;
-	u64				timestamp;
-
-	/*
-	 * These fields let us detect when two contexts have both
-	 * been cloned (inherited) from a common ancestor.
-	 */
-	struct perf_counter_context	*parent_ctx;
-	u64				parent_gen;
-	u64				generation;
-	int				pin_count;
-	struct rcu_head			rcu_head;
-};
-
-/**
- * struct perf_counter_cpu_context - per cpu counter context structure
+ * In case some app still references the old symbols:
  */
-struct perf_cpu_context {
-	struct perf_counter_context	ctx;
-	struct perf_counter_context	*task_ctx;
-	int				active_oncpu;
-	int				max_pertask;
-	int				exclusive;
 
-	/*
-	 * Recursion avoidance:
-	 *
-	 * task, softirq, irq, nmi context
-	 */
-	int				recursion[4];
-};
+#define __NR_perf_counter_open __NR_perf_event_open
 
-#ifdef CONFIG_PERF_COUNTERS
+#define PR_TASK_PERF_COUNTERS_DISABLE	PR_TASK_PERF_EVENTS_DISABLE
+#define PR_TASK_PERF_COUNTERS_ENABLE	PR_TASK_PERF_EVENTS_ENABLE
 
-/*
- * Set by architecture code:
- */
-extern int perf_max_counters;
-
-extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
-
-extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_counter_task_sched_out(struct task_struct *task,
-					struct task_struct *next, int cpu);
-extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern int perf_counter_init_task(struct task_struct *child);
-extern void perf_counter_exit_task(struct task_struct *child);
-extern void perf_counter_free_task(struct task_struct *task);
-extern void set_perf_counter_pending(void);
-extern void perf_counter_do_pending(void);
-extern void perf_counter_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
-extern int perf_counter_task_disable(void);
-extern int perf_counter_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
-				  struct perf_cpu_context *cpuctx,
-				  struct perf_counter_context *ctx, int cpu);
-extern void perf_counter_update_userpage(struct perf_counter *counter);
-
-struct perf_sample_data {
-	struct pt_regs			*regs;
-	u64				addr;
-	u64				period;
-	struct perf_raw_record		*raw;
-};
-
-extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
-				 struct perf_sample_data *data);
-extern void perf_counter_output(struct perf_counter *counter, int nmi,
-				struct perf_sample_data *data);
-
-/*
- * Return 1 for a software counter, 0 for a hardware counter
- */
-static inline int is_software_counter(struct perf_counter *counter)
-{
-	return (counter->attr.type != PERF_TYPE_RAW) &&
-		(counter->attr.type != PERF_TYPE_HARDWARE) &&
-		(counter->attr.type != PERF_TYPE_HW_CACHE);
-}
-
-extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
-
-extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
-
-static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
-{
-	if (atomic_read(&perf_swcounter_enabled[event]))
-		__perf_swcounter_event(event, nr, nmi, regs, addr);
-}
-
-extern void __perf_counter_mmap(struct vm_area_struct *vma);
-
-static inline void perf_counter_mmap(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & VM_EXEC)
-		__perf_counter_mmap(vma);
-}
-
-extern void perf_counter_comm(struct task_struct *tsk);
-extern void perf_counter_fork(struct task_struct *tsk);
-
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
-
-extern int sysctl_perf_counter_paranoid;
-extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_sample_rate;
-
-extern void perf_counter_init(void);
-extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
-				 void *record, int entry_size);
-
-#ifndef perf_misc_flags
-#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_EVENT_MISC_USER : \
-				 PERF_EVENT_MISC_KERNEL)
-#define perf_instruction_pointer(regs)	instruction_pointer(regs)
-#endif
-
-#else
-static inline void
-perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
-static inline void
-perf_counter_task_sched_out(struct task_struct *task,
-			    struct task_struct *next, int cpu)		{ }
-static inline void
-perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
-static inline int perf_counter_init_task(struct task_struct *child)	{ return 0; }
-static inline void perf_counter_exit_task(struct task_struct *child)	{ }
-static inline void perf_counter_free_task(struct task_struct *task)	{ }
-static inline void perf_counter_do_pending(void)			{ }
-static inline void perf_counter_print_debug(void)			{ }
-static inline void perf_disable(void)					{ }
-static inline void perf_enable(void)					{ }
-static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
-static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
-
-static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi,
-		     struct pt_regs *regs, u64 addr)			{ }
-
-static inline void perf_counter_mmap(struct vm_area_struct *vma)	{ }
-static inline void perf_counter_comm(struct task_struct *tsk)		{ }
-static inline void perf_counter_fork(struct task_struct *tsk)		{ }
-static inline void perf_counter_init(void)				{ }
-#endif
-
-#endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_COUNTER_H */
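
The net effect of the three compat defines is that pre-rename user space keeps compiling unchanged against this courtesy header. For example, a classic open wrapper like the sketch below (the function name perf_counter_open is ours) still builds, with the old syscall name resolving to __NR_perf_event_open:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

/* Classic pre-rename open wrapper: still compiles, because the compat
 * define resolves the old syscall name to __NR_perf_event_open. */
static int perf_counter_open(struct perf_counter_attr *attr, pid_t pid,
			     int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
}
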