Diffstat (limited to 'include/linux/perf_event.h')

 include/linux/perf_event.h | 201
 1 file changed, 139 insertions(+), 62 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8e375440403..716f99b682c1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -203,8 +203,20 @@ struct perf_event_attr {
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
 				watermark      :  1, /* wakeup_watermark      */
-
-				__reserved_1   : 49;
+				/*
+				 * precise_ip:
+				 *
+				 *  0 - SAMPLE_IP can have arbitrary skid
+				 *  1 - SAMPLE_IP must have constant skid
+				 *  2 - SAMPLE_IP requested to have 0 skid
+				 *  3 - SAMPLE_IP must have 0 skid
+				 *
+				 *  See also PERF_RECORD_MISC_EXACT_IP
+				 */
+				precise_ip     :  2, /* skid constraint       */
+				mmap_data      :  1, /* non-exec mmap data    */
+
+				__reserved_1   : 46;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
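
The new precise_ip field lets user space ask the PMU to reduce sampling skid (on Intel hardware this is typically backed by PEBS). A minimal user-space sketch of requesting zero skid through perf_event_open(); error handling is trimmed, and the kernel may reject the request if the PMU cannot honour the constraint:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static int open_precise_cycles(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size          = sizeof(attr);
	attr.type          = PERF_TYPE_HARDWARE;
	attr.config        = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type   = PERF_SAMPLE_IP;
	attr.precise_ip    = 2;	/* request 0 skid; the call fails if the
				 * PMU cannot satisfy the constraint */

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}
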
@@ -287,11 +299,24 @@ struct perf_event_mmap_page {
 	__u64	data_tail;		/* user-space written tail */
 };
 
-#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
+
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
 
 struct perf_event_header {
 	__u32	type;
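
Because the guest modes push the cpumode values past 3, the mask widens from (3 << 0) to (7 << 0). A sketch of how a ring-buffer consumer might classify samples using only the defines above; note that PERF_RECORD_MISC_EXACT_IP is an independent flag bit, not a cpumode value:

static const char *sample_cpumode(const struct perf_event_header *hdr)
{
	switch (hdr->misc & PERF_RECORD_MISC_CPUMODE_MASK) {
	case PERF_RECORD_MISC_KERNEL:		return "kernel";
	case PERF_RECORD_MISC_USER:		return "user";
	case PERF_RECORD_MISC_HYPERVISOR:	return "hypervisor";
	case PERF_RECORD_MISC_GUEST_KERNEL:	return "guest kernel";
	case PERF_RECORD_MISC_GUEST_USER:	return "guest user";
	default:				return "unknown";
	}
	/* the exact-IP flag is tested separately:
	 *	if (hdr->misc & PERF_RECORD_MISC_EXACT_IP) ...
	 */
}
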
@@ -437,8 +462,15 @@ enum perf_callchain_context {
 
 #ifdef CONFIG_PERF_EVENTS
 # include <asm/perf_event.h>
+# include <asm/local64.h>
 #endif
 
+struct perf_guest_info_callbacks {
+	int (*is_in_guest) (void);
+	int (*is_user_mode) (void);
+	unsigned long (*get_guest_ip) (void);
+};
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <asm/hw_breakpoint.h>
 #endif
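
perf_guest_info_callbacks gives a hypervisor (KVM being the intended user) a way to tell perf, from PMI context, whether a guest was running and at what IP, so samples can be tagged with the GUEST_* misc values above. A sketch of registration; the kvm_guest_* helpers are hypothetical stand-ins, not real KVM symbols:

extern int kvm_guest_active(void);		/* hypothetical */
extern int kvm_guest_cpl(void);			/* hypothetical */
extern unsigned long kvm_guest_rip(void);	/* hypothetical */

static int my_is_in_guest(void)
{
	return kvm_guest_active();
}

static int my_is_user_mode(void)
{
	return kvm_guest_cpl() == 3;
}

static unsigned long my_get_guest_ip(void)
{
	return kvm_guest_rip();
}

static struct perf_guest_info_callbacks my_guest_cbs = {
	.is_in_guest	= my_is_in_guest,
	.is_user_mode	= my_is_user_mode,
	.get_guest_ip	= my_get_guest_ip,
};

/* at module init:  perf_register_guest_info_callbacks(&my_guest_cbs);   */
/* at module exit:  perf_unregister_guest_info_callbacks(&my_guest_cbs); */
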
@@ -455,6 +487,7 @@ enum perf_callchain_context {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <asm/atomic.h>
+#include <asm/local.h>
 
 #define PERF_MAX_STACK_DEPTH		255
 
@@ -468,6 +501,17 @@ struct perf_raw_record {
 	void				*data;
 };
 
+struct perf_branch_entry {
+	__u64				from;
+	__u64				to;
+	__u64				flags;
+};
+
+struct perf_branch_stack {
+	__u64				nr;
+	struct perf_branch_entry	entries[0];
+};
+
 struct task_struct;
 
 /**
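
perf_branch_stack ends in a zero-length array, so the nr count is followed directly by that many entries in memory. A minimal consumer-side sketch; the meaning of the flags word is left open here, since this patch only reserves it:

#include <linux/kernel.h>

static void walk_branch_stack(const struct perf_branch_stack *bs)
{
	__u64 i;

	for (i = 0; i < bs->nr; i++) {
		const struct perf_branch_entry *e = &bs->entries[i];

		/* e->from / e->to are the branch source and target */
		printk(KERN_DEBUG "branch %llu: %llx -> %llx\n",
		       (unsigned long long)i,
		       (unsigned long long)e->from,
		       (unsigned long long)e->to);
	}
}
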
@@ -489,14 +533,16 @@ struct hw_perf_event {
 		struct hrtimer	hrtimer;
 	};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-	/* breakpoint */
-	struct arch_hw_breakpoint	info;
+	struct { /* breakpoint */
+		struct arch_hw_breakpoint	info;
+		struct list_head		bp_list;
+	};
 #endif
 	};
-	atomic64_t			prev_count;
+	local64_t			prev_count;
 	u64				sample_period;
 	u64				last_period;
-	atomic64_t			period_left;
+	local64_t			period_left;
 	u64				interrupts;
 
 	u64				freq_time_stamp;
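
prev_count and period_left switch from atomic64_t to local64_t on the assumption that they are only ever touched from the event's own CPU (the read/update paths and the PMI), where local64 operations are cheaper than full atomics yet still NMI-safe. An illustrative delta-accumulation pattern under that assumption; my_pmu_read_counter() is a hypothetical raw-counter read:

static void my_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, now;

	/* an NMI may interrupt us, hence the cmpxchg loop */
	do {
		prev = local64_read(&hwc->prev_count);
		now  = my_pmu_read_counter(hwc);	/* hypothetical */
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	local64_add(now - prev, &event->count);
}
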
@@ -506,6 +552,11 @@ struct hw_perf_event {
 
 struct perf_event;
 
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
+
 /**
  * struct pmu - generic performance monitoring unit
  */
@@ -516,6 +567,30 @@ struct pmu {
 	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
+
+	/*
+	 * Group events scheduling is treated as a transaction: add group
+	 * events as a whole and perform one schedulability test. If the
+	 * test fails, roll back the whole group.
+	 */
+
+	/*
+	 * Start the transaction; after this, ->enable() doesn't need
+	 * to do schedulability tests.
+	 */
+	void (*start_txn)	(const struct pmu *pmu);
+	/*
+	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * then ->commit_txn() is required to perform one. On success
+	 * the transaction is closed. On error the transaction is kept
+	 * open until ->cancel_txn() is called.
+	 */
+	int  (*commit_txn)	(const struct pmu *pmu);
+	/*
+	 * Will cancel the transaction; assumes ->disable() is called
+	 * for each successful ->enable() during the transaction.
+	 */
+	void (*cancel_txn)	(const struct pmu *pmu);
 };
 
 /**
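
Taken together, the three hooks let group scheduling batch the per-event schedulability checks into a single test. An illustrative caller-side flow only (the real logic lives in kernel/perf_event.c); event_sched_in()/event_sched_out() are hypothetical stand-ins for the ->enable()/->disable() bookkeeping:

static int group_sched_in(struct perf_event *leader, const struct pmu *pmu)
{
	struct perf_event *sub, *done;

	pmu->start_txn(pmu);		/* suppress per-event tests */

	if (event_sched_in(leader))
		goto cancel;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (event_sched_in(sub)) {
			done = sub;
			goto undo;
		}
	}

	if (!pmu->commit_txn(pmu))	/* one test for the whole group */
		return 0;

	done = NULL;			/* commit failed: undo everything */
undo:
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub == done)
			break;
		event_sched_out(sub);	/* ->disable() each enabled event */
	}
	event_sched_out(leader);
cancel:
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}
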
@@ -530,25 +605,25 @@ enum perf_event_active_state {
 
 struct file;
 
-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE		0x01
+
+struct perf_buffer {
+	atomic_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
+	int				page_order;	/* allocation order  */
 #endif
-	int				data_order;
 	int				nr_pages;	/* nr of data pages  */
 	int				writable;	/* are we writable   */
-	int				nr_locked;	/* nr pages mlocked  */
 
 	atomic_t			poll;		/* POLL_ for wakeups */
-	atomic_t			events;		/* event_id limit    */
-
-	atomic_long_t			head;		/* write position    */
-	atomic_long_t			done_head;	/* completed head    */
 
-	atomic_t			lock;		/* concurrent writes */
-	atomic_t			wakeup;		/* needs a wakeup    */
-	atomic_t			lost;		/* nr records lost   */
+	local_t				head;		/* write position    */
+	local_t				nest;		/* nested writers    */
+	local_t				events;		/* event limit       */
+	local_t				wakeup;		/* wakeup stamp      */
+	local_t				lost;		/* nr records lost   */
 
 	long				watermark;	/* wakeup watermark  */
 
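
head and nest switch to local_t on the assumption that all writers run on the buffer's own CPU; nest replaces the old lock word so that an NMI landing in the middle of a write can nest inside the interrupted writer instead of deadlocking. A rough sketch of the resulting lock-free space reservation (the real perf_output_begin() also handles wakeups, the watermark, and size checks):

static unsigned long buffer_reserve(struct perf_buffer *buffer,
				    unsigned long size)
{
	unsigned long head, offset;

	do {
		offset = head = local_read(&buffer->head);
		head += size;
	} while (local_cmpxchg(&buffer->head, offset, head) != offset);

	return offset;	/* caller writes its record at this offset */
}
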
@@ -571,6 +646,17 @@ enum perf_group_flag {
 	PERF_GROUP_SOFTWARE = 0x1,
 };
 
+#define SWEVENT_HLIST_BITS	8
+#define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head		rcu_head;
+};
+
+#define PERF_ATTACH_CONTEXT	0x01
+#define PERF_ATTACH_GROUP	0x02
+
 /**
  * struct perf_event - performance event kernel representation:
  */
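
Software events move from a linear walk to a per-CPU hash table, sized by SWEVENT_HLIST_BITS. A sketch of bucket selection; the exact mixing of event type and id into the hash is an assumption here:

#include <linux/hash.h>

static struct hlist_head *
swevent_bucket(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 val = (type << 32) | event_id;

	return &hlist->heads[hash_64(val, SWEVENT_HLIST_BITS)];
}
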
@@ -579,14 +665,16 @@ struct perf_event {
 	struct list_head		group_entry;
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
+	struct hlist_node		hlist_entry;
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
-	struct perf_event		*output;
 	const struct pmu		*pmu;
 
 	enum perf_event_active_state	state;
-	atomic64_t			count;
+	unsigned int			attach_state;
+	local64_t			count;
+	atomic64_t			child_count;
 
 	/*
 	 * These are the total time in nanoseconds that the event
@@ -643,7 +731,9 @@ struct perf_event {
 	/* mmap bits */
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
-	struct perf_mmap_data		*data;
+	int				mmap_locked;
+	struct user_struct		*mmap_user;
+	struct perf_buffer		*buffer;
 
 	/* poll related */
 	wait_queue_head_t		waitq;
@@ -666,6 +756,7 @@ struct perf_event {
 	perf_overflow_handler_t		overflow_handler;
 
 #ifdef CONFIG_EVENT_TRACING
+	struct ftrace_event_call	*tp_event;
 	struct event_filter		*filter;
 #endif
 
@@ -726,6 +817,9 @@ struct perf_cpu_context {
 	int				active_oncpu;
 	int				max_pertask;
 	int				exclusive;
+	struct swevent_hlist		*swevent_hlist;
+	struct mutex			hlist_mutex;
+	int				hlist_refcount;
 
 	/*
 	 * Recursion avoidance:
@@ -737,12 +831,13 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
 	struct perf_event		*event;
-	struct perf_mmap_data		*data;
-	unsigned long			head;
-	unsigned long			offset;
+	struct perf_buffer		*buffer;
+	unsigned long			wakeup;
+	unsigned long			size;
+	void				*addr;
+	int				page;
 	int				nmi;
 	int				sample;
-	int				locked;
 };
 
 #ifdef CONFIG_PERF_EVENTS
@@ -769,9 +864,6 @@ extern void perf_disable(void);
 extern void perf_enable(void);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -842,8 +934,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-extern void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+#ifndef perf_arch_fetch_caller_regs
+static inline void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
 
 /*
  * Take a snapshot of the regs. Skip ip and frame pointer to
@@ -853,31 +947,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
  * - bp for callchains
  * - eflags, for future purposes, just in case
  */
-static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
-	unsigned long ip;
-
 	memset(regs, 0, sizeof(*regs));
 
-	switch (skip) {
-	case 1 :
-		ip = CALLER_ADDR0;
-		break;
-	case 2 :
-		ip = CALLER_ADDR1;
-		break;
-	case 3 :
-		ip = CALLER_ADDR2;
-		break;
-	case 4:
-		ip = CALLER_ADDR3;
-		break;
-	/* No need to support further for now */
-	default:
-		ip = 0;
-	}
-
-	return perf_arch_fetch_caller_regs(regs, ip, skip);
+	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
 static inline void
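
Dropping the skip argument works because every remaining caller wants its immediate caller's state, so CALLER_ADDR0 suffices. Architectures that can do better than the empty weak stub define perf_arch_fetch_caller_regs themselves before this point; a hypothetical x86-flavoured override might look like:

/* Hypothetical override, in the arch's asm/perf_event.h */
#define perf_arch_fetch_caller_regs(regs, __ip)			\
do {								\
	(regs)->ip = (__ip);					\
	/* also capture frame/stack pointer and flags here */	\
} while (0)
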
@@ -887,20 +961,17 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 		struct pt_regs hot_regs;
 
 		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs, 1);
+			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
 		}
 		__perf_sw_event(event_id, nr, nmi, regs, addr);
 	}
 }
 
-extern void __perf_event_mmap(struct vm_area_struct *vma);
-
-static inline void perf_event_mmap(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & VM_EXEC)
-		__perf_event_mmap(vma);
-}
+extern void perf_event_mmap(struct vm_area_struct *vma);
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
@@ -927,8 +998,9 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-			  int entry_size, struct pt_regs *regs);
+extern void perf_tp_event(u64 addr, u64 count, void *record,
+			  int entry_size, struct pt_regs *regs,
+			  struct hlist_head *head, int rctx);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -971,6 +1043,11 @@ perf_sw_event(u32 event_id, u64 nr, int nmi,
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
+
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
@@ -990,7 +1067,7 @@ static inline void perf_event_disable(struct perf_event *event) { }
 #define perf_cpu_notifier(fn)					\
 do {								\
 	static struct notifier_block fn##_nb __cpuinitdata =	\
-		{ .notifier_call = fn, .priority = 20 };	\
+		{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
 		(void *)(unsigned long)smp_processor_id());	\
 	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
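
Usage is unchanged: the macro still registers the notifier and replays the bring-up notifications for the current CPU, now at the dedicated CPU_PRI_PERF priority instead of a bare 20. A hypothetical PMU driver might use it like this:

static int __cpuinit
my_pmu_cpu_notify(struct notifier_block *nb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		pr_debug("allocating per-cpu state for cpu %u\n", cpu);
		break;
	case CPU_STARTING:
		/* runs on the incoming cpu itself */
		break;
	}
	return NOTIFY_OK;
}

/* in the driver's init path: */
perf_cpu_notifier(my_pmu_cpu_notify);
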