Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--	include/linux/perf_event.h	97
1 file changed, 48 insertions(+), 49 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5d0266d94985..716f99b682c1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -214,8 +214,9 @@ struct perf_event_attr {
 				 * See also PERF_RECORD_MISC_EXACT_IP
 				 */
 				precise_ip     :  2, /* skid constraint       */
+				mmap_data      :  1, /* non-exec mmap data    */

-				__reserved_1   : 47;
+				__reserved_1   : 46;

 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
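The new mmap_data bit extends PERF_RECORD_MMAP records to non-executable (data) mappings. As a rough user-space sketch, not part of this patch, a profiler on a kernel with this change might request both kinds of mmap records like this (the event type chosen is arbitrary, the helper name invented):

    /* Hypothetical user-space sketch: ask for mmap records on both
     * exec and non-exec (data) mappings via the new mmap_data bit. */
    #include <linux/perf_event.h>
    #include <asm/unistd.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_mmap_data_event(pid_t pid)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size      = sizeof(attr);
            attr.type      = PERF_TYPE_SOFTWARE;
            attr.config    = PERF_COUNT_SW_TASK_CLOCK; /* arbitrary event choice */
            attr.mmap      = 1;  /* exec mmaps, as before */
            attr.mmap_data = 1;  /* new: data (non-exec) mmaps too */

            return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
    }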
@@ -461,6 +462,7 @@ enum perf_callchain_context {

 #ifdef CONFIG_PERF_EVENTS
 # include <asm/perf_event.h>
+# include <asm/local64.h>
 #endif

 struct perf_guest_info_callbacks {
@@ -531,14 +533,16 @@ struct hw_perf_event {
 		struct hrtimer	hrtimer;
 	};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-	/* breakpoint */
-	struct arch_hw_breakpoint	info;
+	struct { /* breakpoint */
+		struct arch_hw_breakpoint	info;
+		struct list_head		bp_list;
+	};
 #endif
 	};
-	atomic64_t			prev_count;
+	local64_t			prev_count;
 	u64				sample_period;
 	u64				last_period;
-	atomic64_t			period_left;
+	local64_t			period_left;
 	u64				interrupts;

 	u64				freq_time_stamp;
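The atomic64_t to local64_t conversions are valid because prev_count and period_left are only ever updated from the CPU that owns the event, so the cheaper local64 operations suffice. A minimal sketch of such an update path, assuming single-CPU ownership (not the actual driver code):

    #include <asm/local64.h>

    /* Simplified sketch: only the owning CPU touches these fields, so
     * plain local64 ops are enough; no cross-CPU atomics needed. */
    static void sketch_update_count(struct hw_perf_event *hwc, u64 now)
    {
            u64 prev = local64_read(&hwc->prev_count);

            local64_set(&hwc->prev_count, now);
            local64_sub(now - prev, &hwc->period_left);
    }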
@@ -548,7 +552,10 @@ struct hw_perf_event {

 struct perf_event;

-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1

 /**
  * struct pmu - generic performance monitoring unit
@@ -562,14 +569,28 @@ struct pmu {
 	void (*unthrottle)		(struct perf_event *event);

 	/*
-	 * group events scheduling is treated as a transaction,
-	 * add group events as a whole and perform one schedulability test.
-	 * If test fails, roll back the whole group
+	 * Group events scheduling is treated as a transaction, add group
+	 * events as a whole and perform one schedulability test. If the test
+	 * fails, roll back the whole group.
 	 */

+	/*
+	 * Start the transaction, after this ->enable() doesn't need
+	 * to do schedulability tests.
+	 */
 	void (*start_txn)	(const struct pmu *pmu);
-	void (*cancel_txn)	(const struct pmu *pmu);
+	/*
+	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * then ->commit_txn() is required to perform one. On success
+	 * the transaction is closed. On error the transaction is kept
+	 * open until ->cancel_txn() is called.
+	 */
 	int  (*commit_txn)	(const struct pmu *pmu);
+	/*
+	 * Will cancel the transaction, assumes ->disable() is called for
+	 * each successful ->enable() during the transaction.
+	 */
+	void (*cancel_txn)	(const struct pmu *pmu);
 };

 /**
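A condensed sketch of how the core is expected to pair these hooks when scheduling a group, loosely modeled on the group_sched_in() path; error handling and the ->disable() rollback of already-enabled siblings are elided:

    /* Condensed sketch of the intended calling sequence; rollback of
     * partially enabled siblings via ->disable() is omitted. */
    static int sketch_group_sched_in(struct perf_event *group_event)
    {
            const struct pmu *pmu = group_event->pmu;
            struct perf_event *event;

            pmu->start_txn(pmu);    /* ->enable() may skip its schedulability test */

            if (pmu->enable(group_event))
                    goto fail;

            list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                    if (event->pmu->enable(event))
                            goto fail;
            }

            if (!pmu->commit_txn(pmu))      /* one test for the whole group */
                    return 0;
    fail:
            pmu->cancel_txn(pmu);
            return -EAGAIN;
    }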
@@ -584,7 +605,9 @@ enum perf_event_active_state {

 struct file;

-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE		0x01
+
+struct perf_buffer {
 	atomic_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
@@ -650,7 +673,8 @@ struct perf_event {

 	enum perf_event_active_state	state;
 	unsigned int			attach_state;
-	atomic64_t			count;
+	local64_t			count;
+	atomic64_t			child_count;

 	/*
 	 * These are the total time in nanoseconds that the event
@@ -709,7 +733,7 @@ struct perf_event {
 	atomic_t			mmap_count;
 	int				mmap_locked;
 	struct user_struct		*mmap_user;
-	struct perf_mmap_data		*data;
+	struct perf_buffer		*buffer;

 	/* poll related */
 	wait_queue_head_t		waitq;
@@ -807,7 +831,7 @@ struct perf_cpu_context {

 struct perf_output_handle {
 	struct perf_event		*event;
-	struct perf_mmap_data		*data;
+	struct perf_buffer		*buffer;
 	unsigned long			wakeup;
 	unsigned long			size;
 	void				*addr;
@@ -910,8 +934,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

-extern void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+#ifndef perf_arch_fetch_caller_regs
+static inline void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif

 /*
  * Take a snapshot of the regs. Skip ip and frame pointer to
@@ -921,31 +947,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
  * - bp for callchains
  * - eflags, for future purposes, just in case
  */
-static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
-	unsigned long ip;
-
 	memset(regs, 0, sizeof(*regs));

-	switch (skip) {
-	case 1 :
-		ip = CALLER_ADDR0;
-		break;
-	case 2 :
-		ip = CALLER_ADDR1;
-		break;
-	case 3 :
-		ip = CALLER_ADDR2;
-		break;
-	case 4:
-		ip = CALLER_ADDR3;
-		break;
-	/* No need to support further for now */
-	default:
-		ip = 0;
-	}
-
-	return perf_arch_fetch_caller_regs(regs, ip, skip);
+	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }

 static inline void
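With the skip argument dropped, every caller passes its own return address (CALLER_ADDR0), and the #ifndef stub above lets an architecture supply the hook as a macro instead. A hedged sketch of what such an override can look like (x86-flavoured register names; the frame-pointer detail is illustrative only):

    /* Hedged sketch of an arch override, defined before this header is
     * included; field names follow x86, other arches differ. */
    #define perf_arch_fetch_caller_regs(regs, __ip) do {		\
            (regs)->ip    = (__ip);					\
            (regs)->bp    = (unsigned long)__builtin_frame_address(0);	\
            (regs)->cs    = __KERNEL_CS;				\
            (regs)->flags = 0;						\
    } while (0)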
@@ -955,21 +961,14 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 		struct pt_regs hot_regs;

 		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs, 1);
+			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
 		}
 		__perf_sw_event(event_id, nr, nmi, regs, addr);
 	}
 }

-extern void __perf_event_mmap(struct vm_area_struct *vma);
-
-static inline void perf_event_mmap(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & VM_EXEC)
-		__perf_event_mmap(vma);
-}
-
+extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1001,7 +1000,7 @@ static inline bool perf_paranoid_kernel(void)
 extern void perf_event_init(void);
 extern void perf_tp_event(u64 addr, u64 count, void *record,
 			  int entry_size, struct pt_regs *regs,
-			  struct hlist_head *head);
+			  struct hlist_head *head, int rctx);
 extern void perf_bp_event(struct perf_event *event, void *data);

 #ifndef perf_misc_flags
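perf_tp_event() now receives the software-event recursion context from its caller, presumably so that it can release it itself. A condensed sketch of the resulting calling convention; the dispatch helper is hypothetical, while perf_swevent_get_recursion_context() is the existing API:

    /* Hypothetical tracepoint dispatch sketch: the caller obtains the
     * recursion context and hands it to perf_tp_event(), which is now
     * expected to put it. */
    static void sketch_tp_dispatch(u64 addr, u64 count, void *record,
                                   int entry_size, struct hlist_head *head)
    {
            struct pt_regs regs;
            int rctx = perf_swevent_get_recursion_context();

            if (rctx < 0)
                    return;

            perf_fetch_caller_regs(&regs);
            perf_tp_event(addr, count, record, entry_size, &regs, head, rctx);
    }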
@@ -1068,7 +1067,7 @@ static inline void perf_event_disable(struct perf_event *event) { }
 #define perf_cpu_notifier(fn)						\
 do {									\
 	static struct notifier_block fn##_nb __cpuinitdata =		\
-		{ .notifier_call = fn, .priority = 20 };		\
+		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
 		(void *)(unsigned long)smp_processor_id());		\
 	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
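The hard-coded priority 20 becomes CPU_PRI_PERF, the same value but named centrally in <linux/cpu.h> so hotplug ordering is documented in one place. A hypothetical usage sketch of the macro, with the callback and init function names invented for illustration:

    /* Hypothetical callback; names invented for illustration. */
    static int __cpuinit my_perf_cpu_notify(struct notifier_block *nb,
                                            unsigned long action, void *hcpu)
    {
            switch (action & ~CPU_TASKS_FROZEN) {
            case CPU_UP_PREPARE:
                    /* allocate per-cpu state for (unsigned long)hcpu */
                    break;
            case CPU_DEAD:
                    /* tear it down again */
                    break;
            }
            return NOTIFY_OK;
    }

    static int __init my_init(void)
    {
            /* registers the block and replays bring-up for this CPU */
            perf_cpu_notifier(my_perf_cpu_notify);
            return 0;
    }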