path: root/include/linux/perf_event.h
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h | 141
1 file changed, 113 insertions(+), 28 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 95477038a72a..5d0266d94985 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -203,8 +203,19 @@ struct perf_event_attr {
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
 				watermark      :  1, /* wakeup_watermark      */
+				/*
+				 * precise_ip:
+				 *
+				 *  0 - SAMPLE_IP can have arbitrary skid
+				 *  1 - SAMPLE_IP must have constant skid
+				 *  2 - SAMPLE_IP requested to have 0 skid
+				 *  3 - SAMPLE_IP must have 0 skid
+				 *
+				 *  See also PERF_RECORD_MISC_EXACT_IP
+				 */
+				precise_ip     :  2, /* skid constraint       */
 
-				__reserved_1   : 49;
+				__reserved_1   : 47;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
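The four precise_ip levels correspond to the :p/:pp/:ppp event modifiers. As an illustration (not part of this patch), a userspace profiler could probe for the best supported level by retrying perf_event_open with decreasing values; everything below is a hedged sketch:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Illustrative only: request the most precise IP the PMU supports,
     * backing off one level at a time. */
    static int open_cycles_precise(pid_t pid, int cpu)
    {
            struct perf_event_attr attr;
            int precise, fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.sample_type = PERF_SAMPLE_IP;
            attr.sample_period = 100000;

            for (precise = 2; precise >= 0; precise--) {
                    attr.precise_ip = precise;
                    fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
                    if (fd >= 0)
                            return fd;      /* this precision level works */
            }
            return -1;
    }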
@@ -287,11 +298,24 @@ struct perf_event_mmap_page {
 	__u64	data_tail;		/* user-space written tail */
 };
 
-#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
+
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
 
 struct perf_event_header {
 	__u32	type;
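A consumer of the mmap ring buffer can now distinguish guest from host samples via the widened cpumode mask, and test PERF_RECORD_MISC_EXACT_IP to learn whether the sampled IP is skid-free. A minimal decode sketch; the helper name is illustrative:

    /* Illustrative decode of the header misc field; "hdr" is assumed to
     * point at a record pulled from the mmap buffer. */
    static const char *sample_cpumode(struct perf_event_header *hdr)
    {
            switch (hdr->misc & PERF_RECORD_MISC_CPUMODE_MASK) {
            case PERF_RECORD_MISC_KERNEL:           return "kernel";
            case PERF_RECORD_MISC_USER:             return "user";
            case PERF_RECORD_MISC_HYPERVISOR:       return "hypervisor";
            case PERF_RECORD_MISC_GUEST_KERNEL:     return "guest kernel";
            case PERF_RECORD_MISC_GUEST_USER:       return "guest user";
            default:                                return "unknown";
            }
    }

For a PERF_RECORD_SAMPLE record, `hdr->misc & PERF_RECORD_MISC_EXACT_IP` then tells the consumer the IP is the exact triggering instruction.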
@@ -439,6 +463,12 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif
 
+struct perf_guest_info_callbacks {
+	int (*is_in_guest) (void);
+	int (*is_user_mode) (void);
+	unsigned long (*get_guest_ip) (void);
+};
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <asm/hw_breakpoint.h>
 #endif
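The intended user is a hypervisor module that can answer, at PMI time, whether a guest was running, whether it was in user mode, and where its instruction pointer was. A sketch under assumed kvm_*-style helpers (all names here are hypothetical):

    static int example_is_in_guest(void)
    {
            return kvm_vcpu_running();      /* hypothetical helper */
    }

    static int example_is_user_mode(void)
    {
            return kvm_guest_cpl() == 3;    /* hypothetical helper */
    }

    static unsigned long example_get_guest_ip(void)
    {
            return kvm_guest_rip();         /* hypothetical helper */
    }

    static struct perf_guest_info_callbacks example_guest_cbs = {
            .is_in_guest    = example_is_in_guest,
            .is_user_mode   = example_is_user_mode,
            .get_guest_ip   = example_get_guest_ip,
    };

Registration of this struct is shown with the extern declarations further down.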
@@ -455,6 +485,7 @@ enum perf_callchain_context {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <asm/atomic.h>
+#include <asm/local.h>
 
 #define PERF_MAX_STACK_DEPTH		255
 
@@ -468,6 +499,17 @@ struct perf_raw_record {
 	void				*data;
 };
 
+struct perf_branch_entry {
+	__u64				from;
+	__u64				to;
+	__u64				flags;
+};
+
+struct perf_branch_stack {
+	__u64				nr;
+	struct perf_branch_entry	entries[0];
+};
+
 struct task_struct;
 
 /**
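entries[] is a flexible array sized by nr, so a branch stack can be walked as in this sketch (assumes bs points into valid sample data):

    /* Illustrative walk of a branch stack. */
    static void print_branches(struct perf_branch_stack *bs)
    {
            unsigned long long i;

            for (i = 0; i < bs->nr; i++)
                    printk(KERN_DEBUG "branch %llu: %#llx -> %#llx\n", i,
                           (unsigned long long)bs->entries[i].from,
                           (unsigned long long)bs->entries[i].to);
    }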
@@ -506,6 +548,8 @@ struct hw_perf_event {
 
 struct perf_event;
 
+#define PERF_EVENT_TXN_STARTED 1
+
 /**
  * struct pmu - generic performance monitoring unit
  */
@@ -516,6 +560,16 @@ struct pmu {
 	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
+
+	/*
+	 * Group events scheduling is treated as a transaction:
+	 * add group events as a whole and perform one schedulability test.
+	 * If the test fails, roll back the whole group.
+	 */
+
+	void (*start_txn)	(const struct pmu *pmu);
+	void (*cancel_txn)	(const struct pmu *pmu);
+	int  (*commit_txn)	(const struct pmu *pmu);
 };
 
 /**
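The caller-side flow this enables looks roughly like the sketch below; event_sched_in stands in for the core's per-event scheduling step, and the error handling is simplified relative to the real scheduler:

    /* Hypothetical caller-side flow: schedule a whole group inside one
     * PMU transaction, with a single schedulability test at commit. */
    static int group_sched_in_sketch(struct perf_event *leader,
                                     const struct pmu *pmu)
    {
            struct perf_event *sub;

            pmu->start_txn(pmu);

            if (event_sched_in(leader))             /* placeholder step */
                    goto fail;
            list_for_each_entry(sub, &leader->sibling_list, group_entry)
                    if (event_sched_in(sub))
                            goto fail;

            if (!pmu->commit_txn(pmu))              /* one schedulability test */
                    return 0;
    fail:
            pmu->cancel_txn(pmu);                   /* roll back the whole group */
            return -EAGAIN;
    }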
@@ -531,24 +585,22 @@ enum perf_event_active_state {
 struct file;
 
 struct perf_mmap_data {
+	atomic_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
+	int				page_order;	/* allocation order  */
 #endif
-	int				data_order;
 	int				nr_pages;	/* nr of data pages  */
 	int				writable;	/* are we writable   */
-	int				nr_locked;	/* nr pages mlocked  */
 
 	atomic_t			poll;		/* POLL_ for wakeups */
-	atomic_t			events;		/* event_id limit    */
-
-	atomic_long_t			head;		/* write position    */
-	atomic_long_t			done_head;	/* completed head    */
 
-	atomic_t			lock;		/* concurrent writes */
-	atomic_t			wakeup;		/* needs a wakeup    */
-	atomic_t			lost;		/* nr records lost   */
+	local_t				head;		/* write position    */
+	local_t				nest;		/* nested writers    */
+	local_t				events;		/* event limit       */
+	local_t				wakeup;		/* wakeup stamp      */
+	local_t				lost;		/* nr records lost   */
 
 	long				watermark;	/* wakeup watermark  */
 
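local_t is cheaper than atomic_t here because these fields are only ever written from their own CPU, including from NMI context, so no LOCK-prefixed instructions are needed. A sketch of the space-reservation idiom this enables (illustrative, not the kernel's exact code):

    /* Reserve "size" bytes in the per-CPU buffer; safe against NMIs on
     * the same CPU because local_cmpxchg is a single-CPU atomic. */
    static unsigned long reserve_sketch(struct perf_mmap_data *data,
                                        unsigned long size)
    {
            unsigned long head, offset;

            do {
                    offset = local_read(&data->head);
                    head = offset + size;
            } while (local_cmpxchg(&data->head, offset, head) != offset);

            return offset;
    }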
@@ -571,6 +623,17 @@ enum perf_group_flag {
 	PERF_GROUP_SOFTWARE		= 0x1,
 };
 
+#define SWEVENT_HLIST_BITS		8
+#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head			rcu_head;
+};
+
+#define PERF_ATTACH_CONTEXT	0x01
+#define PERF_ATTACH_GROUP	0x02
+
 /**
  * struct perf_event - performance event kernel representation:
  */
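A hypothetical bucket helper shows how the fixed-size table would be indexed: fold the event type and id into one value and hash it into one of the 256 chains with hash_64 from <linux/hash.h>, then walk that chain under RCU:

    #include <linux/hash.h>

    /* Hypothetical helper; the exact hashing scheme is an assumption. */
    static struct hlist_head *
    swevent_bucket(struct swevent_hlist *hlist, u64 type, u32 event_id)
    {
            u64 val = event_id | (type << 32);

            return &hlist->heads[hash_64(val, SWEVENT_HLIST_BITS)];
    }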
@@ -579,13 +642,14 @@ struct perf_event {
 	struct list_head		group_entry;
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
+	struct hlist_node		hlist_entry;
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
-	struct perf_event		*output;
 	const struct pmu		*pmu;
 
 	enum perf_event_active_state	state;
+	unsigned int			attach_state;
 	atomic64_t			count;
 
 	/*
@@ -643,6 +707,8 @@ struct perf_event {
 	/* mmap bits */
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
+	int				mmap_locked;
+	struct user_struct		*mmap_user;
 	struct perf_mmap_data		*data;
 
 	/* poll related */
@@ -666,6 +732,7 @@ struct perf_event {
 	perf_overflow_handler_t		overflow_handler;
 
 #ifdef CONFIG_EVENT_TRACING
+	struct ftrace_event_call	*tp_event;
 	struct event_filter		*filter;
 #endif
 
@@ -726,6 +793,9 @@ struct perf_cpu_context {
 	int				active_oncpu;
 	int				max_pertask;
 	int				exclusive;
+	struct swevent_hlist		*swevent_hlist;
+	struct mutex			hlist_mutex;
+	int				hlist_refcount;
 
 	/*
 	 * Recursion avoidance:
@@ -738,11 +808,12 @@ struct perf_cpu_context {
 struct perf_output_handle {
 	struct perf_event		*event;
 	struct perf_mmap_data		*data;
-	unsigned long			head;
-	unsigned long			offset;
+	unsigned long			wakeup;
+	unsigned long			size;
+	void				*addr;
+	int				page;
 	int				nmi;
 	int				sample;
-	int				locked;
 };
 
 #ifdef CONFIG_PERF_EVENTS
@@ -769,9 +840,6 @@ extern void perf_disable(void);
 extern void perf_enable(void);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -842,13 +910,6 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-static inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
-{
-	if (atomic_read(&perf_swevent_enabled[event_id]))
-		__perf_sw_event(event_id, nr, nmi, regs, addr);
-}
-
 extern void
 perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
 
@@ -887,6 +948,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
 	return perf_arch_fetch_caller_regs(regs, ip, skip);
 }
 
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+	if (atomic_read(&perf_swevent_enabled[event_id])) {
+		struct pt_regs hot_regs;
+
+		if (!regs) {
+			perf_fetch_caller_regs(&hot_regs, 1);
+			regs = &hot_regs;
+		}
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
+	}
+}
+
 extern void __perf_event_mmap(struct vm_area_struct *vma);
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)
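Call sites keep their old shape, and passing NULL regs now works from any context: the helper synthesizes registers from the caller. Two sketches matching the style of in-tree usage, one with real fault registers and one without:

    /* From a fault handler, pass the registers you already have: */
    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

    /* From plain kernel code, let the helper fetch caller regs: */
    perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);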
@@ -895,6 +970,10 @@ static inline void perf_event_mmap(struct vm_area_struct *vma)
 		__perf_event_mmap(vma);
 }
 
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
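Pairing these with the perf_guest_info_callbacks sketch earlier, a hypothetical module would register on init and unregister on exit:

    static int __init example_init(void)
    {
            return perf_register_guest_info_callbacks(&example_guest_cbs);
    }

    static void __exit example_exit(void)
    {
            perf_unregister_guest_info_callbacks(&example_guest_cbs);
    }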
@@ -920,8 +999,9 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-			  int entry_size, struct pt_regs *regs);
+extern void perf_tp_event(u64 addr, u64 count, void *record,
+			  int entry_size, struct pt_regs *regs,
+			  struct hlist_head *head);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -964,6 +1044,11 @@ perf_sw_event(u32 event_id, u64 nr, int nmi,
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
+
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }