author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8
parent    6a00f206debf8a5c8899055726ad127dbeeed098
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h  352
1 files changed, 241 insertions, 111 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 716f99b682c1..e0786e35f247 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -2,8 +2,8 @@
  * Performance events:
  *
  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
  *
  * Data type definitions, declarations, prototypes.
  *
@@ -52,6 +52,8 @@ enum perf_hw_id {
 	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
 	PERF_COUNT_HW_BRANCH_MISSES		= 5,
 	PERF_COUNT_HW_BUS_CYCLES		= 6,
+	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
+	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
 
 	PERF_COUNT_HW_MAX,			/* non-ABI */
 };
@@ -135,14 +137,14 @@ enum perf_event_sample_format {
  *
  * struct read_format {
  *	{ u64		value;
- *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
- *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *	  { u64		id;           } && PERF_FORMAT_ID
  *	} && !PERF_FORMAT_GROUP
  *
  *	{ u64		nr;
- *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
- *	  { u64		time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *	  { u64		value;
  *	    { u64	id;           } && PERF_FORMAT_ID
  *	} cntr[nr];
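
For illustration (not part of the patch), a minimal userspace sketch of parsing the PERF_FORMAT_GROUP read layout documented above; struct group_read, the 16-counter bound, and dump_group() are assumptions of this sketch, not kernel API:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Mirrors the read_format comment for a group leader opened with
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING. */
struct group_read {
	uint64_t nr;			/* number of counters in the group */
	uint64_t time_enabled;
	uint64_t time_running;
	struct {
		uint64_t value;
		uint64_t id;
	} cntr[16];			/* arbitrary upper bound for the sketch */
};

static void dump_group(int group_fd)
{
	struct group_read r;

	if (read(group_fd, &r, sizeof(r)) < 0)
		return;
	for (uint64_t i = 0; i < r.nr && i < 16; i++)
		printf("id %llu = %llu (enabled %llu, running %llu)\n",
		       (unsigned long long)r.cntr[i].id,
		       (unsigned long long)r.cntr[i].value,
		       (unsigned long long)r.time_enabled,
		       (unsigned long long)r.time_running);
}
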
@@ -215,8 +217,9 @@ struct perf_event_attr {
 				 */
 				precise_ip     :  2, /* skid constraint       */
 				mmap_data      :  1, /* non-exec mmap data    */
+				sample_id_all  :  1, /* sample_type all events */
 
-				__reserved_1   : 46;
+				__reserved_1   : 45;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -224,8 +227,14 @@ struct perf_event_attr {
 	};
 
 	__u32			bp_type;
-	__u64			bp_addr;
-	__u64			bp_len;
+	union {
+		__u64		bp_addr;
+		__u64		config1; /* extension of config */
+	};
+	union {
+		__u64		bp_len;
+		__u64		config2; /* extension of config1 */
+	};
 };
 
 /*
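
The new config1/config2 words simply extend the user-supplied event encoding; a hedged userspace sketch of filling them through the perf_event_open() syscall (not part of the patch; the encodings below are placeholders, not real event codes):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_raw_event_with_extra(unsigned long long config,
				     unsigned long long extra)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = config;	/* primary event encoding */
	attr.config1 = extra;	/* extension of config, e.g. an extra MSR value */
	attr.disabled = 1;

	/* measure the calling thread on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
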
@@ -327,6 +336,15 @@ struct perf_event_header {
 enum perf_event_type {
 
 	/*
+	 * If perf_event_attr.sample_id_all is set then all event types will
+	 * have the sample_type selected fields related to where/when
+	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
+	 * described in PERF_RECORD_SAMPLE below, it will be stashed just after
+	 * the perf_event_header and the fields already present for the existing
+	 * fields, i.e. at the end of the payload. That way a newer perf.data
+	 * file will be supported by older perf tools, with these new optional
+	 * fields being ignored.
+	 *
 	 * The MMAP events record the PROT_EXEC mappings so that we can
 	 * correlate userspace IPs to code. They have the following structure:
 	 *
@@ -452,8 +470,9 @@ enum perf_callchain_context {
 	PERF_CONTEXT_MAX		= (__u64)-4095,
 };
 
 #define PERF_FLAG_FD_NO_GROUP		(1U << 0)
 #define PERF_FLAG_FD_OUTPUT		(1U << 1)
+#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */
 
 #ifdef __KERNEL__
 /*
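
A rough usage sketch of the new PERF_FLAG_PID_CGROUP mode (not part of the patch; the cgroup path and event choice are placeholders): the pid argument carries an open fd of a perf_event-cgroup directory and the event must be per-cpu (cpu >= 0):

#include <fcntl.h>
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_cgroup_cycles(const char *cgroup_dir, int cpu)
{
	struct perf_event_attr attr;
	int cgrp_fd, ev_fd;

	cgrp_fd = open(cgroup_dir, O_RDONLY);	/* e.g. ".../perf_event/mygroup" */
	if (cgrp_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu, -1,
			PERF_FLAG_PID_CGROUP);
	close(cgrp_fd);		/* the event keeps its own reference */
	return ev_fd;
}
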
@@ -461,14 +480,15 @@ enum perf_callchain_context {
  */
 
 #ifdef CONFIG_PERF_EVENTS
+# include <linux/cgroup.h>
 # include <asm/perf_event.h>
 # include <asm/local64.h>
 #endif
 
 struct perf_guest_info_callbacks {
-	int (*is_in_guest) (void);
-	int (*is_user_mode) (void);
-	unsigned long (*get_guest_ip) (void);
+	int (*is_in_guest)(void);
+	int (*is_user_mode)(void);
+	unsigned long (*get_guest_ip)(void);
 };
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -486,6 +506,8 @@ struct perf_guest_info_callbacks {
 #include <linux/workqueue.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/irq_work.h>
+#include <linux/jump_label.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
 
@@ -527,18 +549,27 @@ struct hw_perf_event {
 			unsigned long	event_base;
 			int		idx;
 			int		last_cpu;
+			unsigned int	extra_reg;
+			u64		extra_config;
+			int		extra_alloc;
 		};
 		struct { /* software */
-			s64		remaining;
 			struct hrtimer	hrtimer;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 		struct { /* breakpoint */
 			struct arch_hw_breakpoint	info;
 			struct list_head		bp_list;
+			/*
+			 * Crufty hack to avoid the chicken and egg
+			 * problem hw_breakpoint has with context
+			 * creation and event initalization.
+			 */
+			struct task_struct		*bp_target;
 		};
 #endif
 	};
+	int				state;
 	local64_t			prev_count;
 	u64				sample_period;
 	u64				last_period;
@@ -550,6 +581,13 @@ struct hw_perf_event {
 #endif
 };
 
+/*
+ * hw_perf_event::state flags
+ */
+#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH		0x04
+
 struct perf_event;
 
 /*
@@ -561,36 +599,74 @@ struct perf_event;
  * struct pmu - generic performance monitoring unit
  */
 struct pmu {
-	int (*enable)			(struct perf_event *event);
-	void (*disable)			(struct perf_event *event);
-	int (*start)			(struct perf_event *event);
-	void (*stop)			(struct perf_event *event);
-	void (*read)			(struct perf_event *event);
-	void (*unthrottle)		(struct perf_event *event);
+	struct list_head		entry;
+
+	struct device			*dev;
+	char				*name;
+	int				type;
+
+	int * __percpu			pmu_disable_count;
+	struct perf_cpu_context * __percpu pmu_cpu_context;
+	int				task_ctx_nr;
+
+	/*
+	 * Fully disable/enable this PMU, can be used to protect from the PMI
+	 * as well as for lazy/batch writing of the MSRs.
+	 */
+	void (*pmu_enable)		(struct pmu *pmu); /* optional */
+	void (*pmu_disable)		(struct pmu *pmu); /* optional */
+
+	/*
+	 * Try and initialize the event for this PMU.
+	 * Should return -ENOENT when the @event doesn't match this PMU.
+	 */
+	int (*event_init)		(struct perf_event *event);
+
+#define PERF_EF_START	0x01		/* start the counter when adding    */
+#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
+#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
+
+	/*
+	 * Adds/Removes a counter to/from the PMU, can be done inside
+	 * a transaction, see the ->*_txn() methods.
+	 */
+	int  (*add)			(struct perf_event *event, int flags);
+	void (*del)			(struct perf_event *event, int flags);
+
+	/*
+	 * Starts/Stops a counter present on the PMU. The PMI handler
+	 * should stop the counter when perf_event_overflow() returns
+	 * !0. ->start() will be used to continue.
+	 */
+	void (*start)			(struct perf_event *event, int flags);
+	void (*stop)			(struct perf_event *event, int flags);
 
 	/*
-	 * Group events scheduling is treated as a transaction, add group
-	 * events as a whole and perform one schedulability test. If the test
-	 * fails, roll back the whole group
+	 * Updates the counter value of the event.
 	 */
+	void (*read)			(struct perf_event *event);
 
 	/*
-	 * Start the transaction, after this ->enable() doesn't need
-	 * to do schedulability tests.
+	 * Group events scheduling is treated as a transaction, add
+	 * group events as a whole and perform one schedulability test.
+	 * If the test fails, roll back the whole group
+	 *
+	 * Start the transaction, after this ->add() doesn't need to
+	 * do schedulability tests.
 	 */
-	void (*start_txn)	(const struct pmu *pmu);
+	void (*start_txn)		(struct pmu *pmu); /* optional */
 	/*
-	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * If ->start_txn() disabled the ->add() schedulability test
 	 * then ->commit_txn() is required to perform one. On success
 	 * the transaction is closed. On error the transaction is kept
 	 * open until ->cancel_txn() is called.
 	 */
-	int  (*commit_txn)	(const struct pmu *pmu);
+	int  (*commit_txn)		(struct pmu *pmu); /* optional */
 	/*
-	 * Will cancel the transaction, assumes ->disable() is called for
-	 * each successfull ->enable() during the transaction.
+	 * Will cancel the transaction, assumes ->del() is called
+	 * for each successful ->add() during the transaction.
 	 */
-	void (*cancel_txn)	(const struct pmu *pmu);
+	void (*cancel_txn)		(struct pmu *pmu); /* optional */
 };
 
 /**
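
To make the new callback API concrete, here is a hypothetical minimal PMU skeleton against the signatures above (not part of the patch; the demo_* names are invented, and actual counting and error handling are omitted):

#include <linux/errno.h>
#include <linux/perf_event.h>

static struct pmu demo_pmu;

static int demo_event_init(struct perf_event *event)
{
	/* decline events that belong to another PMU */
	if (event->attr.type != demo_pmu.type)
		return -ENOENT;
	return 0;
}

static void demo_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;		/* counting again */
}

static void demo_stop(struct perf_event *event, int flags)
{
	event->hw.state |= PERF_HES_STOPPED;
	if (flags & PERF_EF_UPDATE)
		event->hw.state |= PERF_HES_UPTODATE;
}

static int demo_add(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		demo_start(event, PERF_EF_RELOAD);
	return 0;
}

static void demo_del(struct perf_event *event, int flags)
{
	demo_stop(event, PERF_EF_UPDATE);
}

static void demo_read(struct perf_event *event)
{
	/* fold the current hardware count into event->count here */
}

static struct pmu demo_pmu = {
	.event_init	= demo_event_init,
	.add		= demo_add,
	.del		= demo_del,
	.start		= demo_start,
	.stop		= demo_stop,
	.read		= demo_read,
};

/* Registered e.g. from an __init function with
 *	perf_pmu_register(&demo_pmu, "demo", -1);
 * using the declaration added later in this patch (-1 requests a dynamically
 * allocated attr.type id). */
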
@@ -631,11 +707,6 @@ struct perf_buffer {
 	void				*data_pages[0];
 };
 
-struct perf_pending_entry {
-	struct perf_pending_entry *next;
-	void (*func)(struct perf_pending_entry *);
-};
-
 struct perf_sample_data;
 
 typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
@@ -643,19 +714,36 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
 					struct pt_regs *regs);
 
 enum perf_group_flag {
 	PERF_GROUP_SOFTWARE		= 0x1,
 };
 
 #define SWEVENT_HLIST_BITS		8
 #define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
 
 struct swevent_hlist {
 	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
 	struct rcu_head			rcu_head;
 };
 
 #define PERF_ATTACH_CONTEXT	0x01
 #define PERF_ATTACH_GROUP	0x02
+#define PERF_ATTACH_TASK	0x04
+
+#ifdef CONFIG_CGROUP_PERF
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+	u64				time;
+	u64				timestamp;
+};
+
+struct perf_cgroup {
+	struct cgroup_subsys_state	css;
+	struct perf_cgroup_info		*info;	/* timing info, one per cpu */
+};
+#endif
 
 /**
  * struct perf_event - performance event kernel representation:
@@ -669,7 +757,7 @@ struct perf_event {
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
-	const struct pmu		*pmu;
+	struct pmu			*pmu;
 
 	enum perf_event_active_state	state;
 	unsigned int			attach_state;
@@ -702,7 +790,20 @@ struct perf_event {
 	u64				tstamp_running;
 	u64				tstamp_stopped;
 
+	/*
+	 * timestamp shadows the actual context timing but it can
+	 * be safely used in NMI interrupt context. It reflects the
+	 * context time as it was when the event was last scheduled in.
+	 *
+	 * ctx_time already accounts for ctx->timestamp. Therefore to
+	 * compute ctx_time for a sample, simply add perf_clock().
+	 */
+	u64				shadow_ctx_time;
+
 	struct perf_event_attr		attr;
+	u16				header_size;
+	u16				id_header_size;
+	u16				read_size;
 	struct hw_perf_event		hw;
 
 	struct perf_event_context	*ctx;
@@ -743,7 +844,7 @@ struct perf_event {
 	int				pending_wakeup;
 	int				pending_kill;
 	int				pending_disable;
-	struct perf_pending_entry	pending;
+	struct irq_work			pending;
 
 	atomic_t			event_limit;
 
@@ -760,15 +861,27 @@ struct perf_event {
 	struct event_filter		*filter;
 #endif
 
+#ifdef CONFIG_CGROUP_PERF
+	struct perf_cgroup		*cgrp; /* cgroup event is attach to */
+	int				cgrp_defer_enabled;
+#endif
+
 #endif /* CONFIG_PERF_EVENTS */
 };
 
+enum perf_event_context_type {
+	task_context,
+	cpu_context,
+};
+
 /**
  * struct perf_event_context - event context structure
  *
  * Used as a container for task events and CPU events as well:
  */
 struct perf_event_context {
+	struct pmu			*pmu;
+	enum perf_event_context_type	type;
 	/*
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
@@ -788,6 +901,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
 
@@ -806,8 +920,15 @@ struct perf_event_context {
 	u64				generation;
 	int				pin_count;
 	struct rcu_head			rcu_head;
+	int				nr_cgroups; /* cgroup events present */
 };
 
+/*
+ * Number of contexts where an event can trigger:
+ *	task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS	4
+
 /**
  * struct perf_event_cpu_context - per cpu event context structure
  */
@@ -815,18 +936,11 @@ struct perf_cpu_context {
 	struct perf_event_context	ctx;
 	struct perf_event_context	*task_ctx;
 	int				active_oncpu;
-	int				max_pertask;
 	int				exclusive;
-	struct swevent_hlist		*swevent_hlist;
-	struct mutex			hlist_mutex;
-	int				hlist_refcount;
-
-	/*
-	 * Recursion avoidance:
-	 *
-	 * task, softirq, irq, nmi context
-	 */
-	int				recursion[4];
+	struct list_head		rotation_list;
+	int				jiffies_interval;
+	struct pmu			*active_pmu;
+	struct perf_cgroup		*cgrp;
 };
 
 struct perf_output_handle {
@@ -842,26 +956,20 @@ struct perf_output_handle {
 
 #ifdef CONFIG_PERF_EVENTS
 
-/*
- * Set by architecture code:
- */
-extern int perf_max_events;
+extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
+extern void perf_pmu_unregister(struct pmu *pmu);
 
-extern const struct pmu *hw_perf_event_init(struct perf_event *event);
-
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-extern void perf_event_task_tick(struct task_struct *task);
+extern int perf_num_counters(void);
+extern const char *perf_pmu_name(void);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
-extern void set_perf_event_pending(void);
-extern void perf_event_do_pending(void);
+extern void perf_event_delayed_put(struct task_struct *task);
 extern void perf_event_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
+extern void perf_pmu_disable(struct pmu *pmu);
+extern void perf_pmu_enable(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern void perf_event_update_userpage(struct perf_event *event);
@@ -869,7 +977,7 @@ extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				int cpu,
-				pid_t pid,
+				struct task_struct *task,
 				perf_overflow_handler_t callback);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
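
The in-kernel counter API now takes a task_struct pointer directly; a hedged sketch of a caller (not part of the patch; the demo_* names and the sample period are illustrative):

#include <linux/perf_event.h>
#include <linux/sched.h>

static void demo_overflow(struct perf_event *event, int nmi,
			  struct perf_sample_data *data, struct pt_regs *regs)
{
	/* react to the counter crossing its sample_period here */
}

static struct perf_event *demo_counter_for_task(struct task_struct *tsk)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_INSTRUCTIONS,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
	};

	/* cpu == -1: follow the task across CPUs; may return ERR_PTR() */
	return perf_event_create_kernel_counter(&attr, -1, tsk, demo_overflow);
}
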
@@ -895,8 +1003,7 @@ struct perf_sample_data {
 	struct perf_raw_record		*raw;
 };
 
-static inline
-void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
 {
 	data->addr = addr;
 	data->raw = NULL;
@@ -915,28 +1022,25 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
 				 struct perf_sample_data *data,
 				 struct pt_regs *regs);
 
+static inline bool is_sampling_event(struct perf_event *event)
+{
+	return event->attr.sample_period != 0;
+}
+
 /*
  * Return 1 for a software event, 0 for a hardware event
  */
 static inline int is_software_event(struct perf_event *event)
 {
-	switch (event->attr.type) {
-	case PERF_TYPE_SOFTWARE:
-	case PERF_TYPE_TRACEPOINT:
-	/* for now the breakpoint stuff also works as software event */
-	case PERF_TYPE_BREAKPOINT:
-		return 1;
-	}
-	return 0;
+	return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
-extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
-static inline void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
 #endif
 
 /*
@@ -954,12 +1058,12 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
-static inline void
+static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swevent_enabled[event_id])) {
-		struct pt_regs hot_regs;
+	struct pt_regs hot_regs;
 
+	if (static_branch(&perf_swevent_enabled[event_id])) {
 		if (!regs) {
 			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
@@ -968,6 +1072,21 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 	}
 }
 
+extern struct jump_label_key perf_sched_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	if (static_branch(&perf_sched_events))
+		__perf_event_task_sched_in(task);
+}
+
+static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+	__perf_event_task_sched_out(task, next);
+}
+
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -976,12 +1095,26 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+
+static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+	if (entry->nr < PERF_MAX_STACK_DEPTH)
+		entry->ip[entry->nr++] = ip;
+}
 
 extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+extern int perf_proc_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
 	return sysctl_perf_event_paranoid > -1;
@@ -1004,9 +1137,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record,
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
-#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
-				 PERF_RECORD_MISC_KERNEL)
-#define perf_instruction_pointer(regs)	instruction_pointer(regs)
+# define perf_misc_flags(regs) \
+		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
+# define perf_instruction_pointer(regs)	instruction_pointer(regs)
 #endif
 
 extern int perf_output_begin(struct perf_output_handle *handle,
@@ -1019,21 +1152,18 @@ extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
+extern void perf_event_task_tick(void);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task)			{ }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
 			  struct task_struct *next)			{ }
-static inline void
-perf_event_task_tick(struct task_struct *task)				{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
-static inline void perf_event_do_pending(void)				{ }
+static inline void perf_event_delayed_put(struct task_struct *task)	{ }
 static inline void perf_event_print_debug(void)			{ }
-static inline void perf_disable(void)					{ }
-static inline void perf_enable(void)					{ }
 static inline int perf_event_task_disable(void)			{ return -EINVAL; }
 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
 
@@ -1044,9 +1174,9 @@ static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
 static inline int perf_register_guest_info_callbacks
 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
 static inline int perf_unregister_guest_info_callbacks
 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
@@ -1056,25 +1186,25 @@ static inline int perf_swevent_get_recursion_context(void)		{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
 static inline void perf_event_enable(struct perf_event *event)		{ }
 static inline void perf_event_disable(struct perf_event *event)	{ }
+static inline void perf_event_task_tick(void)				{ }
 #endif
 
-#define perf_output_put(handle, x) \
-	perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
 /*
  * This has to have a higher priority than migration_notifier in sched.c.
  */
 #define perf_cpu_notifier(fn)						\
 do {									\
 	static struct notifier_block fn##_nb __cpuinitdata =		\
 		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
 		(void *)(unsigned long)smp_processor_id());		\
 	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
 		(void *)(unsigned long)smp_processor_id());		\
 	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
 		(void *)(unsigned long)smp_processor_id());		\
 	register_cpu_notifier(&fn##_nb);				\
 } while (0)
 
 #endif /* __KERNEL__ */