author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /include/linux/perf_event.h
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h  352
1 file changed, 241 insertions(+), 111 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 716f99b682c1..e0786e35f247 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -2,8 +2,8 @@
  * Performance events:
  *
  * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
  *
  * Data type definitions, declarations, prototypes.
  *
@@ -52,6 +52,8 @@ enum perf_hw_id {
 	PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
 	PERF_COUNT_HW_BRANCH_MISSES = 5,
 	PERF_COUNT_HW_BUS_CYCLES = 6,
+	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
+	PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,

 	PERF_COUNT_HW_MAX, /* non-ABI */
 };
@@ -135,14 +137,14 @@ enum perf_event_sample_format {
  *
  * struct read_format {
  *	{ u64 value;
- *	  { u64 time_enabled; } && PERF_FORMAT_ENABLED
- *	  { u64 time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *	  { u64 id; } && PERF_FORMAT_ID
  *	} && !PERF_FORMAT_GROUP
  *
  *	{ u64 nr;
- *	  { u64 time_enabled; } && PERF_FORMAT_ENABLED
- *	  { u64 time_running; } && PERF_FORMAT_RUNNING
+ *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
  *	  { u64 value;
  *	    { u64 id; } && PERF_FORMAT_ID
  *	  } cntr[nr];
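For reference, here is a minimal user-space sketch of consuming the documented read_format layout for a single (non-group) counter opened with PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING, and scaling the value when the event was multiplexed. The struct and function names are illustrative, not part of the ABI header.

```c
#include <stdint.h>
#include <unistd.h>

/* matches the !PERF_FORMAT_GROUP layout documented above */
struct single_read_format {
        uint64_t value;
        uint64_t time_enabled;  /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running;  /* PERF_FORMAT_TOTAL_TIME_RUNNING */
};

/* scale the raw count for the time the event was actually scheduled */
static uint64_t read_scaled_count(int perf_fd)
{
        struct single_read_format rf;

        if (read(perf_fd, &rf, sizeof(rf)) != sizeof(rf) || rf.time_running == 0)
                return 0;
        return (uint64_t)((double)rf.value * rf.time_enabled / rf.time_running);
}
```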
@@ -215,8 +217,9 @@ struct perf_event_attr {
 				 */
 				precise_ip : 2, /* skid constraint */
 				mmap_data : 1, /* non-exec mmap data */
+				sample_id_all : 1, /* sample_type all events */

-				__reserved_1 : 46;
+				__reserved_1 : 45;

 	union {
 		__u32 wakeup_events; /* wakeup every n events */
@@ -224,8 +227,14 @@ struct perf_event_attr {
 	};

 	__u32 bp_type;
-	__u64 bp_addr;
-	__u64 bp_len;
+	union {
+		__u64 bp_addr;
+		__u64 config1; /* extension of config */
+	};
+	union {
+		__u64 bp_len;
+		__u64 config2; /* extension of config1 */
+	};
 };

 /*
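The new config1/config2 fields matter only for PMUs whose event encoding does not fit in the 64-bit config word. A hedged user-space sketch follows; the encodings are placeholders, not real event codes.

```c
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* open a raw hardware event whose encoding spills into config1 */
static int open_raw_event(uint64_t config, uint64_t config1, pid_t pid, int cpu)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size    = sizeof(attr);
        attr.type    = PERF_TYPE_RAW;
        attr.config  = config;   /* primary event selector */
        attr.config1 = config1;  /* extension of config (PMU specific) */

        return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}
```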
@@ -327,6 +336,15 @@ struct perf_event_header {
 enum perf_event_type {

 	/*
+	 * If perf_event_attr.sample_id_all is set then all event types will
+	 * have the sample_type selected fields related to where/when
+	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
+	 * described in PERF_RECORD_SAMPLE below, it will be stashed just after
+	 * the perf_event_header and the fields already present for the existing
+	 * fields, i.e. at the end of the payload. That way a newer perf.data
+	 * file will be supported by older perf tools, with these new optional
+	 * fields being ignored.
+	 *
 	 * The MMAP events record the PROT_EXEC mappings so that we can
 	 * correlate userspace IPs to code. They have the following structure:
 	 *
@@ -452,8 +470,9 @@ enum perf_callchain_context {
 	PERF_CONTEXT_MAX = (__u64)-4095,
 };

 #define PERF_FLAG_FD_NO_GROUP (1U << 0)
 #define PERF_FLAG_FD_OUTPUT (1U << 1)
+#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */

 #ifdef __KERNEL__
 /*
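A sketch of how the new flag is meant to be used from user space: the "pid" argument becomes a file descriptor for the monitored cgroup's directory, and a real CPU must be given since cgroup events are per-cpu only. The mount point below is an assumption about where the perf_event cgroup controller is mounted.

```c
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_cgroup_event(struct perf_event_attr *attr, int cpu)
{
        /* fd of the cgroup directory stands in for the pid argument */
        int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);

        if (cgrp_fd < 0)
                return -1;
        return syscall(__NR_perf_event_open, attr, cgrp_fd, cpu, -1,
                       PERF_FLAG_PID_CGROUP);
}
```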
@@ -461,14 +480,15 @@ enum perf_callchain_context {
  */

 #ifdef CONFIG_PERF_EVENTS
+# include <linux/cgroup.h>
 # include <asm/perf_event.h>
 # include <asm/local64.h>
 #endif

 struct perf_guest_info_callbacks {
-	int (*is_in_guest) (void);
-	int (*is_user_mode) (void);
-	unsigned long (*get_guest_ip) (void);
+	int (*is_in_guest)(void);
+	int (*is_user_mode)(void);
+	unsigned long (*get_guest_ip)(void);
 };

 #ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -486,6 +506,8 @@ struct perf_guest_info_callbacks {
 #include <linux/workqueue.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
+#include <linux/irq_work.h>
+#include <linux/jump_label.h>
 #include <asm/atomic.h>
 #include <asm/local.h>

@@ -527,18 +549,27 @@ struct hw_perf_event {
 		unsigned long event_base;
 		int idx;
 		int last_cpu;
+		unsigned int extra_reg;
+		u64 extra_config;
+		int extra_alloc;
 	};
 	struct { /* software */
-		s64 remaining;
 		struct hrtimer hrtimer;
 	};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	struct { /* breakpoint */
 		struct arch_hw_breakpoint info;
 		struct list_head bp_list;
+		/*
+		 * Crufty hack to avoid the chicken and egg
+		 * problem hw_breakpoint has with context
+		 * creation and event initalization.
+		 */
+		struct task_struct *bp_target;
 	};
 #endif
 	};
+	int state;
 	local64_t prev_count;
 	u64 sample_period;
 	u64 last_period;
@@ -550,6 +581,13 @@ struct hw_perf_event {
 #endif
 };

+/*
+ * hw_perf_event::state flags
+ */
+#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH		0x04
+
 struct perf_event;

 /*
@@ -561,36 +599,74 @@ struct perf_event;
  * struct pmu - generic performance monitoring unit
  */
 struct pmu {
-	int (*enable) (struct perf_event *event);
-	void (*disable) (struct perf_event *event);
-	int (*start) (struct perf_event *event);
-	void (*stop) (struct perf_event *event);
-	void (*read) (struct perf_event *event);
-	void (*unthrottle) (struct perf_event *event);
+	struct list_head entry;
+
+	struct device *dev;
+	char *name;
+	int type;
+
+	int * __percpu pmu_disable_count;
+	struct perf_cpu_context * __percpu pmu_cpu_context;
+	int task_ctx_nr;
+
+	/*
+	 * Fully disable/enable this PMU, can be used to protect from the PMI
+	 * as well as for lazy/batch writing of the MSRs.
+	 */
+	void (*pmu_enable) (struct pmu *pmu); /* optional */
+	void (*pmu_disable) (struct pmu *pmu); /* optional */
+
+	/*
+	 * Try and initialize the event for this PMU.
+	 * Should return -ENOENT when the @event doesn't match this PMU.
+	 */
+	int (*event_init) (struct perf_event *event);
+
+#define PERF_EF_START	0x01 /* start the counter when adding    */
+#define PERF_EF_RELOAD	0x02 /* reload the counter when starting */
+#define PERF_EF_UPDATE	0x04 /* update the counter when stopping */
+
+	/*
+	 * Adds/Removes a counter to/from the PMU, can be done inside
+	 * a transaction, see the ->*_txn() methods.
+	 */
+	int (*add) (struct perf_event *event, int flags);
+	void (*del) (struct perf_event *event, int flags);
+
+	/*
+	 * Starts/Stops a counter present on the PMU. The PMI handler
+	 * should stop the counter when perf_event_overflow() returns
+	 * !0. ->start() will be used to continue.
+	 */
+	void (*start) (struct perf_event *event, int flags);
+	void (*stop) (struct perf_event *event, int flags);

 	/*
-	 * Group events scheduling is treated as a transaction, add group
-	 * events as a whole and perform one schedulability test. If the test
-	 * fails, roll back the whole group
+	 * Updates the counter value of the event.
 	 */
+	void (*read) (struct perf_event *event);

 	/*
-	 * Start the transaction, after this ->enable() doesn't need
-	 * to do schedulability tests.
+	 * Group events scheduling is treated as a transaction, add
+	 * group events as a whole and perform one schedulability test.
+	 * If the test fails, roll back the whole group
+	 *
+	 * Start the transaction, after this ->add() doesn't need to
+	 * do schedulability tests.
 	 */
-	void (*start_txn) (const struct pmu *pmu);
+	void (*start_txn) (struct pmu *pmu); /* optional */
 	/*
-	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * If ->start_txn() disabled the ->add() schedulability test
 	 * then ->commit_txn() is required to perform one. On success
 	 * the transaction is closed. On error the transaction is kept
 	 * open until ->cancel_txn() is called.
 	 */
-	int (*commit_txn) (const struct pmu *pmu);
+	int (*commit_txn) (struct pmu *pmu); /* optional */
 	/*
-	 * Will cancel the transaction, assumes ->disable() is called for
-	 * each successfull ->enable() during the transaction.
+	 * Will cancel the transaction, assumes ->del() is called
+	 * for each successful ->add() during the transaction.
 	 */
-	void (*cancel_txn) (const struct pmu *pmu);
+	void (*cancel_txn) (struct pmu *pmu); /* optional */
 };

 /**
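To make the new callback contract concrete, here is a minimal sketch of a PMU backend wired to this struct. The counter programming is stubbed out; only the PERF_EF_*/PERF_HES_* flag handling follows the comments above. It is an illustration under the merged API, not code from this commit.

```c
static struct pmu dummy_pmu;            /* forward declaration for event_init */

static void dummy_pmu_start(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_RELOAD) {
                /* re-program the hardware period from event->hw here */
        }
        event->hw.state = 0;            /* no longer PERF_HES_STOPPED */
}

static void dummy_pmu_stop(struct perf_event *event, int flags)
{
        event->hw.state |= PERF_HES_STOPPED;
        if (flags & PERF_EF_UPDATE) {
                /* fold the outstanding hardware delta into event->count here */
                event->hw.state |= PERF_HES_UPTODATE;
        }
}

static int dummy_pmu_add(struct perf_event *event, int flags)
{
        event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                dummy_pmu_start(event, PERF_EF_RELOAD);
        return 0;                       /* counter scheduled onto the "PMU" */
}

static void dummy_pmu_del(struct perf_event *event, int flags)
{
        dummy_pmu_stop(event, PERF_EF_UPDATE);
}

static void dummy_pmu_read(struct perf_event *event)
{
        /* update event->count from the (imaginary) hardware counter */
}

static int dummy_pmu_event_init(struct perf_event *event)
{
        if (event->attr.type != dummy_pmu.type)
                return -ENOENT;         /* not ours, let other PMUs try */
        return 0;
}

static struct pmu dummy_pmu = {
        .event_init = dummy_pmu_event_init,
        .add        = dummy_pmu_add,
        .del        = dummy_pmu_del,
        .start      = dummy_pmu_start,
        .stop       = dummy_pmu_stop,
        .read       = dummy_pmu_read,
};

/* registered via the interface declared later in this header, e.g.:
 *	perf_pmu_register(&dummy_pmu, "dummy", -1);
 */
```

Whether ->pmu_enable()/->pmu_disable() and the transaction hooks are needed depends on the hardware; as the comments above note, they are optional.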
@@ -631,11 +707,6 @@ struct perf_buffer {
 	void *data_pages[0];
 };

-struct perf_pending_entry {
-	struct perf_pending_entry *next;
-	void (*func)(struct perf_pending_entry *);
-};
-
 struct perf_sample_data;

 typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
@@ -643,19 +714,36 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
 					struct pt_regs *regs);

 enum perf_group_flag {
 	PERF_GROUP_SOFTWARE = 0x1,
 };

 #define SWEVENT_HLIST_BITS 8
 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)

 struct swevent_hlist {
 	struct hlist_head heads[SWEVENT_HLIST_SIZE];
 	struct rcu_head rcu_head;
 };

 #define PERF_ATTACH_CONTEXT 0x01
 #define PERF_ATTACH_GROUP 0x02
+#define PERF_ATTACH_TASK 0x04
+
+#ifdef CONFIG_CGROUP_PERF
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+	u64 time;
+	u64 timestamp;
+};
+
+struct perf_cgroup {
+	struct cgroup_subsys_state css;
+	struct perf_cgroup_info *info; /* timing info, one per cpu */
+};
+#endif

 /**
  * struct perf_event - performance event kernel representation:
@@ -669,7 +757,7 @@ struct perf_event {
 	int nr_siblings;
 	int group_flags;
 	struct perf_event *group_leader;
-	const struct pmu *pmu;
+	struct pmu *pmu;

 	enum perf_event_active_state state;
 	unsigned int attach_state;
@@ -702,7 +790,20 @@ struct perf_event {
 	u64 tstamp_running;
 	u64 tstamp_stopped;

+	/*
+	 * timestamp shadows the actual context timing but it can
+	 * be safely used in NMI interrupt context. It reflects the
+	 * context time as it was when the event was last scheduled in.
+	 *
+	 * ctx_time already accounts for ctx->timestamp. Therefore to
+	 * compute ctx_time for a sample, simply add perf_clock().
+	 */
+	u64 shadow_ctx_time;
+
 	struct perf_event_attr attr;
+	u16 header_size;
+	u16 id_header_size;
+	u16 read_size;
 	struct hw_perf_event hw;

 	struct perf_event_context *ctx;
@@ -743,7 +844,7 @@ struct perf_event {
 	int pending_wakeup;
 	int pending_kill;
 	int pending_disable;
-	struct perf_pending_entry pending;
+	struct irq_work pending;

 	atomic_t event_limit;

@@ -760,15 +861,27 @@ struct perf_event {
 	struct event_filter *filter;
 #endif

+#ifdef CONFIG_CGROUP_PERF
+	struct perf_cgroup *cgrp; /* cgroup event is attach to */
+	int cgrp_defer_enabled;
+#endif
+
 #endif /* CONFIG_PERF_EVENTS */
 };

+enum perf_event_context_type {
+	task_context,
+	cpu_context,
+};
+
 /**
  * struct perf_event_context - event context structure
  *
 * Used as a container for task events and CPU events as well:
 */
 struct perf_event_context {
+	struct pmu *pmu;
+	enum perf_event_context_type type;
 	/*
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
@@ -788,6 +901,7 @@ struct perf_event_context {
 	int nr_active;
 	int is_active;
 	int nr_stat;
+	int rotate_disable;
 	atomic_t refcount;
 	struct task_struct *task;

@@ -806,8 +920,15 @@ struct perf_event_context {
 	u64 generation;
 	int pin_count;
 	struct rcu_head rcu_head;
+	int nr_cgroups; /* cgroup events present */
 };

+/*
+ * Number of contexts where an event can trigger:
+ * task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS 4
+
 /**
  * struct perf_event_cpu_context - per cpu event context structure
  */
@@ -815,18 +936,11 @@ struct perf_cpu_context {
 	struct perf_event_context ctx;
 	struct perf_event_context *task_ctx;
 	int active_oncpu;
-	int max_pertask;
 	int exclusive;
-	struct swevent_hlist *swevent_hlist;
-	struct mutex hlist_mutex;
-	int hlist_refcount;
-
-	/*
-	 * Recursion avoidance:
-	 *
-	 * task, softirq, irq, nmi context
-	 */
-	int recursion[4];
+	struct list_head rotation_list;
+	int jiffies_interval;
+	struct pmu *active_pmu;
+	struct perf_cgroup *cgrp;
 };

 struct perf_output_handle {
@@ -842,26 +956,20 @@ struct perf_output_handle {

 #ifdef CONFIG_PERF_EVENTS

-/*
- * Set by architecture code:
- */
-extern int perf_max_events;
+extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
+extern void perf_pmu_unregister(struct pmu *pmu);

-extern const struct pmu *hw_perf_event_init(struct perf_event *event);
-
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-extern void perf_event_task_tick(struct task_struct *task);
+extern int perf_num_counters(void);
+extern const char *perf_pmu_name(void);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
-extern void set_perf_event_pending(void);
-extern void perf_event_do_pending(void);
+extern void perf_event_delayed_put(struct task_struct *task);
 extern void perf_event_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
+extern void perf_pmu_disable(struct pmu *pmu);
+extern void perf_pmu_enable(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern void perf_event_update_userpage(struct perf_event *event);
@@ -869,7 +977,7 @@ extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				int cpu,
-				pid_t pid,
+				struct task_struct *task,
 				perf_overflow_handler_t callback);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
@@ -895,8 +1003,7 @@ struct perf_sample_data {
 	struct perf_raw_record *raw;
 };

-static inline
-void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
 {
 	data->addr = addr;
 	data->raw = NULL;
@@ -915,28 +1022,25 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
 				 struct perf_sample_data *data,
 				 struct pt_regs *regs);

+static inline bool is_sampling_event(struct perf_event *event)
+{
+	return event->attr.sample_period != 0;
+}
+
 /*
  * Return 1 for a software event, 0 for a hardware event
  */
 static inline int is_software_event(struct perf_event *event)
 {
-	switch (event->attr.type) {
-	case PERF_TYPE_SOFTWARE:
-	case PERF_TYPE_TRACEPOINT:
-	/* for now the breakpoint stuff also works as software event */
-	case PERF_TYPE_BREAKPOINT:
-		return 1;
-	}
-	return 0;
+	return event->pmu->task_ctx_nr == perf_sw_context;
 }

-extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

 #ifndef perf_arch_fetch_caller_regs
-static inline void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
 #endif

 /*
@@ -954,12 +1058,12 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }

-static inline void
+static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swevent_enabled[event_id])) {
-		struct pt_regs hot_regs;
+	struct pt_regs hot_regs;

+	if (static_branch(&perf_swevent_enabled[event_id])) {
 		if (!regs) {
 			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
@@ -968,6 +1072,21 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 	}
 }

+extern struct jump_label_key perf_sched_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+	if (static_branch(&perf_sched_events))
+		__perf_event_task_sched_in(task);
+}
+
+static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+	__perf_event_task_sched_out(task, next);
+}
+
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -976,12 +1095,26 @@ extern int perf_unregister_guest_info_callbacks
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);

-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+
+static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+	if (entry->nr < PERF_MAX_STACK_DEPTH)
+		entry->ip[entry->nr++] = ip;
+}

 extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;

+extern int perf_proc_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
 	return sysctl_perf_event_paranoid > -1;
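The perf_callchain_user()/perf_callchain_kernel() hooks above are supplied per architecture and feed frames through perf_callchain_store(). A simplified, hypothetical backend might look like this; real implementations walk the architecture's frame or stack-pointer chain.

```c
void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        /* tag the following addresses as kernel-side frames */
        perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
        perf_callchain_store(entry, instruction_pointer(regs));

        /*
         * ... then walk the kernel stack and call
         * perf_callchain_store(entry, return_address) for each frame.
         */
}
```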
@@ -1004,9 +1137,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record,
 extern void perf_bp_event(struct perf_event *event, void *data);

 #ifndef perf_misc_flags
-#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \
-				PERF_RECORD_MISC_KERNEL)
-#define perf_instruction_pointer(regs) instruction_pointer(regs)
+# define perf_misc_flags(regs) \
+		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
+# define perf_instruction_pointer(regs) instruction_pointer(regs)
 #endif

 extern int perf_output_begin(struct perf_output_handle *handle,
@@ -1019,21 +1152,18 @@ extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
+extern void perf_event_task_tick(void);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task) { }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
 			  struct task_struct *next) { }
-static inline void
-perf_event_task_tick(struct task_struct *task) { }
 static inline int perf_event_init_task(struct task_struct *child) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child) { }
 static inline void perf_event_free_task(struct task_struct *task) { }
-static inline void perf_event_do_pending(void) { }
+static inline void perf_event_delayed_put(struct task_struct *task) { }
 static inline void perf_event_print_debug(void) { }
-static inline void perf_disable(void) { }
-static inline void perf_enable(void) { }
 static inline int perf_event_task_disable(void) { return -EINVAL; }
 static inline int perf_event_task_enable(void) { return -EINVAL; }

@@ -1044,9 +1174,9 @@ static inline void
 perf_bp_event(struct perf_event *event, void *data) { }

 static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
 static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
+(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

 static inline void perf_event_mmap(struct vm_area_struct *vma) { }
 static inline void perf_event_comm(struct task_struct *tsk) { }
@@ -1056,25 +1186,25 @@ static inline int perf_swevent_get_recursion_context(void) { return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx) { }
 static inline void perf_event_enable(struct perf_event *event) { }
 static inline void perf_event_disable(struct perf_event *event) { }
+static inline void perf_event_task_tick(void) { }
 #endif

-#define perf_output_put(handle, x) \
-	perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

 /*
  * This has to have a higher priority than migration_notifier in sched.c.
 */
 #define perf_cpu_notifier(fn) \
 do { \
 	static struct notifier_block fn##_nb __cpuinitdata = \
 		{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
 		(void *)(unsigned long)smp_processor_id()); \
 	fn(&fn##_nb, (unsigned long)CPU_STARTING, \
 		(void *)(unsigned long)smp_processor_id()); \
 	fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
 		(void *)(unsigned long)smp_processor_id()); \
 	register_cpu_notifier(&fn##_nb); \
 } while (0)

 #endif /* __KERNEL__ */