diff options
author | Robert Richter <robert.richter@amd.com> | 2010-10-15 06:45:00 -0400 |
---|---|---|
committer | Robert Richter <robert.richter@amd.com> | 2010-10-15 06:45:00 -0400 |
commit | 6268464b370e234e0255330190f9bd5d19386ad7 (patch) | |
tree | 5742641092ce64227dd2086d78baaede57da1f80 /include/linux/perf_event.h | |
parent | 7df01d96b295e400167e78061b81d4c91630b12d (diff) | |
parent | 0fdf13606b67f830559abdaad15980c7f4f05ec4 (diff) |
Merge remote branch 'tip/perf/core' into oprofile/core
Conflicts:
arch/arm/oprofile/common.c
kernel/perf_event.c
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r-- | include/linux/perf_event.h | 155 |
1 files changed, 100 insertions, 55 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 33f08dafda2f..a9227e985207 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -529,7 +529,6 @@ struct hw_perf_event { | |||
529 | int last_cpu; | 529 | int last_cpu; |
530 | }; | 530 | }; |
531 | struct { /* software */ | 531 | struct { /* software */ |
532 | s64 remaining; | ||
533 | struct hrtimer hrtimer; | 532 | struct hrtimer hrtimer; |
534 | }; | 533 | }; |
535 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 534 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
@@ -539,6 +538,7 @@ struct hw_perf_event { | |||
539 | }; | 538 | }; |
540 | #endif | 539 | #endif |
541 | }; | 540 | }; |
541 | int state; | ||
542 | local64_t prev_count; | 542 | local64_t prev_count; |
543 | u64 sample_period; | 543 | u64 sample_period; |
544 | u64 last_period; | 544 | u64 last_period; |
@@ -550,6 +550,13 @@ struct hw_perf_event { | |||
550 | #endif | 550 | #endif |
551 | }; | 551 | }; |
552 | 552 | ||
553 | /* | ||
554 | * hw_perf_event::state flags | ||
555 | */ | ||
556 | #define PERF_HES_STOPPED 0x01 /* the counter is stopped */ | ||
557 | #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ | ||
558 | #define PERF_HES_ARCH 0x04 | ||
559 | |||
553 | struct perf_event; | 560 | struct perf_event; |
554 | 561 | ||
555 | /* | 562 | /* |
@@ -561,36 +568,70 @@ struct perf_event; | |||
561 | * struct pmu - generic performance monitoring unit | 568 | * struct pmu - generic performance monitoring unit |
562 | */ | 569 | */ |
563 | struct pmu { | 570 | struct pmu { |
564 | int (*enable) (struct perf_event *event); | 571 | struct list_head entry; |
565 | void (*disable) (struct perf_event *event); | 572 | |
566 | int (*start) (struct perf_event *event); | 573 | int * __percpu pmu_disable_count; |
567 | void (*stop) (struct perf_event *event); | 574 | struct perf_cpu_context * __percpu pmu_cpu_context; |
568 | void (*read) (struct perf_event *event); | 575 | int task_ctx_nr; |
569 | void (*unthrottle) (struct perf_event *event); | ||
570 | 576 | ||
571 | /* | 577 | /* |
572 | * Group events scheduling is treated as a transaction, add group | 578 | * Fully disable/enable this PMU, can be used to protect from the PMI |
573 | * events as a whole and perform one schedulability test. If the test | 579 | * as well as for lazy/batch writing of the MSRs. |
574 | * fails, roll back the whole group | ||
575 | */ | 580 | */ |
581 | void (*pmu_enable) (struct pmu *pmu); /* optional */ | ||
582 | void (*pmu_disable) (struct pmu *pmu); /* optional */ | ||
576 | 583 | ||
577 | /* | 584 | /* |
578 | * Start the transaction, after this ->enable() doesn't need | 585 | * Try and initialize the event for this PMU. |
579 | * to do schedulability tests. | 586 | * Should return -ENOENT when the @event doesn't match this PMU. |
580 | */ | 587 | */ |
581 | void (*start_txn) (const struct pmu *pmu); | 588 | int (*event_init) (struct perf_event *event); |
589 | |||
590 | #define PERF_EF_START 0x01 /* start the counter when adding */ | ||
591 | #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ | ||
592 | #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ | ||
593 | |||
582 | /* | 594 | /* |
583 | * If ->start_txn() disabled the ->enable() schedulability test | 595 | * Adds/Removes a counter to/from the PMU, can be done inside |
596 | * a transaction, see the ->*_txn() methods. | ||
597 | */ | ||
598 | int (*add) (struct perf_event *event, int flags); | ||
599 | void (*del) (struct perf_event *event, int flags); | ||
600 | |||
601 | /* | ||
602 | * Starts/Stops a counter present on the PMU. The PMI handler | ||
603 | * should stop the counter when perf_event_overflow() returns | ||
604 | * !0. ->start() will be used to continue. | ||
605 | */ | ||
606 | void (*start) (struct perf_event *event, int flags); | ||
607 | void (*stop) (struct perf_event *event, int flags); | ||
608 | |||
609 | /* | ||
610 | * Updates the counter value of the event. | ||
611 | */ | ||
612 | void (*read) (struct perf_event *event); | ||
613 | |||
614 | /* | ||
615 | * Group events scheduling is treated as a transaction, add | ||
616 | * group events as a whole and perform one schedulability test. | ||
617 | * If the test fails, roll back the whole group | ||
618 | * | ||
619 | * Start the transaction, after this ->add() doesn't need to | ||
620 | * do schedulability tests. | ||
621 | */ | ||
622 | void (*start_txn) (struct pmu *pmu); /* optional */ | ||
623 | /* | ||
624 | * If ->start_txn() disabled the ->add() schedulability test | ||
584 | * then ->commit_txn() is required to perform one. On success | 625 | * then ->commit_txn() is required to perform one. On success |
585 | * the transaction is closed. On error the transaction is kept | 626 | * the transaction is closed. On error the transaction is kept |
586 | * open until ->cancel_txn() is called. | 627 | * open until ->cancel_txn() is called. |
587 | */ | 628 | */ |
588 | int (*commit_txn) (const struct pmu *pmu); | 629 | int (*commit_txn) (struct pmu *pmu); /* optional */ |
589 | /* | 630 | /* |
590 | * Will cancel the transaction, assumes ->disable() is called for | 631 | * Will cancel the transaction, assumes ->del() is called |
591 | * each successful ->enable() during the transaction. | 632 | * for each successful ->add() during the transaction. |
592 | */ | 633 | */ |
593 | void (*cancel_txn) (const struct pmu *pmu); | 634 | void (*cancel_txn) (struct pmu *pmu); /* optional */ |
594 | }; | 635 | }; |
595 | 636 | ||
596 | /** | 637 | /** |
@@ -669,7 +710,7 @@ struct perf_event { | |||
669 | int nr_siblings; | 710 | int nr_siblings; |
670 | int group_flags; | 711 | int group_flags; |
671 | struct perf_event *group_leader; | 712 | struct perf_event *group_leader; |
672 | const struct pmu *pmu; | 713 | struct pmu *pmu; |
673 | 714 | ||
674 | enum perf_event_active_state state; | 715 | enum perf_event_active_state state; |
675 | unsigned int attach_state; | 716 | unsigned int attach_state; |
@@ -763,12 +804,19 @@ struct perf_event { | |||
763 | #endif /* CONFIG_PERF_EVENTS */ | 804 | #endif /* CONFIG_PERF_EVENTS */ |
764 | }; | 805 | }; |
765 | 806 | ||
807 | enum perf_event_context_type { | ||
808 | task_context, | ||
809 | cpu_context, | ||
810 | }; | ||
811 | |||
766 | /** | 812 | /** |
767 | * struct perf_event_context - event context structure | 813 | * struct perf_event_context - event context structure |
768 | * | 814 | * |
769 | * Used as a container for task events and CPU events as well: | 815 | * Used as a container for task events and CPU events as well: |
770 | */ | 816 | */ |
771 | struct perf_event_context { | 817 | struct perf_event_context { |
818 | enum perf_event_context_type type; | ||
819 | struct pmu *pmu; | ||
772 | /* | 820 | /* |
773 | * Protect the states of the events in the list, | 821 | * Protect the states of the events in the list, |
774 | * nr_active, and the list: | 822 | * nr_active, and the list: |
@@ -808,6 +856,12 @@ struct perf_event_context { | |||
808 | struct rcu_head rcu_head; | 856 | struct rcu_head rcu_head; |
809 | }; | 857 | }; |
810 | 858 | ||
859 | /* | ||
860 | * Number of contexts where an event can trigger: | ||
861 | * task, softirq, hardirq, nmi. | ||
862 | */ | ||
863 | #define PERF_NR_CONTEXTS 4 | ||
864 | |||
811 | /** | 865 | /** |
812 | * struct perf_event_cpu_context - per cpu event context structure | 866 | * struct perf_event_cpu_context - per cpu event context structure |
813 | */ | 867 | */ |
@@ -815,18 +869,9 @@ struct perf_cpu_context { | |||
815 | struct perf_event_context ctx; | 869 | struct perf_event_context ctx; |
816 | struct perf_event_context *task_ctx; | 870 | struct perf_event_context *task_ctx; |
817 | int active_oncpu; | 871 | int active_oncpu; |
818 | int max_pertask; | ||
819 | int exclusive; | 872 | int exclusive; |
820 | struct swevent_hlist *swevent_hlist; | 873 | struct list_head rotation_list; |
821 | struct mutex hlist_mutex; | 874 | int jiffies_interval; |
822 | int hlist_refcount; | ||
823 | |||
824 | /* | ||
825 | * Recursion avoidance: | ||
826 | * | ||
827 | * task, softirq, irq, nmi context | ||
828 | */ | ||
829 | int recursion[4]; | ||
830 | }; | 875 | }; |
831 | 876 | ||
832 | struct perf_output_handle { | 877 | struct perf_output_handle { |
@@ -842,28 +887,22 @@ struct perf_output_handle { | |||
842 | 887 | ||
843 | #ifdef CONFIG_PERF_EVENTS | 888 | #ifdef CONFIG_PERF_EVENTS |
844 | 889 | ||
845 | /* | 890 | extern int perf_pmu_register(struct pmu *pmu); |
846 | * Set by architecture code: | 891 | extern void perf_pmu_unregister(struct pmu *pmu); |
847 | */ | ||
848 | extern int perf_max_events; | ||
849 | |||
850 | extern const struct pmu *hw_perf_event_init(struct perf_event *event); | ||
851 | 892 | ||
852 | extern int perf_num_counters(void); | 893 | extern int perf_num_counters(void); |
853 | extern const char *perf_pmu_name(void); | 894 | extern const char *perf_pmu_name(void); |
854 | extern void perf_event_task_sched_in(struct task_struct *task); | 895 | extern void perf_event_task_sched_in(struct task_struct *task); |
855 | extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); | 896 | extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); |
856 | extern void perf_event_task_tick(struct task_struct *task); | ||
857 | extern int perf_event_init_task(struct task_struct *child); | 897 | extern int perf_event_init_task(struct task_struct *child); |
858 | extern void perf_event_exit_task(struct task_struct *child); | 898 | extern void perf_event_exit_task(struct task_struct *child); |
859 | extern void perf_event_free_task(struct task_struct *task); | 899 | extern void perf_event_free_task(struct task_struct *task); |
900 | extern void perf_event_delayed_put(struct task_struct *task); | ||
860 | extern void set_perf_event_pending(void); | 901 | extern void set_perf_event_pending(void); |
861 | extern void perf_event_do_pending(void); | 902 | extern void perf_event_do_pending(void); |
862 | extern void perf_event_print_debug(void); | 903 | extern void perf_event_print_debug(void); |
863 | extern void __perf_disable(void); | 904 | extern void perf_pmu_disable(struct pmu *pmu); |
864 | extern bool __perf_enable(void); | 905 | extern void perf_pmu_enable(struct pmu *pmu); |
865 | extern void perf_disable(void); | ||
866 | extern void perf_enable(void); | ||
867 | extern int perf_event_task_disable(void); | 906 | extern int perf_event_task_disable(void); |
868 | extern int perf_event_task_enable(void); | 907 | extern int perf_event_task_enable(void); |
869 | extern void perf_event_update_userpage(struct perf_event *event); | 908 | extern void perf_event_update_userpage(struct perf_event *event); |
@@ -871,7 +910,7 @@ extern int perf_event_release_kernel(struct perf_event *event); | |||
871 | extern struct perf_event * | 910 | extern struct perf_event * |
872 | perf_event_create_kernel_counter(struct perf_event_attr *attr, | 911 | perf_event_create_kernel_counter(struct perf_event_attr *attr, |
873 | int cpu, | 912 | int cpu, |
874 | pid_t pid, | 913 | struct task_struct *task, |
875 | perf_overflow_handler_t callback); | 914 | perf_overflow_handler_t callback); |
876 | extern u64 perf_event_read_value(struct perf_event *event, | 915 | extern u64 perf_event_read_value(struct perf_event *event, |
877 | u64 *enabled, u64 *running); | 916 | u64 *enabled, u64 *running); |
@@ -922,14 +961,7 @@ extern int perf_event_overflow(struct perf_event *event, int nmi, | |||
922 | */ | 961 | */ |
923 | static inline int is_software_event(struct perf_event *event) | 962 | static inline int is_software_event(struct perf_event *event) |
924 | { | 963 | { |
925 | switch (event->attr.type) { | 964 | return event->pmu->task_ctx_nr == perf_sw_context; |
926 | case PERF_TYPE_SOFTWARE: | ||
927 | case PERF_TYPE_TRACEPOINT: | ||
928 | /* for now the breakpoint stuff also works as software event */ | ||
929 | case PERF_TYPE_BREAKPOINT: | ||
930 | return 1; | ||
931 | } | ||
932 | return 0; | ||
933 | } | 965 | } |
934 | 966 | ||
935 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 967 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
@@ -978,7 +1010,21 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks | |||
978 | extern void perf_event_comm(struct task_struct *tsk); | 1010 | extern void perf_event_comm(struct task_struct *tsk); |
979 | extern void perf_event_fork(struct task_struct *tsk); | 1011 | extern void perf_event_fork(struct task_struct *tsk); |
980 | 1012 | ||
981 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | 1013 | /* Callchains */ |
1014 | DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); | ||
1015 | |||
1016 | extern void perf_callchain_user(struct perf_callchain_entry *entry, | ||
1017 | struct pt_regs *regs); | ||
1018 | extern void perf_callchain_kernel(struct perf_callchain_entry *entry, | ||
1019 | struct pt_regs *regs); | ||
1020 | |||
1021 | |||
1022 | static inline void | ||
1023 | perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
1024 | { | ||
1025 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
1026 | entry->ip[entry->nr++] = ip; | ||
1027 | } | ||
982 | 1028 | ||
983 | extern int sysctl_perf_event_paranoid; | 1029 | extern int sysctl_perf_event_paranoid; |
984 | extern int sysctl_perf_event_mlock; | 1030 | extern int sysctl_perf_event_mlock; |
@@ -1021,21 +1067,19 @@ extern int perf_swevent_get_recursion_context(void); | |||
1021 | extern void perf_swevent_put_recursion_context(int rctx); | 1067 | extern void perf_swevent_put_recursion_context(int rctx); |
1022 | extern void perf_event_enable(struct perf_event *event); | 1068 | extern void perf_event_enable(struct perf_event *event); |
1023 | extern void perf_event_disable(struct perf_event *event); | 1069 | extern void perf_event_disable(struct perf_event *event); |
1070 | extern void perf_event_task_tick(void); | ||
1024 | #else | 1071 | #else |
1025 | static inline void | 1072 | static inline void |
1026 | perf_event_task_sched_in(struct task_struct *task) { } | 1073 | perf_event_task_sched_in(struct task_struct *task) { } |
1027 | static inline void | 1074 | static inline void |
1028 | perf_event_task_sched_out(struct task_struct *task, | 1075 | perf_event_task_sched_out(struct task_struct *task, |
1029 | struct task_struct *next) { } | 1076 | struct task_struct *next) { } |
1030 | static inline void | ||
1031 | perf_event_task_tick(struct task_struct *task) { } | ||
1032 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } | 1077 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } |
1033 | static inline void perf_event_exit_task(struct task_struct *child) { } | 1078 | static inline void perf_event_exit_task(struct task_struct *child) { } |
1034 | static inline void perf_event_free_task(struct task_struct *task) { } | 1079 | static inline void perf_event_free_task(struct task_struct *task) { } |
1080 | static inline void perf_event_delayed_put(struct task_struct *task) { } | ||
1035 | static inline void perf_event_do_pending(void) { } | 1081 | static inline void perf_event_do_pending(void) { } |
1036 | static inline void perf_event_print_debug(void) { } | 1082 | static inline void perf_event_print_debug(void) { } |
1037 | static inline void perf_disable(void) { } | ||
1038 | static inline void perf_enable(void) { } | ||
1039 | static inline int perf_event_task_disable(void) { return -EINVAL; } | 1083 | static inline int perf_event_task_disable(void) { return -EINVAL; } |
1040 | static inline int perf_event_task_enable(void) { return -EINVAL; } | 1084 | static inline int perf_event_task_enable(void) { return -EINVAL; } |
1041 | 1085 | ||
@@ -1058,6 +1102,7 @@ static inline int perf_swevent_get_recursion_context(void) { return -1; } | |||
1058 | static inline void perf_swevent_put_recursion_context(int rctx) { } | 1102 | static inline void perf_swevent_put_recursion_context(int rctx) { } |
1059 | static inline void perf_event_enable(struct perf_event *event) { } | 1103 | static inline void perf_event_enable(struct perf_event *event) { } |
1060 | static inline void perf_event_disable(struct perf_event *event) { } | 1104 | static inline void perf_event_disable(struct perf_event *event) { } |
1105 | static inline void perf_event_task_tick(void) { } | ||
1061 | #endif | 1106 | #endif |
1062 | 1107 | ||
1063 | #define perf_output_put(handle, x) \ | 1108 | #define perf_output_put(handle, x) \ |