about summary refs log tree commit diff stats
path: root/include/linux/perf_event.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h  41
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 057bf22a8323..4f1279e105ee 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -747,6 +747,16 @@ struct perf_event {
747 u64 tstamp_running; 747 u64 tstamp_running;
748 u64 tstamp_stopped; 748 u64 tstamp_stopped;
749 749
750 /*
751 * timestamp shadows the actual context timing but it can
752 * be safely used in NMI interrupt context. It reflects the
753 * context time as it was when the event was last scheduled in.
754 *
755 * ctx_time already accounts for ctx->timestamp. Therefore to
756 * compute ctx_time for a sample, simply add perf_clock().
757 */
758 u64 shadow_ctx_time;
759
750 struct perf_event_attr attr; 760 struct perf_event_attr attr;
751 struct hw_perf_event hw; 761 struct hw_perf_event hw;
752 762
@@ -840,6 +850,7 @@ struct perf_event_context {
840 int nr_active; 850 int nr_active;
841 int is_active; 851 int is_active;
842 int nr_stat; 852 int nr_stat;
853 int rotate_disable;
843 atomic_t refcount; 854 atomic_t refcount;
844 struct task_struct *task; 855 struct task_struct *task;
845 856
@@ -876,6 +887,7 @@ struct perf_cpu_context {
876 int exclusive; 887 int exclusive;
877 struct list_head rotation_list; 888 struct list_head rotation_list;
878 int jiffies_interval; 889 int jiffies_interval;
890 struct pmu *active_pmu;
879}; 891};
880 892
881struct perf_output_handle { 893struct perf_output_handle {
@@ -898,20 +910,6 @@ extern int perf_num_counters(void);
898extern const char *perf_pmu_name(void); 910extern const char *perf_pmu_name(void);
899extern void __perf_event_task_sched_in(struct task_struct *task); 911extern void __perf_event_task_sched_in(struct task_struct *task);
900extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); 912extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
901
902extern atomic_t perf_task_events;
903
904static inline void perf_event_task_sched_in(struct task_struct *task)
905{
906 COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
907}
908
909static inline
910void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
911{
912 COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
913}
914
915extern int perf_event_init_task(struct task_struct *child); 913extern int perf_event_init_task(struct task_struct *child);
916extern void perf_event_exit_task(struct task_struct *child); 914extern void perf_event_exit_task(struct task_struct *child);
917extern void perf_event_free_task(struct task_struct *task); 915extern void perf_event_free_task(struct task_struct *task);
@@ -1020,6 +1018,21 @@ have_event:
1020 __perf_sw_event(event_id, nr, nmi, regs, addr); 1018 __perf_sw_event(event_id, nr, nmi, regs, addr);
1021} 1019}
1022 1020
1021extern atomic_t perf_task_events;
1022
1023static inline void perf_event_task_sched_in(struct task_struct *task)
1024{
1025 COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
1026}
1027
1028static inline
1029void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
1030{
1031 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1032
1033 COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
1034}
1035
1023extern void perf_event_mmap(struct vm_area_struct *vma); 1036extern void perf_event_mmap(struct vm_area_struct *vma);
1024extern struct perf_guest_info_callbacks *perf_guest_cbs; 1037extern struct perf_guest_info_callbacks *perf_guest_cbs;
1025extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); 1038extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);