author    Ingo Molnar <mingo@kernel.org>  2015-03-27 05:10:47 -0400
committer Ingo Molnar <mingo@kernel.org>  2015-03-27 05:10:47 -0400
commit    b381e63b48a0b6befc7b4e55408c39012a0dcf8c (patch)
tree      92e70447e1b0cf721ef40515db6b97cb881c5066 /include
parent    4e6d7c2aa95158315902647963b359b32da5c295 (diff)
parent    ccd41c86ad4d464d0ed4e48d80759ff85c2115b0 (diff)
Merge branch 'perf/core' into perf/timer, before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/perf_event.h       70
-rw-r--r--  include/uapi/linux/perf_event.h  49
2 files changed, 100 insertions(+), 19 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2b621982938d..b16eac5f54ce 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -53,6 +53,7 @@ struct perf_guest_info_callbacks {
 #include <linux/sysfs.h>
 #include <linux/perf_regs.h>
 #include <linux/workqueue.h>
+#include <linux/cgroup.h>
 #include <asm/local.h>
 
 struct perf_callchain_entry {
@@ -118,10 +119,16 @@ struct hw_perf_event {
 		struct hrtimer	hrtimer;
 	};
 	struct { /* tracepoint */
-		struct task_struct	*tp_target;
 		/* for tp_event->class */
 		struct list_head	tp_list;
 	};
+	struct { /* intel_cqm */
+		int			cqm_state;
+		int			cqm_rmid;
+		struct list_head	cqm_events_entry;
+		struct list_head	cqm_groups_entry;
+		struct list_head	cqm_group_entry;
+	};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	struct { /* breakpoint */
 		/*
@@ -129,12 +136,12 @@ struct hw_perf_event {
 		 * problem hw_breakpoint has with context
 		 * creation and event initalization.
 		 */
-		struct task_struct		*bp_target;
 		struct arch_hw_breakpoint	info;
 		struct list_head		bp_list;
 	};
 #endif
 	};
+	struct task_struct		*target;
 	int				state;
 	local64_t			prev_count;
 	u64				sample_period;
@@ -262,9 +269,20 @@ struct pmu {
 	int (*event_idx)		(struct perf_event *event); /*optional */
 
 	/*
-	 * flush branch stack on context-switches (needed in cpu-wide mode)
+	 * context-switches callback
+	 */
+	void (*sched_task)		(struct perf_event_context *ctx,
+					bool sched_in);
+	/*
+	 * PMU specific data size
+	 */
+	size_t				task_ctx_size;
+
+
+	/*
+	 * Return the count value for a counter.
 	 */
-	void (*flush_branch_stack)	(void);
+	u64 (*count)			(struct perf_event *event); /*optional*/
 };
 
 /**
@@ -300,6 +318,7 @@ struct swevent_hlist {
 #define PERF_ATTACH_CONTEXT	0x01
 #define PERF_ATTACH_GROUP	0x02
 #define PERF_ATTACH_TASK	0x04
+#define PERF_ATTACH_TASK_DATA	0x08
 
 struct perf_cgroup;
 struct ring_buffer;
@@ -504,7 +523,7 @@ struct perf_event_context {
 	u64				generation;
 	int				pin_count;
 	int				nr_cgroups;	 /* cgroup evts */
-	int				nr_branch_stack; /* branch_stack evt */
+	void				*task_ctx_data; /* pmu specific data */
 	struct rcu_head			rcu_head;
 
 	struct delayed_work		orphans_remove;
@@ -540,6 +559,35 @@ struct perf_output_handle {
540 int page; 559 int page;
541}; 560};
542 561
562#ifdef CONFIG_CGROUP_PERF
563
564/*
565 * perf_cgroup_info keeps track of time_enabled for a cgroup.
566 * This is a per-cpu dynamically allocated data structure.
567 */
568struct perf_cgroup_info {
569 u64 time;
570 u64 timestamp;
571};
572
573struct perf_cgroup {
574 struct cgroup_subsys_state css;
575 struct perf_cgroup_info __percpu *info;
576};
577
578/*
579 * Must ensure cgroup is pinned (css_get) before calling
580 * this function. In other words, we cannot call this function
581 * if there is no cgroup event for the current CPU context.
582 */
583static inline struct perf_cgroup *
584perf_cgroup_from_task(struct task_struct *task)
585{
586 return container_of(task_css(task, perf_event_cgrp_id),
587 struct perf_cgroup, css);
588}
589#endif /* CONFIG_CGROUP_PERF */
590
543#ifdef CONFIG_PERF_EVENTS 591#ifdef CONFIG_PERF_EVENTS
544 592
545extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); 593extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
@@ -558,6 +606,8 @@ extern void perf_event_delayed_put(struct task_struct *task);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
+extern void perf_sched_cb_dec(struct pmu *pmu);
+extern void perf_sched_cb_inc(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern int perf_event_refresh(struct perf_event *event, int refresh);
@@ -731,6 +781,11 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 	__perf_event_task_sched_out(prev, next);
 }
 
+static inline u64 __perf_event_count(struct perf_event *event)
+{
+	return local64_read(&event->count) + atomic64_read(&event->child_count);
+}
+
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -800,6 +855,11 @@ static inline bool has_branch_stack(struct perf_event *event)
 	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
 }
 
+static inline bool needs_branch_stack(struct perf_event *event)
+{
+	return event->attr.branch_sample_type != 0;
+}
+
 extern int perf_output_begin(struct perf_output_handle *handle,
 			     struct perf_event *event, unsigned int size);
 extern void perf_output_end(struct perf_output_handle *handle);
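
[Editor's note] Taken together, the perf_event.h changes replace the old flush_branch_stack() callback with a generic context-switch hook plus per-task PMU data. A minimal sketch of how a PMU driver might wire these up, assuming hypothetical example_* names and helpers that are not part of this commit:

	/*
	 * Hypothetical PMU-driver sketch (not from this commit); all
	 * example_* identifiers are assumed.
	 */
	#include <linux/perf_event.h>

	struct example_task_ctx {
		u64	saved_state;	/* assumed per-task hardware state */
	};

	/* Delivered on context switch once armed via perf_sched_cb_inc(). */
	static void example_sched_task(struct perf_event_context *ctx,
				       bool sched_in)
	{
		/* core allocates task_ctx_size bytes of PMU-specific data */
		struct example_task_ctx *tctx = ctx->task_ctx_data;

		if (!tctx)
			return;
		if (sched_in)
			example_restore_hw_state(tctx);	/* assumed helper */
		else
			example_save_hw_state(tctx);	/* assumed helper */
	}

	/* Optional read-out override; the default sum is shown for reference. */
	static u64 example_count(struct perf_event *event)
	{
		return __perf_event_count(event);
	}

	static struct pmu example_pmu = {
		.sched_task	= example_sched_task,
		.task_ctx_size	= sizeof(struct example_task_ctx),
		.count		= example_count,
	};

The exported perf_sched_cb_inc()/perf_sched_cb_dec() pair lets a driver arm and disarm sched_task() delivery as events that need it come and go, instead of the previous unconditional flush_branch_stack() call; PERF_ATTACH_TASK_DATA marks events whose context needs the task_ctx_data allocation.
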
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 9b79abbd1ab8..1e3cd07cf76e 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -152,21 +152,42 @@ enum perf_event_sample_format {
  * The branch types can be combined, however BRANCH_ANY covers all types
  * of branches and therefore it supersedes all the other types.
  */
+enum perf_branch_sample_type_shift {
+	PERF_SAMPLE_BRANCH_USER_SHIFT		= 0, /* user branches */
+	PERF_SAMPLE_BRANCH_KERNEL_SHIFT		= 1, /* kernel branches */
+	PERF_SAMPLE_BRANCH_HV_SHIFT		= 2, /* hypervisor branches */
+
+	PERF_SAMPLE_BRANCH_ANY_SHIFT		= 3, /* any branch types */
+	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT	= 4, /* any call branch */
+	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT	= 5, /* any return branch */
+	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT	= 6, /* indirect calls */
+	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT	= 7, /* transaction aborts */
+	PERF_SAMPLE_BRANCH_IN_TX_SHIFT		= 8, /* in transaction */
+	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9, /* not in transaction */
+	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */
+
+	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* call/ret stack */
+
+	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
+};
+
 enum perf_branch_sample_type {
-	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
-	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
-	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */
+	PERF_SAMPLE_BRANCH_USER		= 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
+	PERF_SAMPLE_BRANCH_KERNEL	= 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
 
-	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
-	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
-	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
-	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */
-	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << 7, /* transaction aborts */
-	PERF_SAMPLE_BRANCH_IN_TX	= 1U << 8, /* in transaction */
-	PERF_SAMPLE_BRANCH_NO_TX	= 1U << 9, /* not in transaction */
-	PERF_SAMPLE_BRANCH_COND		= 1U << 10, /* conditional branches */
+	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
+	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
 
-	PERF_SAMPLE_BRANCH_MAX		= 1U << 11, /* non-ABI */
+	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+
+	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };
 
 #define PERF_SAMPLE_BRANCH_PLM_ALL \
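
[Editor's note] On the UAPI side, the only functional addition is the PERF_SAMPLE_BRANCH_CALL_STACK bit; the new _SHIFT enum is a pure refactoring of the existing bit values. A hedged user-space sketch of requesting the new call-stack mode follows; the event choice, sample period, and function name are arbitrary assumptions:

	/*
	 * Hypothetical user-space sketch (not from this commit): open a
	 * cycles counter that samples user branches in call-stack mode.
	 */
	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int open_callstack_counter(pid_t pid)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.sample_period = 100000;
		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
		/*
		 * Call-stack mode records call/return branches so samples
		 * can be unwound like a stack; it is aimed at user branches.
		 */
		attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
					  PERF_SAMPLE_BRANCH_CALL_STACK;
		attr.exclude_kernel = 1;

		return (int)syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
	}

Because the new bit is defined below PERF_SAMPLE_BRANCH_MAX, older kernels that predate this change reject it cleanly at perf_event_open() time rather than silently ignoring it.
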