author		Dmitry Torokhov <dmitry.torokhov@gmail.com>	2009-07-23 00:47:29 -0400
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2009-07-23 00:47:29 -0400
commit		bd072111e7319d90a7b8127f91c2806b9a6f279e (patch)
tree		1686978814a2387ebfc16f9f5778a7f0caaf319b /include/linux/perf_counter.h
parent		24d01c0681bfbc10a99304c48a89ad213d2d7a4b (diff)
parent		4be3bd7849165e7efa6b0b35a23d6a3598d97465 (diff)
Merge commit 'v2.6.31-rc4' into next
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--	include/linux/perf_counter.h	753
1 file changed, 753 insertions, 0 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
new file mode 100644
index 000000000000..bd15d7a5f5ce
--- /dev/null
+++ b/include/linux/perf_counter.h
@@ -0,0 +1,753 @@
/*
 * Performance counters:
 *
 *  Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE	= 0,
	PERF_TYPE_SOFTWARE	= 1,
	PERF_TYPE_TRACEPOINT	= 2,
	PERF_TYPE_HW_CACHE	= 3,
	PERF_TYPE_RAW		= 4,

	PERF_TYPE_MAX,		/* non-ABI */
};

/*
 * Generalized performance counter event types, used by the
 * attr.event_id parameter of the sys_perf_counter_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache counters:
 *
 *  { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *  { read, write, prefetch } x
 *  { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D		= 0,
	PERF_COUNT_HW_CACHE_L1I		= 1,
	PERF_COUNT_HW_CACHE_LL		= 2,
	PERF_COUNT_HW_CACHE_DTLB	= 3,
	PERF_COUNT_HW_CACHE_ITLB	= 4,
	PERF_COUNT_HW_CACHE_BPU		= 5,

	PERF_COUNT_HW_CACHE_MAX,	/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ	= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE	= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH	= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,	/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK		= 0,
	PERF_COUNT_SW_TASK_CLOCK	= 1,
	PERF_COUNT_SW_PAGE_FAULTS	= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS	= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ	= 6,

	PERF_COUNT_SW_MAX,		/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_counter_sample_format {
	PERF_SAMPLE_IP		= 1U << 0,
	PERF_SAMPLE_TID		= 1U << 1,
	PERF_SAMPLE_TIME	= 1U << 2,
	PERF_SAMPLE_ADDR	= 1U << 3,
	PERF_SAMPLE_GROUP	= 1U << 4,
	PERF_SAMPLE_CALLCHAIN	= 1U << 5,
	PERF_SAMPLE_ID		= 1U << 6,
	PERF_SAMPLE_CPU		= 1U << 7,
	PERF_SAMPLE_PERIOD	= 1U << 8,
	PERF_SAMPLE_STREAM_ID	= 1U << 9,

	PERF_SAMPLE_MAX		= 1U << 10,	/* non-ABI */
};

/*
 * Bits that can be set in attr.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,

	PERF_FORMAT_MAX			= 1U << 3,	/* non-ABI */
};

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32 type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32 size;

	/*
	 * Type specific configuration information.
	 */
	__u64 config;

	union {
		__u64 sample_period;
		__u64 sample_freq;
	};

	__u64 sample_type;
	__u64 read_format;

	__u64	disabled       : 1, /* off by default        */
		inherit        : 1, /* children inherit it   */
		pinned         : 1, /* must always be on PMU */
		exclusive      : 1, /* only group on PMU     */
		exclude_user   : 1, /* don't count user      */
		exclude_kernel : 1, /* ditto kernel          */
		exclude_hv     : 1, /* ditto hypervisor      */
		exclude_idle   : 1, /* don't count when idle */
		mmap           : 1, /* include mmap data     */
		comm           : 1, /* include comm data     */
		freq           : 1, /* use freq, not period  */
		inherit_stat   : 1, /* per task counts       */
		enable_on_exec : 1, /* next exec enables     */

		__reserved_1   : 51;

	__u32 wakeup_events;	/* wakeup every n events */
	__u32 __reserved_2;

	__u64 __reserved_3;
};
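As an aside (not part of the header): a minimal user-space sketch of how struct perf_counter_attr might be filled in for a sampled CPU-cycle counter. The chosen period and the sample_type/read_format bit combination are illustrative only; every identifier used below is declared in this file.

	#include <string.h>
	#include <linux/perf_counter.h>

	static void init_cycles_attr(struct perf_counter_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));		/* keep reserved fields zero */

		attr->type	     = PERF_TYPE_HARDWARE;	/* generalized hw event */
		attr->size	     = sizeof(*attr);		/* fwd/bwd compat */
		attr->config	     = PERF_COUNT_HW_CPU_CYCLES;
		attr->sample_period  = 100000;			/* sample every 100k cycles */
		attr->sample_type    = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
		attr->read_format    = PERF_FORMAT_TOTAL_TIME_ENABLED |
				       PERF_FORMAT_TOTAL_TIME_RUNNING;
		attr->disabled	     = 1;			/* start disabled */
		attr->exclude_kernel = 1;			/* count user mode only */
	}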
192 | |||
193 | /* | ||
194 | * Ioctls that can be done on a perf counter fd: | ||
195 | */ | ||
196 | #define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) | ||
197 | #define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) | ||
198 | #define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) | ||
199 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) | ||
200 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) | ||
201 | |||
202 | enum perf_counter_ioc_flags { | ||
203 | PERF_IOC_FLAG_GROUP = 1U << 0, | ||
204 | }; | ||
205 | |||
206 | /* | ||
207 | * Structure of the page that can be mapped via mmap | ||
208 | */ | ||
209 | struct perf_counter_mmap_page { | ||
210 | __u32 version; /* version number of this structure */ | ||
211 | __u32 compat_version; /* lowest version this is compat with */ | ||
212 | |||
213 | /* | ||
214 | * Bits needed to read the hw counters in user-space. | ||
215 | * | ||
216 | * u32 seq; | ||
217 | * s64 count; | ||
218 | * | ||
219 | * do { | ||
220 | * seq = pc->lock; | ||
221 | * | ||
222 | * barrier() | ||
223 | * if (pc->index) { | ||
224 | * count = pmc_read(pc->index - 1); | ||
225 | * count += pc->offset; | ||
226 | * } else | ||
227 | * goto regular_read; | ||
228 | * | ||
229 | * barrier(); | ||
230 | * } while (pc->lock != seq); | ||
231 | * | ||
232 | * NOTE: for obvious reason this only works on self-monitoring | ||
233 | * processes. | ||
234 | */ | ||
235 | __u32 lock; /* seqlock for synchronization */ | ||
236 | __u32 index; /* hardware counter identifier */ | ||
237 | __s64 offset; /* add to hardware counter value */ | ||
238 | __u64 time_enabled; /* time counter active */ | ||
239 | __u64 time_running; /* time counter on cpu */ | ||
240 | |||
241 | /* | ||
242 | * Hole for extension of the self monitor capabilities | ||
243 | */ | ||
244 | |||
245 | __u64 __reserved[123]; /* align to 1k */ | ||
246 | |||
247 | /* | ||
248 | * Control data for the mmap() data buffer. | ||
249 | * | ||
250 | * User-space reading the @data_head value should issue an rmb(), on | ||
251 | * SMP capable platforms, after reading this value -- see | ||
252 | * perf_counter_wakeup(). | ||
253 | * | ||
254 | * When the mapping is PROT_WRITE the @data_tail value should be | ||
255 | * written by userspace to reflect the last read data. In this case | ||
256 | * the kernel will not over-write unread data. | ||
257 | */ | ||
258 | __u64 data_head; /* head in the data section */ | ||
259 | __u64 data_tail; /* user-space written tail */ | ||
260 | }; | ||
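As another aside (not part of the header): the pseudocode in the comment above, expanded into a compilable self-monitoring read. Here pmc_read() is an assumed, architecture-specific helper (e.g. built on RDPMC on x86) that this header does not provide, and the empty inline asm stands in for the barrier() compiler barrier.

	#include <linux/perf_counter.h>

	extern __s64 pmc_read(unsigned int idx);	/* hypothetical arch-specific helper */

	#define compiler_barrier()	__asm__ __volatile__("" ::: "memory")

	static __s64 self_monitor_read(volatile struct perf_counter_mmap_page *pc,
				       int *need_regular_read)
	{
		__u32 seq;
		__s64 count = 0;

		*need_regular_read = 0;
		do {
			seq = pc->lock;
			compiler_barrier();

			if (pc->index) {
				count  = pmc_read(pc->index - 1);
				count += pc->offset;
			} else {
				/* counter not on a hw PMC right now: read() the fd instead */
				*need_regular_read = 1;
				return 0;
			}

			compiler_barrier();
		} while (pc->lock != seq);	/* retry if the kernel updated the page */

		return count;
	}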
261 | |||
262 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) | ||
263 | #define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) | ||
264 | #define PERF_EVENT_MISC_KERNEL (1 << 0) | ||
265 | #define PERF_EVENT_MISC_USER (2 << 0) | ||
266 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) | ||
267 | |||
268 | struct perf_event_header { | ||
269 | __u32 type; | ||
270 | __u16 misc; | ||
271 | __u16 size; | ||
272 | }; | ||
273 | |||
274 | enum perf_event_type { | ||
275 | |||
276 | /* | ||
277 | * The MMAP events record the PROT_EXEC mappings so that we can | ||
278 | * correlate userspace IPs to code. They have the following structure: | ||
279 | * | ||
280 | * struct { | ||
281 | * struct perf_event_header header; | ||
282 | * | ||
283 | * u32 pid, tid; | ||
284 | * u64 addr; | ||
285 | * u64 len; | ||
286 | * u64 pgoff; | ||
287 | * char filename[]; | ||
288 | * }; | ||
289 | */ | ||
290 | PERF_EVENT_MMAP = 1, | ||
291 | |||
292 | /* | ||
293 | * struct { | ||
294 | * struct perf_event_header header; | ||
295 | * u64 id; | ||
296 | * u64 lost; | ||
297 | * }; | ||
298 | */ | ||
299 | PERF_EVENT_LOST = 2, | ||
300 | |||
301 | /* | ||
302 | * struct { | ||
303 | * struct perf_event_header header; | ||
304 | * | ||
305 | * u32 pid, tid; | ||
306 | * char comm[]; | ||
307 | * }; | ||
308 | */ | ||
309 | PERF_EVENT_COMM = 3, | ||
310 | |||
311 | /* | ||
312 | * struct { | ||
313 | * struct perf_event_header header; | ||
314 | * u64 time; | ||
315 | * u64 id; | ||
316 | * u64 stream_id; | ||
317 | * }; | ||
318 | */ | ||
319 | PERF_EVENT_THROTTLE = 5, | ||
320 | PERF_EVENT_UNTHROTTLE = 6, | ||
321 | |||
322 | /* | ||
323 | * struct { | ||
324 | * struct perf_event_header header; | ||
325 | * u32 pid, ppid; | ||
326 | * }; | ||
327 | */ | ||
328 | PERF_EVENT_FORK = 7, | ||
329 | |||
330 | /* | ||
331 | * struct { | ||
332 | * struct perf_event_header header; | ||
333 | * u32 pid, tid; | ||
334 | * u64 value; | ||
335 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
336 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
337 | * { u64 parent_id; } && PERF_FORMAT_ID | ||
338 | * }; | ||
339 | */ | ||
340 | PERF_EVENT_READ = 8, | ||
341 | |||
342 | /* | ||
343 | * struct { | ||
344 | * struct perf_event_header header; | ||
345 | * | ||
346 | * { u64 ip; } && PERF_SAMPLE_IP | ||
347 | * { u32 pid, tid; } && PERF_SAMPLE_TID | ||
348 | * { u64 time; } && PERF_SAMPLE_TIME | ||
349 | * { u64 addr; } && PERF_SAMPLE_ADDR | ||
350 | * { u64 id; } && PERF_SAMPLE_ID | ||
351 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID | ||
352 | * { u32 cpu, res; } && PERF_SAMPLE_CPU | ||
353 | * { u64 period; } && PERF_SAMPLE_PERIOD | ||
354 | * | ||
355 | * { u64 nr; | ||
356 | * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP | ||
357 | * | ||
358 | * { u64 nr, | ||
359 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | ||
360 | * }; | ||
361 | */ | ||
362 | PERF_EVENT_SAMPLE = 9, | ||
363 | |||
364 | PERF_EVENT_MAX, /* non-ABI */ | ||
365 | }; | ||
366 | |||
367 | enum perf_callchain_context { | ||
368 | PERF_CONTEXT_HV = (__u64)-32, | ||
369 | PERF_CONTEXT_KERNEL = (__u64)-128, | ||
370 | PERF_CONTEXT_USER = (__u64)-512, | ||
371 | |||
372 | PERF_CONTEXT_GUEST = (__u64)-2048, | ||
373 | PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, | ||
374 | PERF_CONTEXT_GUEST_USER = (__u64)-2560, | ||
375 | |||
376 | PERF_CONTEXT_MAX = (__u64)-4095, | ||
377 | }; | ||
378 | |||
379 | #ifdef __KERNEL__ | ||
380 | /* | ||
381 | * Kernel-internal data types and definitions: | ||
382 | */ | ||
383 | |||
384 | #ifdef CONFIG_PERF_COUNTERS | ||
385 | # include <asm/perf_counter.h> | ||
386 | #endif | ||
387 | |||
388 | #include <linux/list.h> | ||
389 | #include <linux/mutex.h> | ||
390 | #include <linux/rculist.h> | ||
391 | #include <linux/rcupdate.h> | ||
392 | #include <linux/spinlock.h> | ||
393 | #include <linux/hrtimer.h> | ||
394 | #include <linux/fs.h> | ||
395 | #include <linux/pid_namespace.h> | ||
396 | #include <asm/atomic.h> | ||
397 | |||
398 | #define PERF_MAX_STACK_DEPTH 255 | ||
399 | |||
400 | struct perf_callchain_entry { | ||
401 | __u64 nr; | ||
402 | __u64 ip[PERF_MAX_STACK_DEPTH]; | ||
403 | }; | ||
404 | |||
405 | struct task_struct; | ||
406 | |||
407 | /** | ||
408 | * struct hw_perf_counter - performance counter hardware details: | ||
409 | */ | ||
410 | struct hw_perf_counter { | ||
411 | #ifdef CONFIG_PERF_COUNTERS | ||
412 | union { | ||
413 | struct { /* hardware */ | ||
414 | u64 config; | ||
415 | unsigned long config_base; | ||
416 | unsigned long counter_base; | ||
417 | int idx; | ||
418 | }; | ||
419 | union { /* software */ | ||
420 | atomic64_t count; | ||
421 | struct hrtimer hrtimer; | ||
422 | }; | ||
423 | }; | ||
424 | atomic64_t prev_count; | ||
425 | u64 sample_period; | ||
426 | u64 last_period; | ||
427 | atomic64_t period_left; | ||
428 | u64 interrupts; | ||
429 | |||
430 | u64 freq_count; | ||
431 | u64 freq_interrupts; | ||
432 | u64 freq_stamp; | ||
433 | #endif | ||
434 | }; | ||
435 | |||
436 | struct perf_counter; | ||
437 | |||
438 | /** | ||
439 | * struct pmu - generic performance monitoring unit | ||
440 | */ | ||
441 | struct pmu { | ||
442 | int (*enable) (struct perf_counter *counter); | ||
443 | void (*disable) (struct perf_counter *counter); | ||
444 | void (*read) (struct perf_counter *counter); | ||
445 | void (*unthrottle) (struct perf_counter *counter); | ||
446 | }; | ||
447 | |||
448 | /** | ||
449 | * enum perf_counter_active_state - the states of a counter | ||
450 | */ | ||
451 | enum perf_counter_active_state { | ||
452 | PERF_COUNTER_STATE_ERROR = -2, | ||
453 | PERF_COUNTER_STATE_OFF = -1, | ||
454 | PERF_COUNTER_STATE_INACTIVE = 0, | ||
455 | PERF_COUNTER_STATE_ACTIVE = 1, | ||
456 | }; | ||
457 | |||
458 | struct file; | ||
459 | |||
460 | struct perf_mmap_data { | ||
461 | struct rcu_head rcu_head; | ||
462 | int nr_pages; /* nr of data pages */ | ||
463 | int writable; /* are we writable */ | ||
464 | int nr_locked; /* nr pages mlocked */ | ||
465 | |||
466 | atomic_t poll; /* POLL_ for wakeups */ | ||
467 | atomic_t events; /* event limit */ | ||
468 | |||
469 | atomic_long_t head; /* write position */ | ||
470 | atomic_long_t done_head; /* completed head */ | ||
471 | |||
472 | atomic_t lock; /* concurrent writes */ | ||
473 | atomic_t wakeup; /* needs a wakeup */ | ||
474 | atomic_t lost; /* nr records lost */ | ||
475 | |||
476 | struct perf_counter_mmap_page *user_page; | ||
477 | void *data_pages[0]; | ||
478 | }; | ||
479 | |||
480 | struct perf_pending_entry { | ||
481 | struct perf_pending_entry *next; | ||
482 | void (*func)(struct perf_pending_entry *); | ||
483 | }; | ||
484 | |||
485 | /** | ||
486 | * struct perf_counter - performance counter kernel representation: | ||
487 | */ | ||
488 | struct perf_counter { | ||
489 | #ifdef CONFIG_PERF_COUNTERS | ||
490 | struct list_head list_entry; | ||
491 | struct list_head event_entry; | ||
492 | struct list_head sibling_list; | ||
493 | int nr_siblings; | ||
494 | struct perf_counter *group_leader; | ||
495 | const struct pmu *pmu; | ||
496 | |||
497 | enum perf_counter_active_state state; | ||
498 | atomic64_t count; | ||
499 | |||
500 | /* | ||
501 | * These are the total time in nanoseconds that the counter | ||
502 | * has been enabled (i.e. eligible to run, and the task has | ||
503 | * been scheduled in, if this is a per-task counter) | ||
504 | * and running (scheduled onto the CPU), respectively. | ||
505 | * | ||
506 | * They are computed from tstamp_enabled, tstamp_running and | ||
507 | * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. | ||
508 | */ | ||
509 | u64 total_time_enabled; | ||
510 | u64 total_time_running; | ||
511 | |||
512 | /* | ||
513 | * These are timestamps used for computing total_time_enabled | ||
514 | * and total_time_running when the counter is in INACTIVE or | ||
515 | * ACTIVE state, measured in nanoseconds from an arbitrary point | ||
516 | * in time. | ||
517 | * tstamp_enabled: the notional time when the counter was enabled | ||
518 | * tstamp_running: the notional time when the counter was scheduled on | ||
519 | * tstamp_stopped: in INACTIVE state, the notional time when the | ||
520 | * counter was scheduled off. | ||
521 | */ | ||
522 | u64 tstamp_enabled; | ||
523 | u64 tstamp_running; | ||
524 | u64 tstamp_stopped; | ||
525 | |||
526 | struct perf_counter_attr attr; | ||
527 | struct hw_perf_counter hw; | ||
528 | |||
529 | struct perf_counter_context *ctx; | ||
530 | struct file *filp; | ||
531 | |||
532 | /* | ||
533 | * These accumulate total time (in nanoseconds) that children | ||
534 | * counters have been enabled and running, respectively. | ||
535 | */ | ||
536 | atomic64_t child_total_time_enabled; | ||
537 | atomic64_t child_total_time_running; | ||
538 | |||
539 | /* | ||
540 | * Protect attach/detach and child_list: | ||
541 | */ | ||
542 | struct mutex child_mutex; | ||
543 | struct list_head child_list; | ||
544 | struct perf_counter *parent; | ||
545 | |||
546 | int oncpu; | ||
547 | int cpu; | ||
548 | |||
549 | struct list_head owner_entry; | ||
550 | struct task_struct *owner; | ||
551 | |||
552 | /* mmap bits */ | ||
553 | struct mutex mmap_mutex; | ||
554 | atomic_t mmap_count; | ||
555 | struct perf_mmap_data *data; | ||
556 | |||
557 | /* poll related */ | ||
558 | wait_queue_head_t waitq; | ||
559 | struct fasync_struct *fasync; | ||
560 | |||
561 | /* delayed work for NMIs and such */ | ||
562 | int pending_wakeup; | ||
563 | int pending_kill; | ||
564 | int pending_disable; | ||
565 | struct perf_pending_entry pending; | ||
566 | |||
567 | atomic_t event_limit; | ||
568 | |||
569 | void (*destroy)(struct perf_counter *); | ||
570 | struct rcu_head rcu_head; | ||
571 | |||
572 | struct pid_namespace *ns; | ||
573 | u64 id; | ||
574 | #endif | ||
575 | }; | ||
576 | |||
577 | /** | ||
578 | * struct perf_counter_context - counter context structure | ||
579 | * | ||
580 | * Used as a container for task counters and CPU counters as well: | ||
581 | */ | ||
582 | struct perf_counter_context { | ||
583 | /* | ||
584 | * Protect the states of the counters in the list, | ||
585 | * nr_active, and the list: | ||
586 | */ | ||
587 | spinlock_t lock; | ||
588 | /* | ||
589 | * Protect the list of counters. Locking either mutex or lock | ||
590 | * is sufficient to ensure the list doesn't change; to change | ||
591 | * the list you need to lock both the mutex and the spinlock. | ||
592 | */ | ||
593 | struct mutex mutex; | ||
594 | |||
595 | struct list_head counter_list; | ||
596 | struct list_head event_list; | ||
597 | int nr_counters; | ||
598 | int nr_active; | ||
599 | int is_active; | ||
600 | int nr_stat; | ||
601 | atomic_t refcount; | ||
602 | struct task_struct *task; | ||
603 | |||
604 | /* | ||
605 | * Context clock, runs when context enabled. | ||
606 | */ | ||
607 | u64 time; | ||
608 | u64 timestamp; | ||
609 | |||
610 | /* | ||
611 | * These fields let us detect when two contexts have both | ||
612 | * been cloned (inherited) from a common ancestor. | ||
613 | */ | ||
614 | struct perf_counter_context *parent_ctx; | ||
615 | u64 parent_gen; | ||
616 | u64 generation; | ||
617 | int pin_count; | ||
618 | struct rcu_head rcu_head; | ||
619 | }; | ||
620 | |||
621 | /** | ||
622 | * struct perf_counter_cpu_context - per cpu counter context structure | ||
623 | */ | ||
624 | struct perf_cpu_context { | ||
625 | struct perf_counter_context ctx; | ||
626 | struct perf_counter_context *task_ctx; | ||
627 | int active_oncpu; | ||
628 | int max_pertask; | ||
629 | int exclusive; | ||
630 | |||
631 | /* | ||
632 | * Recursion avoidance: | ||
633 | * | ||
634 | * task, softirq, irq, nmi context | ||
635 | */ | ||
636 | int recursion[4]; | ||
637 | }; | ||
638 | |||
639 | #ifdef CONFIG_PERF_COUNTERS | ||
640 | |||
641 | /* | ||
642 | * Set by architecture code: | ||
643 | */ | ||
644 | extern int perf_max_counters; | ||
645 | |||
646 | extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); | ||
647 | |||
648 | extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); | ||
649 | extern void perf_counter_task_sched_out(struct task_struct *task, | ||
650 | struct task_struct *next, int cpu); | ||
651 | extern void perf_counter_task_tick(struct task_struct *task, int cpu); | ||
652 | extern int perf_counter_init_task(struct task_struct *child); | ||
653 | extern void perf_counter_exit_task(struct task_struct *child); | ||
654 | extern void perf_counter_free_task(struct task_struct *task); | ||
655 | extern void set_perf_counter_pending(void); | ||
656 | extern void perf_counter_do_pending(void); | ||
657 | extern void perf_counter_print_debug(void); | ||
658 | extern void __perf_disable(void); | ||
659 | extern bool __perf_enable(void); | ||
660 | extern void perf_disable(void); | ||
661 | extern void perf_enable(void); | ||
662 | extern int perf_counter_task_disable(void); | ||
663 | extern int perf_counter_task_enable(void); | ||
664 | extern int hw_perf_group_sched_in(struct perf_counter *group_leader, | ||
665 | struct perf_cpu_context *cpuctx, | ||
666 | struct perf_counter_context *ctx, int cpu); | ||
667 | extern void perf_counter_update_userpage(struct perf_counter *counter); | ||
668 | |||
669 | struct perf_sample_data { | ||
670 | struct pt_regs *regs; | ||
671 | u64 addr; | ||
672 | u64 period; | ||
673 | }; | ||
674 | |||
675 | extern int perf_counter_overflow(struct perf_counter *counter, int nmi, | ||
676 | struct perf_sample_data *data); | ||
677 | |||
678 | /* | ||
679 | * Return 1 for a software counter, 0 for a hardware counter | ||
680 | */ | ||
681 | static inline int is_software_counter(struct perf_counter *counter) | ||
682 | { | ||
683 | return (counter->attr.type != PERF_TYPE_RAW) && | ||
684 | (counter->attr.type != PERF_TYPE_HARDWARE) && | ||
685 | (counter->attr.type != PERF_TYPE_HW_CACHE); | ||
686 | } | ||
687 | |||
688 | extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; | ||
689 | |||
690 | extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); | ||
691 | |||
692 | static inline void | ||
693 | perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | ||
694 | { | ||
695 | if (atomic_read(&perf_swcounter_enabled[event])) | ||
696 | __perf_swcounter_event(event, nr, nmi, regs, addr); | ||
697 | } | ||
698 | |||
699 | extern void __perf_counter_mmap(struct vm_area_struct *vma); | ||
700 | |||
701 | static inline void perf_counter_mmap(struct vm_area_struct *vma) | ||
702 | { | ||
703 | if (vma->vm_flags & VM_EXEC) | ||
704 | __perf_counter_mmap(vma); | ||
705 | } | ||
706 | |||
707 | extern void perf_counter_comm(struct task_struct *tsk); | ||
708 | extern void perf_counter_fork(struct task_struct *tsk); | ||
709 | |||
710 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | ||
711 | |||
712 | extern int sysctl_perf_counter_paranoid; | ||
713 | extern int sysctl_perf_counter_mlock; | ||
714 | extern int sysctl_perf_counter_sample_rate; | ||
715 | |||
716 | extern void perf_counter_init(void); | ||
717 | |||
718 | #ifndef perf_misc_flags | ||
719 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ | ||
720 | PERF_EVENT_MISC_KERNEL) | ||
721 | #define perf_instruction_pointer(regs) instruction_pointer(regs) | ||
722 | #endif | ||
723 | |||
724 | #else | ||
725 | static inline void | ||
726 | perf_counter_task_sched_in(struct task_struct *task, int cpu) { } | ||
727 | static inline void | ||
728 | perf_counter_task_sched_out(struct task_struct *task, | ||
729 | struct task_struct *next, int cpu) { } | ||
730 | static inline void | ||
731 | perf_counter_task_tick(struct task_struct *task, int cpu) { } | ||
732 | static inline int perf_counter_init_task(struct task_struct *child) { return 0; } | ||
733 | static inline void perf_counter_exit_task(struct task_struct *child) { } | ||
734 | static inline void perf_counter_free_task(struct task_struct *task) { } | ||
735 | static inline void perf_counter_do_pending(void) { } | ||
736 | static inline void perf_counter_print_debug(void) { } | ||
737 | static inline void perf_disable(void) { } | ||
738 | static inline void perf_enable(void) { } | ||
739 | static inline int perf_counter_task_disable(void) { return -EINVAL; } | ||
740 | static inline int perf_counter_task_enable(void) { return -EINVAL; } | ||
741 | |||
742 | static inline void | ||
743 | perf_swcounter_event(u32 event, u64 nr, int nmi, | ||
744 | struct pt_regs *regs, u64 addr) { } | ||
745 | |||
746 | static inline void perf_counter_mmap(struct vm_area_struct *vma) { } | ||
747 | static inline void perf_counter_comm(struct task_struct *tsk) { } | ||
748 | static inline void perf_counter_fork(struct task_struct *tsk) { } | ||
749 | static inline void perf_counter_init(void) { } | ||
750 | #endif | ||
751 | |||
752 | #endif /* __KERNEL__ */ | ||
753 | #endif /* _LINUX_PERF_COUNTER_H */ | ||
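
To round out the listing, here is a hedged end-to-end sketch of how the user-space ABI declared above is typically consumed: open a counter, drive it with the ioctls, and read back the 64-bit value. The perf_counter_open() wrapper and the raw syscall number __NR_perf_counter_open are assumptions about the contemporaneous, architecture-specific syscall interface; error handling is kept minimal.

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>

	/* Assumed thin wrapper; __NR_perf_counter_open is architecture-specific. */
	static long perf_counter_open(struct perf_counter_attr *attr, pid_t pid,
				      int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_counter_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type	    = PERF_TYPE_HARDWARE;
		attr.size	    = sizeof(attr);
		attr.config	    = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled	    = 1;
		attr.exclude_kernel = 1;

		/* Count instructions of the calling task on any CPU, no group leader. */
		fd = perf_counter_open(&attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_counter_open");
			return 1;
		}

		ioctl(fd, PERF_COUNTER_IOC_RESET, 0);
		ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0);

		/* ... workload under measurement ... */

		ioctl(fd, PERF_COUNTER_IOC_DISABLE, 0);
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("instructions: %llu\n", (unsigned long long)count);

		close(fd);
		return 0;
	}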