diff options
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r-- | include/linux/perf_counter.h | 79 |
1 file changed, 49 insertions(+), 30 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 1b3118a1023a..89698d8aba5c 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -236,10 +236,16 @@ struct perf_counter_mmap_page {
236 | /* | 236 | /* |
237 | * Control data for the mmap() data buffer. | 237 | * Control data for the mmap() data buffer. |
238 | * | 238 | * |
239 | * User-space reading this value should issue an rmb(), on SMP capable | 239 | * User-space reading the @data_head value should issue an rmb(), on |
240 | * platforms, after reading this value -- see perf_counter_wakeup(). | 240 | * SMP capable platforms, after reading this value -- see |
241 | * perf_counter_wakeup(). | ||
242 | * | ||
243 | * When the mapping is PROT_WRITE the @data_tail value should be | ||
244 | * written by userspace to reflect the last read data. In this case | ||
245 | * the kernel will not over-write unread data. | ||
241 | */ | 246 | */ |
242 | __u64 data_head; /* head in the data section */ | 247 | __u64 data_head; /* head in the data section */ |
248 | __u64 data_tail; /* user-space written tail */ | ||
243 | }; | 249 | }; |
244 | 250 | ||
245 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) | 251 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) |
@@ -275,6 +281,15 @@ enum perf_event_type {
275 | 281 | ||
276 | /* | 282 | /* |
277 | * struct { | 283 | * struct { |
284 | * struct perf_event_header header; | ||
285 | * u64 id; | ||
286 | * u64 lost; | ||
287 | * }; | ||
288 | */ | ||
289 | PERF_EVENT_LOST = 2, | ||
290 | |||
291 | /* | ||
292 | * struct { | ||
278 | * struct perf_event_header header; | 293 | * struct perf_event_header header; |
279 | * | 294 | * |
280 | * u32 pid, tid; | 295 | * u32 pid, tid; |
@@ -313,30 +328,39 @@ enum perf_event_type {
313 | 328 | ||
314 | /* | 329 | /* |
315 | * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field | 330 | * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field |
316 | * will be PERF_RECORD_* | 331 | * will be PERF_SAMPLE_* |
317 | * | 332 | * |
318 | * struct { | 333 | * struct { |
319 | * struct perf_event_header header; | 334 | * struct perf_event_header header; |
320 | * | 335 | * |
321 | * { u64 ip; } && PERF_RECORD_IP | 336 | * { u64 ip; } && PERF_SAMPLE_IP |
322 | * { u32 pid, tid; } && PERF_RECORD_TID | 337 | * { u32 pid, tid; } && PERF_SAMPLE_TID |
323 | * { u64 time; } && PERF_RECORD_TIME | 338 | * { u64 time; } && PERF_SAMPLE_TIME |
324 | * { u64 addr; } && PERF_RECORD_ADDR | 339 | * { u64 addr; } && PERF_SAMPLE_ADDR |
325 | * { u64 config; } && PERF_RECORD_CONFIG | 340 | * { u64 config; } && PERF_SAMPLE_CONFIG |
326 | * { u32 cpu, res; } && PERF_RECORD_CPU | 341 | * { u32 cpu, res; } && PERF_SAMPLE_CPU |
327 | * | 342 | * |
328 | * { u64 nr; | 343 | * { u64 nr; |
329 | * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP | 344 | * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP |
330 | * | 345 | * |
331 | * { u16 nr, | 346 | * { u64 nr, |
332 | * hv, | 347 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN |
333 | * kernel, | ||
334 | * user; | ||
335 | * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN | ||
336 | * }; | 348 | * }; |
337 | */ | 349 | */ |
338 | }; | 350 | }; |
339 | 351 | ||
352 | enum perf_callchain_context { | ||
353 | PERF_CONTEXT_HV = (__u64)-32, | ||
354 | PERF_CONTEXT_KERNEL = (__u64)-128, | ||
355 | PERF_CONTEXT_USER = (__u64)-512, | ||
356 | |||
357 | PERF_CONTEXT_GUEST = (__u64)-2048, | ||
358 | PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, | ||
359 | PERF_CONTEXT_GUEST_USER = (__u64)-2560, | ||
360 | |||
361 | PERF_CONTEXT_MAX = (__u64)-4095, | ||
362 | }; | ||
363 | |||
340 | #ifdef __KERNEL__ | 364 | #ifdef __KERNEL__ |
341 | /* | 365 | /* |
342 | * Kernel-internal data types and definitions: | 366 | * Kernel-internal data types and definitions: |
@@ -356,6 +380,13 @@ enum perf_event_type {
356 | #include <linux/pid_namespace.h> | 380 | #include <linux/pid_namespace.h> |
357 | #include <asm/atomic.h> | 381 | #include <asm/atomic.h> |
358 | 382 | ||
383 | #define PERF_MAX_STACK_DEPTH 255 | ||
384 | |||
385 | struct perf_callchain_entry { | ||
386 | __u64 nr; | ||
387 | __u64 ip[PERF_MAX_STACK_DEPTH]; | ||
388 | }; | ||
389 | |||
359 | struct task_struct; | 390 | struct task_struct; |
360 | 391 | ||
361 | /** | 392 | /** |
@@ -414,6 +445,7 @@ struct file;
414 | struct perf_mmap_data { | 445 | struct perf_mmap_data { |
415 | struct rcu_head rcu_head; | 446 | struct rcu_head rcu_head; |
416 | int nr_pages; /* nr of data pages */ | 447 | int nr_pages; /* nr of data pages */ |
448 | int writable; /* are we writable */ | ||
417 | int nr_locked; /* nr pages mlocked */ | 449 | int nr_locked; /* nr pages mlocked */ |
418 | 450 | ||
419 | atomic_t poll; /* POLL_ for wakeups */ | 451 | atomic_t poll; /* POLL_ for wakeups */ |
@@ -423,8 +455,8 @@ struct perf_mmap_data {
423 | atomic_long_t done_head; /* completed head */ | 455 | atomic_long_t done_head; /* completed head */ |
424 | 456 | ||
425 | atomic_t lock; /* concurrent writes */ | 457 | atomic_t lock; /* concurrent writes */ |
426 | |||
427 | atomic_t wakeup; /* needs a wakeup */ | 458 | atomic_t wakeup; /* needs a wakeup */ |
459 | atomic_t lost; /* nr records lost */ | ||
428 | 460 | ||
429 | struct perf_counter_mmap_page *user_page; | 461 | struct perf_counter_mmap_page *user_page; |
430 | void *data_pages[0]; | 462 | void *data_pages[0]; |
@@ -604,6 +636,7 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
604 | extern int perf_counter_init_task(struct task_struct *child); | 636 | extern int perf_counter_init_task(struct task_struct *child); |
605 | extern void perf_counter_exit_task(struct task_struct *child); | 637 | extern void perf_counter_exit_task(struct task_struct *child); |
606 | extern void perf_counter_free_task(struct task_struct *task); | 638 | extern void perf_counter_free_task(struct task_struct *task); |
639 | extern void set_perf_counter_pending(void); | ||
607 | extern void perf_counter_do_pending(void); | 640 | extern void perf_counter_do_pending(void); |
608 | extern void perf_counter_print_debug(void); | 641 | extern void perf_counter_print_debug(void); |
609 | extern void __perf_disable(void); | 642 | extern void __perf_disable(void); |
@@ -649,18 +682,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma)
649 | extern void perf_counter_comm(struct task_struct *tsk); | 682 | extern void perf_counter_comm(struct task_struct *tsk); |
650 | extern void perf_counter_fork(struct task_struct *tsk); | 683 | extern void perf_counter_fork(struct task_struct *tsk); |
651 | 684 | ||
652 | extern void perf_counter_task_migration(struct task_struct *task, int cpu); | ||
653 | |||
654 | #define MAX_STACK_DEPTH 255 | ||
655 | |||
656 | struct perf_callchain_entry { | ||
657 | u16 nr; | ||
658 | u16 hv; | ||
659 | u16 kernel; | ||
660 | u16 user; | ||
661 | u64 ip[MAX_STACK_DEPTH]; | ||
662 | }; | ||
663 | |||
664 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | 685 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); |
665 | 686 | ||
666 | extern int sysctl_perf_counter_paranoid; | 687 | extern int sysctl_perf_counter_paranoid; |
@@ -701,8 +722,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
701 | static inline void perf_counter_comm(struct task_struct *tsk) { } | 722 | static inline void perf_counter_comm(struct task_struct *tsk) { } |
702 | static inline void perf_counter_fork(struct task_struct *tsk) { } | 723 | static inline void perf_counter_fork(struct task_struct *tsk) { } |
703 | static inline void perf_counter_init(void) { } | 724 | static inline void perf_counter_init(void) { } |
704 | static inline void perf_counter_task_migration(struct task_struct *task, | ||
705 | int cpu) { } | ||
706 | #endif | 725 | #endif |
707 | 726 | ||
708 | #endif /* __KERNEL__ */ | 727 | #endif /* __KERNEL__ */ |