Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--   include/linux/perf_event.h   205
1 file changed, 173 insertions(+), 32 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9e7012689a84..c8e375440403 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -31,6 +31,7 @@ enum perf_type_id {
 	PERF_TYPE_TRACEPOINT			= 2,
 	PERF_TYPE_HW_CACHE			= 3,
 	PERF_TYPE_RAW				= 4,
+	PERF_TYPE_BREAKPOINT			= 5,
 
 	PERF_TYPE_MAX,				/* non-ABI */
 };
@@ -102,6 +103,8 @@ enum perf_sw_ids {
 	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
 	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
 	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
+	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
+	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
 
 	PERF_COUNT_SW_MAX,			/* non-ABI */
 };
@@ -207,9 +210,10 @@ struct perf_event_attr {
 		__u32		wakeup_events;	  /* wakeup every n events */
 		__u32		wakeup_watermark; /* bytes before wakeup   */
 	};
-	__u32			__reserved_2;
 
-	__u64			__reserved_3;
+	__u32			bp_type;
+	__u64			bp_addr;
+	__u64			bp_len;
 };
 
 /*
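The three bp_* fields take over the reserved padding so that PERF_TYPE_BREAKPOINT events (added in the first hunk) can describe what to watch. A hypothetical attr setup, assuming the HW_BREAKPOINT_* constants from <linux/hw_breakpoint.h>:

    #include <string.h>
    #include <linux/perf_event.h>
    #include <linux/hw_breakpoint.h>	/* HW_BREAKPOINT_* flags (assumed) */

    static void init_bp_attr(struct perf_event_attr *attr, void *addr)
    {
    	memset(attr, 0, sizeof(*attr));
    	attr->type	= PERF_TYPE_BREAKPOINT;
    	attr->size	= sizeof(*attr);
    	attr->bp_type	= HW_BREAKPOINT_W;	/* trap on writes */
    	attr->bp_addr	= (__u64)(unsigned long)addr;
    	attr->bp_len	= HW_BREAKPOINT_LEN_4;	/* watch 4 bytes */
    	attr->sample_period = 1;		/* notify on every hit */
    }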
@@ -219,8 +223,9 @@ struct perf_event_attr {
 #define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
 #define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
 #define PERF_EVENT_IOC_RESET		_IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, u64)
+#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
 #define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
 
 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
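PERF_EVENT_IOC_SET_FILTER pairs with the event_filter support added further down: it hands a tracepoint filter string to an already-open event fd. A hedged userspace sketch (the filter expression is purely illustrative):

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    static int set_event_filter(int fd, const char *filter)
    {
    	/* e.g. filter = "common_pid != 0" for a tracepoint event */
    	return ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
    }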
@@ -283,7 +288,7 @@ struct perf_event_mmap_page {
 };
 
 #define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
@@ -349,8 +354,8 @@ enum perf_event_type {
 	 *	u64				stream_id;
 	 * };
 	 */
 	PERF_RECORD_THROTTLE			= 5,
 	PERF_RECORD_UNTHROTTLE			= 6,
 
 	/*
 	 * struct {
@@ -364,10 +369,10 @@ enum perf_event_type {
 
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u32				pid, tid;
 	 *
 	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_RECORD_READ			= 8,
@@ -405,7 +410,7 @@ enum perf_event_type {
 	 *	char			data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
 	PERF_RECORD_SAMPLE		= 9,
 
 	PERF_RECORD_MAX,		/* non-ABI */
 };
@@ -434,6 +439,10 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <asm/hw_breakpoint.h>
+#endif
+
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/rculist.h>
@@ -443,6 +452,8 @@ enum perf_callchain_context {
 #include <linux/fs.h>
 #include <linux/pid_namespace.h>
 #include <linux/workqueue.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
 #include <asm/atomic.h>
 
 #define PERF_MAX_STACK_DEPTH		255
@@ -467,14 +478,20 @@ struct hw_perf_event {
 	union {
 		struct { /* hardware */
 			u64		config;
+			u64		last_tag;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		idx;
+			int		last_cpu;
 		};
 		struct { /* software */
 			s64		remaining;
 			struct hrtimer	hrtimer;
 		};
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+		/* breakpoint */
+		struct arch_hw_breakpoint	info;
+#endif
 	};
 	atomic64_t			prev_count;
 	u64				sample_period;
@@ -482,9 +499,8 @@ struct hw_perf_event {
 	atomic64_t			period_left;
 	u64				interrupts;
 
-	u64				freq_count;
-	u64				freq_interrupts;
-	u64				freq_stamp;
+	u64				freq_time_stamp;
+	u64				freq_count_stamp;
 #endif
 };
 
@@ -496,6 +512,8 @@ struct perf_event;
 struct pmu {
 	int (*enable)			(struct perf_event *event);
 	void (*disable)			(struct perf_event *event);
+	int (*start)			(struct perf_event *event);
+	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
 };
@@ -543,6 +561,16 @@ struct perf_pending_entry {
 	void (*func)(struct perf_pending_entry *);
 };
 
+struct perf_sample_data;
+
+typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
+					struct perf_sample_data *,
+					struct pt_regs *regs);
+
+enum perf_group_flag {
+	PERF_GROUP_SOFTWARE = 0x1,
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
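perf_overflow_handler_t gives in-kernel users a direct callback on counter overflow, stored in the new perf_event::overflow_handler field added below. A hedged sketch of a conforming handler (the name and body are hypothetical):

    static void my_overflow_handler(struct perf_event *event, int nmi,
    				struct perf_sample_data *data,
    				struct pt_regs *regs)
    {
    	/* nmi says whether we run in NMI context, so keep this short */
    	pr_debug("event %llu overflowed at ip %lx\n",
    		 (unsigned long long)event->id, instruction_pointer(regs));
    }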
@@ -552,6 +580,7 @@ struct perf_event {
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
 	int				nr_siblings;
+	int				group_flags;
 	struct perf_event		*group_leader;
 	struct perf_event		*output;
 	const struct pmu		*pmu;
@@ -585,7 +614,7 @@ struct perf_event {
 	u64				tstamp_running;
 	u64				tstamp_stopped;
 
 	struct perf_event_attr		attr;
 	struct hw_perf_event		hw;
 
 	struct perf_event_context	*ctx;
@@ -633,7 +662,14 @@ struct perf_event {
 
 	struct pid_namespace		*ns;
 	u64				id;
+
+	perf_overflow_handler_t		overflow_handler;
+
+#ifdef CONFIG_EVENT_TRACING
+	struct event_filter		*filter;
 #endif
+
+#endif /* CONFIG_PERF_EVENTS */
 };
 
 /**
@@ -646,7 +682,7 @@ struct perf_event_context {
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	/*
 	 * Protect the list of events. Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
@@ -654,7 +690,8 @@ struct perf_event_context {
 	 */
 	struct mutex			mutex;
 
-	struct list_head		group_list;
+	struct list_head		pinned_groups;
+	struct list_head		flexible_groups;
 	struct list_head		event_list;
 	int				nr_events;
 	int				nr_active;
@@ -706,7 +743,6 @@ struct perf_output_handle {
 	int				nmi;
 	int				sample;
 	int				locked;
-	unsigned long			flags;
 };
 
 #ifdef CONFIG_PERF_EVENTS
@@ -718,10 +754,9 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_event_task_sched_out(struct task_struct *task,
-					struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
+extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -736,8 +771,16 @@ extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu);
+	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
+extern int perf_event_release_kernel(struct perf_event *event);
+extern struct perf_event *
+perf_event_create_kernel_counter(struct perf_event_attr *attr,
+				int cpu,
+				pid_t pid,
+				perf_overflow_handler_t callback);
+extern u64 perf_event_read_value(struct perf_event *event,
+				 u64 *enabled, u64 *running);
 
 struct perf_sample_data {
 	u64				type;
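perf_event_create_kernel_counter()/perf_event_release_kernel() export event setup and teardown to in-kernel users (the hw_breakpoint layer is the obvious client). A hypothetical module-side usage, assuming CPU 0, any task, and no overflow callback:

    static struct perf_event *my_event;	/* hypothetical */

    static int my_counter_start(void)
    {
    	struct perf_event_attr attr = {
    		.type	= PERF_TYPE_SOFTWARE,
    		.config	= PERF_COUNT_SW_CPU_MIGRATIONS,
    		.size	= sizeof(attr),
    	};

    	my_event = perf_event_create_kernel_counter(&attr, 0 /* cpu */,
    						    -1 /* any task */, NULL);
    	return IS_ERR(my_event) ? PTR_ERR(my_event) : 0;
    }

    static void my_counter_stop(void)
    {
    	u64 enabled, running;
    	u64 count = perf_event_read_value(my_event, &enabled, &running);

    	pr_info("count %llu (enabled %llu, running %llu ns)\n",
    		(unsigned long long)count, (unsigned long long)enabled,
    		(unsigned long long)running);
    	perf_event_release_kernel(my_event);
    }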
@@ -760,6 +803,13 @@ struct perf_sample_data {
 	struct perf_raw_record		*raw;
 };
 
+static inline
+void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+{
+	data->addr = addr;
+	data->raw  = NULL;
+}
+
 extern void perf_output_sample(struct perf_output_handle *handle,
 			       struct perf_event_header *header,
 			       struct perf_sample_data *data,
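perf_sample_data_init() centralizes the data->raw = NULL clearing that overflow paths previously had to remember by hand (a missed clear meant interpreting stack garbage as a raw sample). A hedged call-site sketch, with a hypothetical wrapper name:

    static void my_report_overflow(struct perf_event *event, int nmi,
    			       struct pt_regs *regs)
    {
    	struct perf_sample_data data;

    	perf_sample_data_init(&data, 0 /* addr */);	/* also clears ->raw */
    	if (perf_event_overflow(event, nmi, &data, regs))
    		perf_event_disable(event);	/* limit reached, stop it */
    }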
@@ -778,20 +828,70 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
  */
 static inline int is_software_event(struct perf_event *event)
 {
-	return (event->attr.type != PERF_TYPE_RAW) &&
-		(event->attr.type != PERF_TYPE_HARDWARE) &&
-		(event->attr.type != PERF_TYPE_HW_CACHE);
+	switch (event->attr.type) {
+	case PERF_TYPE_SOFTWARE:
+	case PERF_TYPE_TRACEPOINT:
+	/* for now the breakpoint stuff also works as software event */
+	case PERF_TYPE_BREAKPOINT:
+		return 1;
+	}
+	return 0;
 }
 
 extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
+extern void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+
+/*
+ * Take a snapshot of the regs. Skip ip and frame pointer to
+ * the nth caller. We only need a few of the regs:
+ * - ip for PERF_SAMPLE_IP
+ * - cs for user_mode() tests
+ * - bp for callchains
+ * - eflags, for future purposes, just in case
+ */
+static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+{
+	unsigned long ip;
+
+	memset(regs, 0, sizeof(*regs));
+
+	switch (skip) {
+	case 1 :
+		ip = CALLER_ADDR0;
+		break;
+	case 2 :
+		ip = CALLER_ADDR1;
+		break;
+	case 3 :
+		ip = CALLER_ADDR2;
+		break;
+	case 4:
+		ip = CALLER_ADDR3;
+		break;
+	/* No need to support further for now */
+	default:
+		ip = 0;
+	}
+
+	return perf_arch_fetch_caller_regs(regs, ip, skip);
+}
+
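perf_fetch_caller_regs() leans on ftrace's CALLER_ADDRn macros (hence the new <linux/ftrace.h> include above) and defers the actual register fill to the architecture. For a feel of what the arch hook does, a hedged sketch in the spirit of the x86 version — not the actual implementation:

    /* Hypothetical flavour of the arch hook; the real one lives in arch code. */
    void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip,
    				 int skip)
    {
    	regs->ip    = ip;			/* for PERF_SAMPLE_IP */
    	regs->bp    = (unsigned long)__builtin_frame_address(0);
    						/* for callchains */
    	regs->cs    = __KERNEL_CS;		/* for user_mode() tests */
    	regs->flags = 0;			/* eflags: nothing yet */
    }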
 static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swevent_enabled[event_id]))
+	if (atomic_read(&perf_swevent_enabled[event_id])) {
+		struct pt_regs hot_regs;
+
+		if (!regs) {
+			perf_fetch_caller_regs(&hot_regs, 1);
+			regs = &hot_regs;
+		}
 		__perf_sw_event(event_id, nr, nmi, regs, addr);
+	}
 }
 
 extern void __perf_event_mmap(struct vm_area_struct *vma);
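With the NULL-regs fallback, a fault path can report one of the new software events without plumbing pt_regs through every caller. A hypothetical arch-side report function:

    /* Hypothetical: account one alignment fixup against the current task. */
    static void report_alignment_fault(unsigned long addr)
    {
    	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0 /* !nmi */,
    		      NULL /* regs snapshotted via perf_fetch_caller_regs */,
    		      addr);
    }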
@@ -811,9 +911,25 @@ extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+static inline bool perf_paranoid_tracepoint_raw(void)
+{
+	return sysctl_perf_event_paranoid > -1;
+}
+
+static inline bool perf_paranoid_cpu(void)
+{
+	return sysctl_perf_event_paranoid > 0;
+}
+
+static inline bool perf_paranoid_kernel(void)
+{
+	return sysctl_perf_event_paranoid > 1;
+}
+
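The three helpers give the scattered sysctl_perf_event_paranoid comparisons one home and make the thresholds (-1, 0, 1) self-documenting. A hedged example of the kind of check the syscall path performs (the function name is hypothetical):

    /* Hypothetical gate in the style of the perf_event_open() checks. */
    static int may_profile_cpu_wide(void)
    {
    	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
    		return -EACCES;
    	return 0;
    }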
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count,
-			  void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
+			  int entry_size, struct pt_regs *regs);
+extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
 #define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
@@ -827,14 +943,18 @@ extern int perf_output_begin(struct perf_output_handle *handle,
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
 			     const void *buf, unsigned int len);
+extern int perf_swevent_get_recursion_context(void);
+extern void perf_swevent_put_recursion_context(int rctx);
+extern void perf_event_enable(struct perf_event *event);
+extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
+perf_event_task_sched_in(struct task_struct *task)			{ }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
-			  struct task_struct *next, int cpu)		{ }
+			  struct task_struct *next)			{ }
 static inline void
-perf_event_task_tick(struct task_struct *task, int cpu)		{ }
+perf_event_task_tick(struct task_struct *task)				{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
@@ -848,16 +968,37 @@ static inline int perf_event_task_enable(void) { return -EINVAL; }
 static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi,
 	      struct pt_regs *regs, u64 addr)				{ }
+static inline void
+perf_bp_event(struct perf_event *event, void *data)			{ }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
 static inline void perf_event_init(void)				{ }
-
+static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
+static inline void perf_swevent_put_recursion_context(int rctx)	{ }
+static inline void perf_event_enable(struct perf_event *event)		{ }
+static inline void perf_event_disable(struct perf_event *event)	{ }
 #endif
 
 #define perf_output_put(handle, x) \
 	perf_output_copy((handle), &(x), sizeof(x))
 
+/*
+ * This has to have a higher priority than migration_notifier in sched.c.
+ */
+#define perf_cpu_notifier(fn)						\
+do {									\
+	static struct notifier_block fn##_nb __cpuinitdata =		\
+		{ .notifier_call = fn, .priority = 20 };		\
+	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
+		(void *)(unsigned long)smp_processor_id());		\
+	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
+		(void *)(unsigned long)smp_processor_id());		\
+	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
+		(void *)(unsigned long)smp_processor_id());		\
+	register_cpu_notifier(&fn##_nb);				\
+} while (0)
+
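perf_cpu_notifier() both replays the bring-up callbacks for the CPU that is already running and registers for future hotplug events; priority 20 keeps it ahead of the scheduler's migration notifier. A hypothetical user (names are illustrative):

    static int __cpuinit my_perf_cpu_notify(struct notifier_block *nb,
    					unsigned long action, void *hcpu)
    {
    	switch (action & ~CPU_TASKS_FROZEN) {
    	case CPU_UP_PREPARE:
    	case CPU_STARTING:
    	case CPU_ONLINE:
    		/* per-CPU perf state for (long)hcpu would be set up here */
    		break;
    	}
    	return NOTIFY_OK;
    }

    static void __init my_perf_init(void)
    {
    	perf_cpu_notifier(my_perf_cpu_notify);
    }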
 #endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_EVENT_H */