Diffstat (limited to 'include/linux/perf_event.h')
 include/linux/perf_event.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 46 insertions(+), 19 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 486e84ccb1f9..2b621982938d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -79,11 +79,6 @@ struct perf_branch_stack {
 	struct perf_branch_entry entries[0];
 };
 
-struct perf_regs {
-	__u64		abi;
-	struct pt_regs	*regs;
-};
-
 struct task_struct;
 
 /*
@@ -207,6 +202,13 @@ struct pmu {
 	 */
 	int (*event_init)		(struct perf_event *event);
 
+	/*
+	 * Notification that the event was mapped or unmapped.  Called
+	 * in the context of the mapping task.
+	 */
+	void (*event_mapped)		(struct perf_event *event); /*optional*/
+	void (*event_unmapped)		(struct perf_event *event); /*optional*/
+
 #define PERF_EF_START	0x01		/* start the counter when adding    */
 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
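
The two callbacks above are optional and fire when the event's ring buffer is mmap()ed or unmapped, in the mapping task's context. A minimal driver-side sketch of how a PMU might wire them up; all example_* identifiers are hypothetical (they are not in this patch), loosely modeled on the rdpmc-style use case where user-space counter access is granted while at least one event is mapped:

	/* hypothetical PMU; example_*_user_access() are assumed helpers */
	static atomic_t example_nr_mapped;

	static void example_event_mapped(struct perf_event *event)
	{
		/* first live mapping: allow user space to read counters */
		if (atomic_inc_return(&example_nr_mapped) == 1)
			example_enable_user_access();
	}

	static void example_event_unmapped(struct perf_event *event)
	{
		/* last mapping gone: revoke user-space access again */
		if (atomic_dec_and_test(&example_nr_mapped))
			example_disable_user_access();
	}

	static struct pmu example_pmu = {
		.event_init	= example_event_init,
		.event_mapped	= example_event_mapped,		/* optional */
		.event_unmapped	= example_event_unmapped,	/* optional */
		.add		= example_add,
		.del		= example_del,
	};

Because both hooks run in the mapping task's context, per-task or per-mm state can be updated directly for current, without cross-CPU bookkeeping at sample time.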
@@ -455,11 +457,6 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
-enum perf_event_context_type {
-	task_context,
-	cpu_context,
-};
-
 /**
  * struct perf_event_context - event context structure
  *
@@ -467,7 +464,6 @@ enum perf_event_context_type {
  */
 struct perf_event_context {
 	struct pmu			*pmu;
-	enum perf_event_context_type	type;
 	/*
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
@@ -480,6 +476,7 @@ struct perf_event_context {
 	 */
 	struct mutex			mutex;
 
+	struct list_head		active_ctx_list;
 	struct list_head		pinned_groups;
 	struct list_head		flexible_groups;
 	struct list_head		event_list;
@@ -530,7 +527,6 @@ struct perf_cpu_context {
 	int				exclusive;
 	struct hrtimer			hrtimer;
 	ktime_t				hrtimer_interval;
-	struct list_head		rotation_list;
 	struct pmu			*unique_pmu;
 	struct perf_cgroup		*cgrp;
 };
@@ -610,7 +606,14 @@ struct perf_sample_data {
 		u32	reserved;
 	}				cpu_entry;
 	struct perf_callchain_entry	*callchain;
+
+	/*
+	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
+	 * on arch details.
+	 */
 	struct perf_regs		regs_user;
+	struct pt_regs			regs_user_copy;
+
 	struct perf_regs		regs_intr;
 	u64				stack_user_size;
 } ____cacheline_aligned;
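
The new regs_user_copy field gives the architecture scratch space next to the sample: when a sample lands in kernel context the user registers cannot safely be referenced in place, so the arch either points regs_user.regs at the live pt_regs or assembles a partial set in regs_user_copy and points at that. A hedged sketch of that contract, patterned on the perf_get_regs_user() hook this series introduces on x86; the body below is illustrative, not the real x86 implementation:

	void perf_get_regs_user(struct perf_regs *regs_user,
				struct pt_regs *regs,
				struct pt_regs *regs_user_copy)
	{
		if (user_mode(regs)) {
			/* sampled in user mode: interrupt regs are the user regs */
			regs_user->abi	= perf_reg_abi(current);
			regs_user->regs	= regs;
		} else if (current->mm) {
			/*
			 * Sample landed in the kernel: copy the task's saved
			 * user state into the scratch area and point at it.
			 */
			*regs_user_copy	= *task_pt_regs(current);
			regs_user->abi	= perf_reg_abi(current);
			regs_user->regs	= regs_user_copy;
		} else {
			/* kernel thread: no user register state to report */
			regs_user->abi	= PERF_SAMPLE_REGS_ABI_NONE;
			regs_user->regs	= NULL;
		}
	}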
@@ -663,6 +666,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
@@ -687,14 +691,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
-	struct pt_regs hot_regs;
+	if (static_key_false(&perf_swevent_enabled[event_id]))
+		__perf_sw_event(event_id, nr, regs, addr);
+}
+
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
 
+/*
+ * 'Special' version for the scheduler, it hard assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
 	if (static_key_false(&perf_swevent_enabled[event_id])) {
-		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs);
-			regs = &hot_regs;
-		}
-		__perf_sw_event(event_id, nr, regs, addr);
+		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+		perf_fetch_caller_regs(regs);
+		___perf_sw_event(event_id, nr, regs, addr);
 	}
 }
 
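
Two things change here: perf_sw_event() no longer accepts regs == NULL (the on-stack struct pt_regs hot_regs fallback is gone, keeping a large structure off every caller's stack), and the scheduler gets its own perf_sw_event_sched() that snapshots the caller's registers into a per-CPU __perf_regs slot instead. The per-CPU slot is only safe because, as the comment notes, nothing schedules inside another swevent. A sketch of the two resulting call-site patterns; the example_* wrappers are hypothetical, and the actual context-switch conversion appears in the next hunk:

	/* generic path: the caller must now pass the regs it already has */
	static void example_fault_hook(struct pt_regs *regs, unsigned long address)
	{
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	}

	/*
	 * scheduler path: no regs argument; the helper captures the caller's
	 * registers into the per-CPU pt_regs before raising the event
	 */
	static void example_sched_hook(void)
	{
		perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
	}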
@@ -710,7 +725,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
 static inline void perf_event_task_sched_out(struct task_struct *prev,
 					     struct task_struct *next)
 {
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
 	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
@@ -821,6 +836,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
 static inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
 static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
+static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
 static inline int perf_register_guest_info_callbacks
@@ -897,12 +914,22 @@ struct perf_pmu_events_attr {
 	const char *event_str;
 };
 
+ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
+			      char *page);
+
 #define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
 static struct perf_pmu_events_attr _var = {				\
 	.attr = __ATTR(_name, 0444, _show, NULL),			\
 	.id   = _id,							\
 };
 
+#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
+static struct perf_pmu_events_attr _var = {				\
+	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
+	.id		= 0,						\
+	.event_str	= _str,						\
+};
+
 #define PMU_FORMAT_ATTR(_name, _format)					\
 static ssize_t								\
 _name##_show(struct device *dev,					\
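
PMU_EVENT_ATTR_STRING pairs with the newly exported perf_event_sysfs_show(), which emits the attribute's event_str, so a driver can publish fixed "event=0xNN"-style aliases under its sysfs events directory without writing a show routine per event. A hedged usage sketch in the style of the Intel RAPL driver; the event name, string, and example_* identifiers are illustrative:

	/* defines evattr_energy_cores, shown verbatim via sysfs */
	PMU_EVENT_ATTR_STRING(energy-cores, evattr_energy_cores, "event=0x01");

	static struct attribute *example_events_attrs[] = {
		&evattr_energy_cores.attr.attr,
		NULL,
	};

	static struct attribute_group example_events_group = {
		.name  = "events",	/* .../<pmu>/events/energy-cores */
		.attrs = example_events_attrs,
	};

Tools like perf(1) then resolve "pmu/energy-cores/" by reading that sysfs file and parsing the encoded event string.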
