Diffstat (limited to 'include/linux/perf_event.h')
 include/linux/perf_event.h | 47 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 39 insertions(+), 8 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 664de5a4ec46..2b621982938d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -202,6 +202,13 @@ struct pmu {
 	 */
 	int (*event_init)		(struct perf_event *event);
 
+	/*
+	 * Notification that the event was mapped or unmapped.  Called
+	 * in the context of the mapping task.
+	 */
+	void (*event_mapped)		(struct perf_event *event); /*optional*/
+	void (*event_unmapped)		(struct perf_event *event); /*optional*/
+
 #define PERF_EF_START	0x01	/* start the counter when adding    */
 #define PERF_EF_RELOAD	0x02	/* reload the counter when starting */
 #define PERF_EF_UPDATE	0x04	/* update the counter when stopping */
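
The two callbacks added above are optional: a PMU driver only needs them if it must react when user space mmaps or munmaps the event's ring buffer. A minimal sketch of how a driver might wire them up follows; the callback members come from the hunk above, while the driver name, the other callbacks, and the use case are illustrative assumptions, not part of this patch.

static void example_pmu_event_mapped(struct perf_event *event)
{
	/* illustrative: e.g. grant user-space counter access for this mm */
}

static void example_pmu_event_unmapped(struct perf_event *event)
{
	/* illustrative: revoke that access once the last mapping goes away */
}

static struct pmu example_pmu = {
	/* .event_init, .add, .del, .start, .stop, .read as usual ... */
	.event_mapped	= example_pmu_event_mapped,	/* optional */
	.event_unmapped	= example_pmu_event_unmapped,	/* optional */
};

Both hooks run in the context of the task doing the mapping, as the comment in the hunk notes.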
@@ -469,6 +476,7 @@ struct perf_event_context {
 	 */
 	struct mutex			mutex;
 
+	struct list_head		active_ctx_list;
 	struct list_head		pinned_groups;
 	struct list_head		flexible_groups;
 	struct list_head		event_list;
@@ -519,7 +527,6 @@ struct perf_cpu_context {
 	int				exclusive;
 	struct hrtimer			hrtimer;
 	ktime_t				hrtimer_interval;
-	struct list_head		rotation_list;
 	struct pmu			*unique_pmu;
 	struct perf_cgroup		*cgrp;
 };
@@ -659,6 +666,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
@@ -683,14 +691,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
-	struct pt_regs hot_regs;
+	if (static_key_false(&perf_swevent_enabled[event_id]))
+		__perf_sw_event(event_id, nr, regs, addr);
+}
+
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
 
-	if (static_key_false(&perf_swevent_enabled[event_id])) {
-		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs);
-			regs = &hot_regs;
-		}
-		__perf_sw_event(event_id, nr, regs, addr);
+/*
+ * 'Special' version for the scheduler, it hard assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
+	if (static_key_false(&perf_swevent_enabled[event_id])) {
+		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+		perf_fetch_caller_regs(regs);
+		___perf_sw_event(event_id, nr, regs, addr);
 	}
 }
 
@@ -706,7 +725,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
 static inline void perf_event_task_sched_out(struct task_struct *prev,
 					     struct task_struct *next)
 {
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
 	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
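
Taken together, the two hunks above split the software-event helpers: generic call sites keep passing their own pt_regs to perf_sw_event(), while scheduler paths, which run with preemption disabled and therefore cannot recurse into another software event, call perf_sw_event_sched() and let it capture caller registers into the per-CPU __perf_regs slot instead of a large on-stack pt_regs. A usage sketch follows; the fault-path caller is an assumption for illustration, and the context-switch caller mirrors the hunk above.

/* generic path: the caller already has pt_regs, pass them through */
static void example_account_fault(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}

/* scheduler path: no regs argument, no recursion, regs taken per-CPU */
static void example_account_switch(void)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
}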
@@ -817,6 +836,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
 static inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
 static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
+static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
 static inline int perf_register_guest_info_callbacks
@@ -893,12 +914,22 @@ struct perf_pmu_events_attr {
 	const char *event_str;
 };
 
+ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
+			      char *page);
+
 #define PMU_EVENT_ATTR(_name, _var, _id, _show)			\
 static struct perf_pmu_events_attr _var = {				\
 	.attr = __ATTR(_name, 0444, _show, NULL),			\
 	.id = _id,							\
 };
 
+#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
+static struct perf_pmu_events_attr _var = {				\
+	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
+	.id		= 0,						\
+	.event_str	= _str,						\
+};
+
 #define PMU_FORMAT_ATTR(_name, _format)				\
 static ssize_t								\
 _name##_show(struct device *dev,					\
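
The new PMU_EVENT_ATTR_STRING() helper pairs with the perf_event_sysfs_show() declaration added above: it defines a read-only sysfs attribute whose show method prints the fixed ->event_str, so drivers no longer need a numeric id or a per-driver show routine. A hedged example of how a PMU driver might use it; the attribute name, variable, and event string are made-up placeholders.

PMU_EVENT_ATTR_STRING(example-cycles, example_attr_cycles, "event=0x3c");

static struct attribute *example_events_attrs[] = {
	&example_attr_cycles.attr.attr,
	NULL,
};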