-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	| 37
-rw-r--r--	include/linux/perf_event.h		| 24
2 files changed, 19 insertions(+), 42 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 66de282ad2fb..fdbe24842271 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -93,24 +93,19 @@ struct cpu_hw_events {
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 };
 
 #define EVENT_CONSTRAINT(c, n, m) { \
 	{ .idxmsk64[0] = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
 	.weight = HWEIGHT64((u64)(n)),	\
 }
 
-#define INTEL_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define INTEL_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define FIXED_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
 
-#define FIXED_EVENT_CONSTRAINT(c, n)	\
-	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
-
-#define EVENT_CONSTRAINT_END \
-	EVENT_CONSTRAINT(0, 0, 0)
+#define EVENT_CONSTRAINT_END		EVENT_CONSTRAINT(0, 0, 0)
+
+#define for_each_event_constraint(e, c)	for ((e) = (c); (e)->cmask; (e)++)
 
-#define for_each_event_constraint(e, c)	\
-	for ((e) = (c); (e)->cmask; (e)++)
-
 /*
  * struct x86_pmu - generic x86 pmu
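An aside for readers of this hunk: the constraint table built from these macros is a sentinel-terminated array, and EVENT_CONSTRAINT_END produces an entry whose cmask is 0 — exactly the condition that stops for_each_event_constraint(). Below is a minimal user-space sketch of that mechanism, not the kernel code itself: the types are simplified, HWEIGHT64 is stood in by __builtin_popcountll, and the demo table values are made up.

#include <stdint.h>
#include <stdio.h>

struct event_constraint {
	uint64_t idxmsk;	/* bit i set: counter i is usable */
	uint64_t code;		/* event code this entry matches */
	uint64_t cmask;		/* mask applied before matching; 0 ends the table */
	int	 weight;	/* how many counters the event may use */
};

/* user-space stand-in: HWEIGHT64 becomes __builtin_popcountll */
#define EVENT_CONSTRAINT(c, n, m) \
	{ .idxmsk = (n), .code = (c), .cmask = (m), \
	  .weight = __builtin_popcountll(n) }
#define EVENT_CONSTRAINT_END	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c) \
	for ((e) = (c); (e)->cmask; (e)++)

static const struct event_constraint demo_constraints[] = {
	EVENT_CONSTRAINT(0xc0, 0x3, 0xff),	/* may use counters 0-1 */
	EVENT_CONSTRAINT(0x12, 0x4, 0xff),	/* pinned to counter 2 */
	EVENT_CONSTRAINT_END,			/* cmask == 0 stops the walk */
};

int main(void)
{
	const struct event_constraint *e;

	for_each_event_constraint(e, demo_constraints)
		printf("code 0x%02llx -> counters 0x%llx (weight %d)\n",
		       (unsigned long long)e->code,
		       (unsigned long long)e->idxmsk, e->weight);
	return 0;
}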
@@ -1276,14 +1271,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (test_bit(hwc->idx, used_mask))
 			break;
 
-#if 0
-		pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
-			 smp_processor_id(),
-			 hwc->config,
-			 hwc->idx,
-			 assign ? 'y' : 'n');
-#endif
-
 		set_bit(hwc->idx, used_mask);
 		if (assign)
 			assign[i] = hwc->idx;
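The loop this hunk cleans up is the fast path of x86_schedule_events(): keep every event on the counter it already occupies, and bail out to the slow path on the first collision. A hypothetical stand-alone model of that idea (used_mask shrunk to a single word, N_COUNTERS a made-up stand-in for X86_PMC_IDX_MAX):

#include <stdbool.h>
#include <stdint.h>

#define N_COUNTERS 4	/* stand-in for X86_PMC_IDX_MAX */

/* Each event keeps the counter index it last used (idx[i]);
 * fail as soon as two events claim the same counter. */
static bool fast_path_assign(const int idx[], int n, int assign[])
{
	uint64_t used_mask = 0;

	for (int i = 0; i < n; i++) {
		if (used_mask & (1ULL << idx[i]))
			return false;	/* collision: fall back to slow path */
		used_mask |= 1ULL << idx[i];
		if (assign)
			assign[i] = idx[i];
	}
	return true;
}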
@@ -1333,14 +1320,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (j == X86_PMC_IDX_MAX)
 			break;
 
-#if 0
-		pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
-			 smp_processor_id(),
-			 hwc->config,
-			 j,
-			 assign ? 'y' : 'n');
-#endif
-
 		set_bit(j, used_mask);
 
 		if (assign)
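The slow path, by contrast, scans each event's allowed-counter mask for the first free index and gives up when the scan runs off the end (the j == X86_PMC_IDX_MAX test above). Continuing the previous sketch (same includes and N_COUNTERS; idxmsk[i] plays the role of the constraint's counter mask), the shape is roughly:

/* For each event, take the first free counter its constraint allows. */
static bool slow_path_assign(const uint64_t idxmsk[], int n, int assign[])
{
	uint64_t used_mask = 0;

	for (int i = 0; i < n; i++) {
		int j;

		for (j = 0; j < N_COUNTERS; j++) {
			if ((idxmsk[i] & (1ULL << j)) &&
			    !(used_mask & (1ULL << j)))
				break;
		}
		if (j == N_COUNTERS)
			return false;	/* no counter left for event i */

		used_mask |= 1ULL << j;
		if (assign)
			assign[i] = j;
	}
	return true;
}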
@@ -2596,9 +2575,9 @@ static const struct pmu pmu = {
  * validate a single event group
  *
  * validation include:
- *     - check events are compatible which each other
- *     - events do not compete for the same counter
- *     - number of events <= number of counters
+ *	- check events are compatible which each other
+ *	- events do not compete for the same counter
+ *	- number of events <= number of counters
  *
  * validation ensures the group can be loaded onto the
  * PMU if it was the only group available.
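Note how all three criteria reduce to one question: can the scheduler place the group? Scheduling with no assign array answers "could this group fit?" without committing anything, which is the `assign == NULL` case the hunks above preserve. In terms of the sketches above, a hypothetical dry run:

/* A NULL assign array asks "is this group schedulable?"
 * without recording the placement (illustrative values). */
static bool demo_validate_group(void)
{
	/* two events free to use counters 0-1, one pinned to counter 0 */
	uint64_t masks[] = { 0x3, 0x3, 0x1 };

	return slow_path_assign(masks, 3, NULL);	/* false: group rejected */
}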
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 72b2615600d8..953c17731e0d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -290,7 +290,7 @@ struct perf_event_mmap_page {
 };
 
 #define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
-#define PERF_RECORD_MISC_CPUMODE_UNKNOWN		(0 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL			(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
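These four values live in the low two bits of a record's misc field; consumers mask first, then compare, which is why UNKNOWN can safely be (0 << 0). A minimal user-space decoder (standalone copies of the defines, for illustration only):

#include <stdio.h>

#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)

static const char *cpumode_str(unsigned short misc)
{
	switch (misc & PERF_RECORD_MISC_CPUMODE_MASK) {
	case PERF_RECORD_MISC_KERNEL:		return "kernel";
	case PERF_RECORD_MISC_USER:		return "user";
	case PERF_RECORD_MISC_HYPERVISOR:	return "hypervisor";
	default:				return "unknown";
	}
}

int main(void)
{
	printf("%s\n", cpumode_str(PERF_RECORD_MISC_USER));	/* prints "user" */
	return 0;
}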
@@ -356,8 +356,8 @@ enum perf_event_type {
 	 *	u64				stream_id;
 	 * };
 	 */
-	PERF_RECORD_THROTTLE		= 5,
-	PERF_RECORD_UNTHROTTLE		= 6,
+	PERF_RECORD_THROTTLE			= 5,
+	PERF_RECORD_UNTHROTTLE			= 6,
 
 	/*
 	 * struct {
@@ -371,10 +371,10 @@
 
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
-	 *	u32	pid, tid;
+	 *	u32				pid, tid;
 	 *
-	 *	struct read_format	values;
+	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_RECORD_READ			= 8,
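For readers matching the comment above to bytes: one plausible C layout for a PERF_RECORD_READ record is sketched below. struct read_format is variable-sized in general; this fixed version assumes the simplest configuration (no PERF_FORMAT_GROUP and no TOTAL_TIME_* bits enabled), and perf_record_read is an illustrative name, not an ABI type.

#include <stdint.h>

struct perf_event_header {		/* matches the ABI header layout */
	uint32_t type;			/* PERF_RECORD_READ == 8 for this record */
	uint16_t misc;
	uint16_t size;
};

struct perf_record_read {
	struct perf_event_header header;
	uint32_t pid, tid;
	uint64_t value;			/* read_format with no flag bits set */
};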
@@ -412,7 +412,7 @@ enum perf_event_type {
 	 *	char			data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
-	PERF_RECORD_SAMPLE		= 9,
+	PERF_RECORD_SAMPLE			= 9,
 
 	PERF_RECORD_MAX,			/* non-ABI */
 };
@@ -752,8 +752,7 @@ extern int perf_max_events;
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
 extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task,
-					struct task_struct *next);
+extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
@@ -853,8 +852,7 @@ extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count,
-				 void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -895,13 +893,13 @@ static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi,
 	      struct pt_regs *regs, u64 addr)			{ }
 static inline void
-perf_bp_event(struct perf_event *event, void *data) { }
+perf_bp_event(struct perf_event *event, void *data)		{ }
 
-static inline void perf_event_mmap(struct vm_area_struct *vma) { }
-static inline void perf_event_comm(struct task_struct *tsk) { }
-static inline void perf_event_fork(struct task_struct *tsk) { }
-static inline void perf_event_init(void) { }
-static inline int perf_swevent_get_recursion_context(void) { return -1; }
-static inline void perf_swevent_put_recursion_context(int rctx) { }
-static inline void perf_event_enable(struct perf_event *event) { }
-static inline void perf_event_disable(struct perf_event *event) { }
+static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
+static inline void perf_event_comm(struct task_struct *tsk)		{ }
+static inline void perf_event_fork(struct task_struct *tsk)		{ }
+static inline void perf_event_init(void)				{ }
+static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
+static inline void perf_swevent_put_recursion_context(int rctx)	{ }
+static inline void perf_event_enable(struct perf_event *event)		{ }
+static inline void perf_event_disable(struct perf_event *event)	{ }
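The stub block being tidied here is the standard config-off idiom: when the feature is compiled out, the header keeps every API's signature but collapses the body to an empty static inline, so call sites build unchanged and the optimizer deletes the calls entirely. The same pattern in miniature (CONFIG_DEMO_FEATURE and demo_feature_event() are made-up names for illustration):

#ifdef CONFIG_DEMO_FEATURE
extern void demo_feature_event(int id);	/* real implementation elsewhere */
#else
static inline void demo_feature_event(int id)	{ }	/* compiles away */
#endif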