Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.h')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h | 51
1 file changed, 48 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index b9698d40ac4b..8944062f46e2 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -45,6 +45,7 @@ struct event_constraint {
 	u64	code;
 	u64	cmask;
 	int	weight;
+	int	overlap;
 };
 
 struct amd_nb {
@@ -151,15 +152,40 @@ struct cpu_hw_events {
 	void				*kfree_on_online;
 };
 
-#define __EVENT_CONSTRAINT(c, n, m, w) {\
+#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
 	{ .idxmsk64 = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
 	.weight = (w),			\
+	.overlap = (o),			\
 }
 
 #define EVENT_CONSTRAINT(c, n, m)	\
-	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)
+
+/*
+ * The overlap flag marks event constraints with overlapping counter
+ * masks. This is the case if the counter mask of such an event is not
+ * a subset of any other counter mask of a constraint with an equal or
+ * higher weight, e.g.:
+ *
+ *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
+ *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
+ *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
+ *
+ * The event scheduler may not select the correct counter in the first
+ * cycle because it needs to know which subsequent events will be
+ * scheduled. It may fail to schedule the events then. So we set the
+ * overlap flag for such constraints to give the scheduler a hint which
+ * events to select for counter rescheduling.
+ *
+ * Care must be taken as the rescheduling algorithm is O(n!), which
+ * will dramatically increase scheduling cycles on an over-committed
+ * system. The number of such EVENT_CONSTRAINT_OVERLAP() macros and
+ * their counter masks must be kept to a minimum.
+ */
+#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
+	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
 
 /*
  * Constraint on the Event code.
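To make the subset rule in the comment above concrete, here is a minimal sketch, not part of the patch (the helper name mask_is_subset is invented for illustration): a constraint needs the overlap flag when its counter mask is not contained in any counter mask of equal or higher weight.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper, not kernel code: true if every counter usable
 * under mask a is also usable under mask b. */
static bool mask_is_subset(uint64_t a, uint64_t b)
{
	return (a & ~b) == 0;
}

/* With the masks from the comment above (weight 2 vs. weight 3):
 *   mask_is_subset(0x09, 0x07) -> false (0x09 & ~0x07 == 0x08)
 *   mask_is_subset(0x09, 0x38) -> false (0x09 & ~0x38 == 0x01)
 * so the 0x09 constraint must be declared with
 * EVENT_CONSTRAINT_OVERLAP(), while 0x07 and 0x38 need not be. */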
@@ -235,6 +261,11 @@ union perf_capabilities {
 	u64	capabilities;
 };
 
+struct x86_pmu_quirk {
+	struct x86_pmu_quirk *next;
+	void (*func)(void);
+};
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
@@ -259,6 +290,11 @@ struct x86_pmu {
 	int		num_counters_fixed;
 	int		cntval_bits;
 	u64		cntval_mask;
+	union {
+			unsigned long events_maskl;
+			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
+	};
+	int		events_mask_len;
 	int		apic;
 	u64		max_period;
 	struct event_constraint *
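A note on the anonymous union added here: events_maskl and events_mask alias the same storage, so the mask can be written as a single word and read through the generic bitmap API. A hedged sketch of that access pattern follows (illustrative only; the names ebx, mask_length and event_idx are assumptions, not from this patch):

/* scalar view: store one raw word of CPUID output, where a set bit
 * marks an architectural event as unavailable (assumed setup code) */
x86_pmu.events_maskl = ebx;
x86_pmu.events_mask_len = mask_length;

/* bitmap view: query one event through the standard bitop API */
if (test_bit(event_idx, x86_pmu.events_mask))
	pr_debug("arch event %d not available\n", event_idx);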
@@ -268,7 +304,7 @@ struct x86_pmu {
 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
 						 struct perf_event *event);
 	struct event_constraint *event_constraints;
-	void		(*quirks)(void);
+	struct x86_pmu_quirk *quirks;
 	int		perfctr_second_write;
 
 	int		(*cpu_prepare)(int cpu);
@@ -309,6 +345,15 @@ struct x86_pmu {
 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
 };
 
+#define x86_add_quirk(func_)					\
+do {								\
+	static struct x86_pmu_quirk __quirk __initdata = {	\
+		.func = func_,					\
+	};							\
+	__quirk.next = x86_pmu.quirks;				\
+	x86_pmu.quirks = &__quirk;				\
+} while (0)
+
 #define ERF_NO_HT_SHARING	1
 #define ERF_HAS_RSP_1		2
 
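For context, a sketch of how the reworked quirk list is meant to be used; the quirk and init functions below are hypothetical, only x86_add_quirk() and struct x86_pmu_quirk come from this patch. Each x86_add_quirk() invocation prepends a statically allocated node to x86_pmu.quirks, and init code can later walk the list and run each function once.

/* hypothetical model-specific fixup */
static __init void my_model_quirk(void)
{
	/* apply a one-shot workaround here */
}

static __init int my_pmu_init(void)
{
	struct x86_pmu_quirk *quirk;

	x86_add_quirk(my_model_quirk);	/* push onto x86_pmu.quirks */

	/* fire every registered quirk exactly once */
	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
		quirk->func();

	return 0;
}

Note that the node defined inside the macro is __initdata, so the list is only valid during early init and must not be walked after init memory is freed.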