Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_amd.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd.c	322
1 file changed, 246 insertions(+), 76 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c93bc4e813a0..dfdab42aed27 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,21 +132,102 @@ static u64 amd_pmu_event_map(int hw_event)
 	return amd_perfmon_event_map[hw_event];
 }
 
-static int amd_pmu_hw_config(struct perf_event *event)
+static struct event_constraint *amd_nb_event_constraint;
+
+/*
+ * Previously calculated offsets
+ */
+static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
+
+/*
+ * Legacy CPUs:
+ *   4 counters starting at 0xc0010000 each offset by 1
+ *
+ * CPUs with core performance counter extensions:
+ *   6 counters starting at 0xc0010200 each offset by 2
+ *
+ * CPUs with north bridge performance counter extensions:
+ *   4 additional counters starting at 0xc0010240 each offset by 2
+ *   (indexed right above either one of the above core counters)
+ */
+static inline int amd_pmu_addr_offset(int index, bool eventsel)
 {
-	int ret;
+	int offset, first, base;
 
-	/* pass precise event sampling to ibs: */
-	if (event->attr.precise_ip && get_ibs_caps())
-		return -ENOENT;
+	if (!index)
+		return index;
+
+	if (eventsel)
+		offset = event_offsets[index];
+	else
+		offset = count_offsets[index];
+
+	if (offset)
+		return offset;
+
+	if (amd_nb_event_constraint &&
+	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
+		/*
+		 * calculate the offset of NB counters with respect to
+		 * base eventsel or perfctr
+		 */
+
+		first = find_first_bit(amd_nb_event_constraint->idxmsk,
+				       X86_PMC_IDX_MAX);
+
+		if (eventsel)
+			base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
+		else
+			base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
+
+		offset = base + ((index - first) << 1);
+	} else if (!cpu_has_perfctr_core)
+		offset = index;
+	else
+		offset = index << 1;
+
+	if (eventsel)
+		event_offsets[index] = offset;
+	else
+		count_offsets[index] = offset;
+
+	return offset;
+}
+
+static inline int amd_pmu_rdpmc_index(int index)
+{
+	int ret, first;
+
+	if (!index)
+		return index;
+
+	ret = rdpmc_indexes[index];
 
-	ret = x86_pmu_hw_config(event);
 	if (ret)
 		return ret;
 
-	if (has_branch_stack(event))
-		return -EOPNOTSUPP;
+	if (amd_nb_event_constraint &&
+	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
+		/*
+		 * according to the manual, ECX value of the NB counters is
+		 * the index of the NB counter (0, 1, 2 or 3) plus 6
+		 */
+
+		first = find_first_bit(amd_nb_event_constraint->idxmsk,
+				       X86_PMC_IDX_MAX);
+		ret = index - first + 6;
+	} else
+		ret = index;
+
+	rdpmc_indexes[index] = ret;
+
+	return ret;
+}
 
+static int amd_core_hw_config(struct perf_event *event)
+{
 	if (event->attr.exclude_host && event->attr.exclude_guest)
 		/*
 		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
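The layout comment above is easiest to check with concrete numbers: with the core extensions, event select i lives at 0xc0010200 + 2*i, and the NB selects are rebased onto 0xc0010240. Below is a minimal standalone sketch of that arithmetic; eventsel_addr() is a hypothetical helper mirroring amd_pmu_addr_offset() for the eventsel case, not kernel code, and the MSR values are the well-known ones from the AMD BKDG.

/* sketch: MSR addresses implied by the layout comment (not kernel code) */
#include <stdio.h>

#define MSR_K7_EVNTSEL0		0xc0010000u	/* legacy selects, +1 apart */
#define MSR_F15H_PERF_CTL	0xc0010200u	/* core ext. selects, +2 apart */
#define MSR_F15H_NB_PERF_CTL	0xc0010240u	/* NB selects, +2 apart */

/* 'first' is the lowest counter index reserved for NB counters:
 * 6 with the core extensions, 4 without */
static unsigned int eventsel_addr(int index, int first, int core_ext)
{
	if (index >= first)	/* NB counter: rebase onto the NB block */
		return MSR_F15H_NB_PERF_CTL + ((index - first) << 1);
	return core_ext ? MSR_F15H_PERF_CTL + (index << 1)
			: MSR_K7_EVNTSEL0 + index;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)	/* 6 core + 4 NB counters */
		printf("counter %d -> PERF_CTL at 0x%x\n", i,
		       eventsel_addr(i, 6, 1));
	return 0;
}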
@@ -156,14 +237,37 @@ static int amd_pmu_hw_config(struct perf_event *event)
 		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
 				      ARCH_PERFMON_EVENTSEL_OS);
 	else if (event->attr.exclude_host)
-		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
+		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
 	else if (event->attr.exclude_guest)
-		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
+		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
+
+	return 0;
+}
+
+/*
+ * NB counters do not support the following event select bits:
+ *   Host/Guest only
+ *   Counter mask
+ *   Invert counter mask
+ *   Edge detect
+ *   OS/User mode
+ */
+static int amd_nb_hw_config(struct perf_event *event)
+{
+	/* for NB, we only allow system wide counting mode */
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EINVAL;
+
+	if (event->attr.exclude_user || event->attr.exclude_kernel ||
+	    event->attr.exclude_host || event->attr.exclude_guest)
+		return -EINVAL;
 
-	if (event->attr.type != PERF_TYPE_RAW)
-		return 0;
+	event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
+			      ARCH_PERFMON_EVENTSEL_OS);
 
-	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+	if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
+				 ARCH_PERFMON_EVENTSEL_INT))
+		return -EINVAL;
 
 	return 0;
 }
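A sketch of the final mask check, assuming the usual definition of AMD64_RAW_EVENT_MASK_NB as event select (bits 0-7 and 32-35) plus unit mask (bits 8-15); the local macro names and nb_config_valid() are stand-ins for the kernel macros, not the kernel API itself.

/* sketch of the amd_nb_hw_config() mask check; macro values assume the
 * architectural PERF_CTL layout (event select bits 0-7/32-35, unit mask
 * bits 8-15, INT enable bit 20) */
#include <stdio.h>
#include <stdint.h>

#define EVENTSEL_EVENT		(0xffULL | (0x0fULL << 32))
#define EVENTSEL_UMASK		(0xffULL << 8)
#define EVENTSEL_INT		(1ULL << 20)
#define RAW_EVENT_MASK_NB	(EVENTSEL_EVENT | EVENTSEL_UMASK)

static int nb_config_valid(uint64_t config)
{
	/* only event select, unit mask and INT may remain set */
	return !(config & ~(RAW_EVENT_MASK_NB | EVENTSEL_INT));
}

int main(void)
{
	printf("%d\n", nb_config_valid(0x1e0));			/* umask 1, event 0xe0: ok */
	printf("%d\n", nb_config_valid(0x1e0 | (1ULL << 18)));	/* edge bit: rejected */
	return 0;
}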
@@ -181,6 +285,11 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
+{
+	return amd_nb_event_constraint && amd_is_nb_event(hwc);
+}
+
 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 {
 	struct amd_nb *nb = cpuc->amd_nb;
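The (config & 0xe0) == 0xe0 test in the context above means NB events are exactly those with event select codes 0xe0-0xff. A minimal illustration (is_nb_event() is a local copy; 0xe0 is the family 15h DRAM Accesses event and 0x76 is CPU clocks not halted):

/* local copy of the amd_is_nb_event() predicate (illustration only) */
#include <stdio.h>
#include <stdint.h>

static int is_nb_event(uint64_t config)
{
	return (config & 0xe0) == 0xe0;	/* event selects 0xe0-0xff */
}

int main(void)
{
	printf("0xe0 (DRAM accesses):       %d\n", is_nb_event(0xe0));
	printf("0x76 (CPU clocks unhalted): %d\n", is_nb_event(0x76));
	return 0;
}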
@@ -188,20 +297,37 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 	return nb && nb->nb_id != -1;
 }
 
-static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
-				      struct perf_event *event)
+static int amd_pmu_hw_config(struct perf_event *event)
+{
+	int ret;
+
+	/* pass precise event sampling to ibs: */
+	if (event->attr.precise_ip && get_ibs_caps())
+		return -ENOENT;
+
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	ret = x86_pmu_hw_config(event);
+	if (ret)
+		return ret;
+
+	if (event->attr.type == PERF_TYPE_RAW)
+		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+	if (amd_is_perfctr_nb_event(&event->hw))
+		return amd_nb_hw_config(event);
+
+	return amd_core_hw_config(event);
+}
+
+static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
+					   struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
 	struct amd_nb *nb = cpuc->amd_nb;
 	int i;
 
 	/*
-	 * only care about NB events
-	 */
-	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
-		return;
-
-	/*
 	 * need to scan whole list because event may not have
 	 * been assigned during scheduling
 	 *
@@ -215,6 +341,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 	}
 }
 
+static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
+{
+	int core_id = cpu_data(smp_processor_id()).cpu_core_id;
+
+	/* deliver interrupts only to this core */
+	if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
+		hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
+		hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
+		hwc->config |= (u64)(core_id) <<
+			AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
+	}
+}
+
 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
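A userspace sketch of the bit twiddling in amd_nb_interrupt_hw_config(); the bit positions here are assumptions about the AMD64_EVENTSEL_INT_CORE_* macros this series adds (enable at bit 36, 4-bit core select at bits 37-40), and route_nb_interrupt() is a hypothetical helper.

/* sketch of amd_nb_interrupt_hw_config(); bit positions are assumed */
#include <stdio.h>
#include <stdint.h>

#define INT_CORE_ENABLE		(0x1ULL << 36)
#define INT_CORE_SEL_SHIFT	37
#define INT_CORE_SEL_MASK	(0xfULL << INT_CORE_SEL_SHIFT)

static uint64_t route_nb_interrupt(uint64_t config, int core_id)
{
	config |= INT_CORE_ENABLE;	/* enable per-core routing */
	config &= ~INT_CORE_SEL_MASK;	/* clear any stale core id */
	config |= (uint64_t)core_id << INT_CORE_SEL_SHIFT;
	return config;
}

int main(void)
{
	/* route overflow interrupts of event 0xe0, umask 1 to core 3 */
	printf("0x%llx\n", (unsigned long long)route_nb_interrupt(0x1e0, 3));
	return 0;
}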
@@ -247,24 +386,24 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
  *
  * Given that resources are allocated (cmpxchg), they must be
  * eventually freed for others to use. This is accomplished by
- * calling amd_put_event_constraints().
+ * calling __amd_put_nb_event_constraints().
  *
  * Non NB events are not impacted by this restriction.
  */
 static struct event_constraint *
-amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+			       struct event_constraint *c)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct amd_nb *nb = cpuc->amd_nb;
-	struct perf_event *old = NULL;
-	int max = x86_pmu.num_counters;
-	int i, j, k = -1;
+	struct perf_event *old;
+	int idx, new = -1;
 
-	/*
-	 * if not NB event or no NB, then no constraints
-	 */
-	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
-		return &unconstrained;
+	if (!c)
+		c = &unconstrained;
+
+	if (cpuc->is_fake)
+		return c;
 
 	/*
 	 * detect if already present, if so reuse
@@ -276,48 +415,36 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	 * because of successive calls to x86_schedule_events() from
 	 * hw_perf_group_sched_in() without hw_perf_enable()
 	 */
-	for (i = 0; i < max; i++) {
-		/*
-		 * keep track of first free slot
-		 */
-		if (k == -1 && !nb->owners[i])
-			k = i;
+	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+		if (new == -1 || hwc->idx == idx)
+			/* assign free slot, prefer hwc->idx */
+			old = cmpxchg(nb->owners + idx, NULL, event);
+		else if (nb->owners[idx] == event)
+			/* event already present */
+			old = event;
+		else
+			continue;
+
+		if (old && old != event)
+			continue;
+
+		/* reassign to this slot */
+		if (new != -1)
+			cmpxchg(nb->owners + new, event, NULL);
+		new = idx;
 
 		/* already present, reuse */
-		if (nb->owners[i] == event)
-			goto done;
-	}
-	/*
-	 * not present, so grab a new slot
-	 * starting either at:
-	 */
-	if (hwc->idx != -1) {
-		/* previous assignment */
-		i = hwc->idx;
-	} else if (k != -1) {
-		/* start from free slot found */
-		i = k;
-	} else {
-		/*
-		 * event not found, no slot found in
-		 * first pass, try again from the
-		 * beginning
-		 */
-		i = 0;
-	}
-	j = i;
-	do {
-		old = cmpxchg(nb->owners+i, NULL, event);
-		if (!old)
+		if (old == event)
 			break;
-		if (++i == max)
-			i = 0;
-	} while (i != j);
-done:
-	if (!old)
-		return &nb->event_constraints[i];
-
-	return &emptyconstraint;
+	}
+
+	if (new == -1)
+		return &emptyconstraint;
+
+	if (amd_is_perfctr_nb_event(hwc))
+		amd_nb_interrupt_hw_config(hwc);
+
+	return &nb->event_constraints[new];
 }
 
 static struct amd_nb *amd_alloc_nb(int cpu)
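The rewritten claim loop is lock-free: each slot is taken with cmpxchg(), and an earlier provisional claim is released when a better slot (the event's previous counter) turns up. A simplified userspace analogue over a dense slot array, using GCC's __sync builtins in place of the kernel's cmpxchg(); claim_slot() is illustrative only and drops the idxmsk-driven iteration.

/* userspace analogue of the claim loop (illustration, not kernel code) */
#include <stdio.h>

#define NUM_SLOTS 4

static void *owners[NUM_SLOTS];

static int claim_slot(void *event, int prev)
{
	void *old = NULL;
	int idx, new = -1;

	for (idx = 0; idx < NUM_SLOTS; idx++) {
		if (new == -1 || prev == idx)
			/* try to grab a free slot, preferring 'prev' */
			old = __sync_val_compare_and_swap(&owners[idx],
							  NULL, event);
		else if (owners[idx] == event)
			old = event;	/* already present */
		else
			continue;

		if (old && old != event)
			continue;	/* lost the race for this slot */

		if (new != -1)		/* undo the provisional claim */
			__sync_val_compare_and_swap(&owners[new], event, NULL);
		new = idx;

		if (old == event)	/* reuse the existing slot */
			break;
	}
	return new;			/* -1 means all slots are taken */
}

int main(void)
{
	int a, b;

	a = claim_slot(&a, -1);
	b = claim_slot(&b, -1);
	printf("first event -> slot %d, second event -> slot %d\n", a, b);
	return 0;
}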
@@ -364,7 +491,7 @@ static void amd_pmu_cpu_starting(int cpu)
 	struct amd_nb *nb;
 	int i, nb_id;
 
-	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
 	if (boot_cpu_data.x86_max_cores < 2)
 		return;
@@ -407,6 +534,26 @@ static void amd_pmu_cpu_dead(int cpu)
 	}
 }
 
+static struct event_constraint *
+amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+	/*
+	 * if not NB event or no NB, then no constraints
+	 */
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
+		return &unconstrained;
+
+	return __amd_get_nb_event_constraints(cpuc, event,
+					      amd_nb_event_constraint);
+}
+
+static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
+				      struct perf_event *event)
+{
+	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
+		__amd_put_nb_event_constraints(cpuc, event);
+}
+
 PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
 PMU_FORMAT_ATTR(edge,	"config:18"	);
@@ -496,6 +643,9 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 
+static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
+static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
+
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
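The two new constraint masks encode where the NB counters sit in the combined counter index space: 0x3C0 selects indexes 6-9 (right above the six core-extension counters), 0xF0 selects 4-7 (above the four legacy counters). A one-off decoder:

/* decode the new NB constraint index masks (illustration only) */
#include <stdio.h>

int main(void)
{
	unsigned long masks[] = { 0x3c0, 0xf0 };
	int m, bit;

	for (m = 0; m < 2; m++) {
		printf("idxmsk 0x%03lx -> counter indexes:", masks[m]);
		for (bit = 0; bit < 16; bit++)
			if (masks[m] & (1ul << bit))
				printf(" %d", bit);
		printf("\n");
	}
	return 0;
}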
@@ -561,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
 			return &amd_f15_PMC20;
 		}
 	case AMD_EVENT_NB:
-		/* not yet implemented */
-		return &emptyconstraint;
+		return __amd_get_nb_event_constraints(cpuc, event,
+						      amd_nb_event_constraint);
 	default:
 		return &emptyconstraint;
 	}
@@ -587,6 +737,8 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_K7_EVNTSEL0,
 	.perfctr		= MSR_K7_PERFCTR0,
+	.addr_offset		= amd_pmu_addr_offset,
+	.rdpmc_index		= amd_pmu_rdpmc_index,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
 	.num_counters		= AMD64_NUM_COUNTERS,
@@ -608,7 +760,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 
 static int setup_event_constraints(void)
 {
-	if (boot_cpu_data.x86 >= 0x15)
+	if (boot_cpu_data.x86 == 0x15)
 		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
 	return 0;
 }
@@ -638,6 +790,23 @@ static int setup_perfctr_core(void)
 	return 0;
 }
 
+static int setup_perfctr_nb(void)
+{
+	if (!cpu_has_perfctr_nb)
+		return -ENODEV;
+
+	x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
+
+	if (cpu_has_perfctr_core)
+		amd_nb_event_constraint = &amd_NBPMC96;
+	else
+		amd_nb_event_constraint = &amd_NBPMC74;
+
+	printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
+
+	return 0;
+}
+
 __init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
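setup_perfctr_nb() keys on cpu_has_perfctr_nb; in CPUID terms that should correspond to leaf 0x80000001, ECX bit 24 (PerfCtrExtNB), next to bit 23 (PerfCtrExtCore) used by setup_perfctr_core(). A userspace probe, assuming those bit positions:

/* probe the two feature bits the setup code keys on (assumed positions) */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("core perf counter extensions: %s\n",
	       (ecx & (1u << 23)) ? "yes" : "no");
	printf("NB perf counter extensions:   %s\n",
	       (ecx & (1u << 24)) ? "yes" : "no");
	return 0;
}

Once the NB counters are registered, an NB event can only be counted system-wide and unsampled per amd_nb_hw_config() above, so usage would look something like perf stat -a -e r01e0 sleep 1 (raw event 0xe0, umask 0x01, counting mode).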
@@ -648,6 +817,7 @@ __init int amd_pmu_init(void)
 
 	setup_event_constraints();
 	setup_perfctr_core();
+	setup_perfctr_nb();
 
 	/* Events are common for all AMDs */
 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
@@ -678,7 +848,7 @@ void amd_pmu_disable_virt(void)
 	 * SVM is disabled the Guest-only bits still gets set and the counter
 	 * will not count anything.
 	 */
-	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
 	/* Reload all events */
 	x86_pmu_disable_all();