Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--   arch/x86/kernel/cpu/perf_event.c   181
1 file changed, 133 insertions(+), 48 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9d977a2ea693..eed3673a8656 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -30,6 +30,7 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 #include <asm/compat.h>
+#include <asm/smp.h>
 
 #if 0
 #undef wrmsrl
@@ -93,6 +94,8 @@ struct amd_nb {
         struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
+struct intel_percore;
+
 #define MAX_LBR_ENTRIES         16
 
 struct cpu_hw_events {
@@ -128,6 +131,13 @@ struct cpu_hw_events {
         struct perf_branch_entry       lbr_entries[MAX_LBR_ENTRIES];
 
         /*
+         * Intel percore register state.
+         * Coordinate shared resources between HT threads.
+         */
+        int                            percore_used; /* Used by this CPU? */
+        struct intel_percore           *per_core;
+
+        /*
          * AMD specific bits
          */
         struct amd_nb          *amd_nb;
@@ -166,7 +176,7 @@ struct cpu_hw_events {
 /*
  * Constraint on the Event code + UMask
  */
-#define PEBS_EVENT_CONSTRAINT(c, n)    \
+#define INTEL_UEVENT_CONSTRAINT(c, n)  \
         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
 
 #define EVENT_CONSTRAINT_END           \
@@ -175,6 +185,28 @@ struct cpu_hw_events {
 #define for_each_event_constraint(e, c)        \
         for ((e) = (c); (e)->weight; (e)++)
 
+/*
+ * Extra registers for specific events.
+ * Some events need large masks and require external MSRs.
+ * Define a mapping to these extra registers.
+ */
+struct extra_reg {
+        unsigned int   event;
+        unsigned int   msr;
+        u64            config_mask;
+        u64            valid_mask;
+};
+
+#define EVENT_EXTRA_REG(e, ms, m, vm) {        \
+        .event = (e),           \
+        .msr = (ms),            \
+        .config_mask = (m),     \
+        .valid_mask = (vm),     \
+        }
+#define INTEL_EVENT_EXTRA_REG(event, msr, vm)  \
+        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
+#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
+
 union perf_capabilities {
         struct {
                 u64    lbr_format    : 6;
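The hunk above introduces a sentinel-terminated table of struct extra_reg entries: each entry names an event code, the extra MSR that event needs, and a mask of valid bits for the caller-supplied extra value. The following standalone user-space sketch models how such a table is built with the designated-initializer macros and scanned; it is not kernel code, and the event code and MSR number are illustrative stand-ins.

/*
 * Standalone model of an extra_reg-style table; not kernel code.
 * The event code and MSR number below are illustrative stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

struct extra_reg {
	unsigned int event;     /* event code to match */
	unsigned int msr;       /* extra MSR to program */
	uint64_t config_mask;   /* which config bits carry the event code */
	uint64_t valid_mask;    /* bits allowed in the extra value */
};

#define EVENT_EXTRA_REG(e, ms, m, vm) { \
	.event = (e), .msr = (ms), .config_mask = (m), .valid_mask = (vm) }
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)

/* hypothetical table: one event (0xb7) that needs one extra MSR (0x1a6) */
static struct extra_reg demo_extra_regs[] = {
	EVENT_EXTRA_REG(0xb7, 0x1a6, 0xff, 0xffffULL),
	EVENT_EXTRA_END,        /* msr == 0 terminates the table */
};

int main(void)
{
	uint64_t config = 0x01b7;       /* low byte is the event code */
	const struct extra_reg *er;

	for (er = demo_extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		printf("event %#x uses extra MSR %#x\n", er->event, er->msr);
		break;
	}
	return 0;
}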
@@ -219,6 +251,7 @@ struct x86_pmu {
         void           (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                  struct perf_event *event);
         struct event_constraint *event_constraints;
+        struct event_constraint *percore_constraints;
         void           (*quirks)(void);
         int            perfctr_second_write;
 
@@ -247,6 +280,11 @@ struct x86_pmu {
          */
         unsigned long  lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
         int            lbr_nr;                    /* hardware stack size */
+
+        /*
+         * Extra registers for events
+         */
+        struct extra_reg *extra_regs;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -271,6 +309,10 @@ static u64 __read_mostly hw_cache_event_ids
                                 [PERF_COUNT_HW_CACHE_MAX]
                                 [PERF_COUNT_HW_CACHE_OP_MAX]
                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
+static u64 __read_mostly hw_cache_extra_regs
+                                [PERF_COUNT_HW_CACHE_MAX]
+                                [PERF_COUNT_HW_CACHE_OP_MAX]
+                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
 
 /*
  * Propagate event elapsed time into the generic event.
@@ -298,7 +340,7 @@ x86_perf_event_update(struct perf_event *event)
          */
 again:
         prev_raw_count = local64_read(&hwc->prev_count);
-        rdmsrl(hwc->event_base + idx, new_raw_count);
+        rdmsrl(hwc->event_base, new_raw_count);
 
         if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
@@ -321,6 +363,49 @@ again:
         return new_raw_count;
 }
 
+/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */
+static inline int x86_pmu_addr_offset(int index)
+{
+        if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+                return index << 1;
+        return index;
+}
+
+static inline unsigned int x86_pmu_config_addr(int index)
+{
+        return x86_pmu.eventsel + x86_pmu_addr_offset(index);
+}
+
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+        return x86_pmu.perfctr + x86_pmu_addr_offset(index);
+}
+
+/*
+ * Find and validate any extra registers to set up.
+ */
+static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
+{
+        struct extra_reg *er;
+
+        event->hw.extra_reg = 0;
+        event->hw.extra_config = 0;
+
+        if (!x86_pmu.extra_regs)
+                return 0;
+
+        for (er = x86_pmu.extra_regs; er->msr; er++) {
+                if (er->event != (config & er->config_mask))
+                        continue;
+                if (event->attr.config1 & ~er->valid_mask)
+                        return -EINVAL;
+                event->hw.extra_reg = er->msr;
+                event->hw.extra_config = event->attr.config1;
+                break;
+        }
+        return 0;
+}
+
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
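The new x86_pmu_config_addr()/x86_pmu_event_addr() helpers hide the fact that some CPUs interleave their event-select and counter MSRs, so counter i sits at base + 2*i rather than base + i. A minimal standalone model of that arithmetic follows; the base MSR numbers are the author's illustration of the two AMD layouts, not values taken from this patch.

/*
 * Standalone model of the config/event address arithmetic; not kernel code.
 * Base MSR numbers are illustrative.
 */
#include <stdio.h>

static int have_perfctr_core;   /* stands in for the CPU feature test */

static int addr_offset(int index)
{
	/* interleaved CTL/CTR layout: step by two per counter */
	if (have_perfctr_core)
		return index << 1;
	return index;
}

int main(void)
{
	unsigned int legacy_eventsel = 0xc0010000, legacy_perfctr = 0xc0010004;
	unsigned int core_eventsel   = 0xc0010200, core_perfctr   = 0xc0010201;
	int i;

	have_perfctr_core = 0;
	for (i = 0; i < 4; i++)
		printf("legacy ctr%d: ctl=%#x cnt=%#x\n", i,
		       legacy_eventsel + addr_offset(i),
		       legacy_perfctr + addr_offset(i));

	have_perfctr_core = 1;
	for (i = 0; i < 4; i++)
		printf("core   ctr%d: ctl=%#x cnt=%#x\n", i,
		       core_eventsel + addr_offset(i),
		       core_perfctr + addr_offset(i));
	return 0;
}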
@@ -331,12 +416,12 @@ static bool reserve_pmc_hardware(void)
         int i;
 
         for (i = 0; i < x86_pmu.num_counters; i++) {
-                if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+                if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                         goto perfctr_fail;
         }
 
         for (i = 0; i < x86_pmu.num_counters; i++) {
-                if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+                if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                         goto eventsel_fail;
         }
 
@@ -344,13 +429,13 @@ static bool reserve_pmc_hardware(void)
 
 eventsel_fail:
         for (i--; i >= 0; i--)
-                release_evntsel_nmi(x86_pmu.eventsel + i);
+                release_evntsel_nmi(x86_pmu_config_addr(i));
 
         i = x86_pmu.num_counters;
 
 perfctr_fail:
         for (i--; i >= 0; i--)
-                release_perfctr_nmi(x86_pmu.perfctr + i);
+                release_perfctr_nmi(x86_pmu_event_addr(i));
 
         return false;
 }
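The two hunks above only swap the address computation; the surrounding structure is the usual goto-based partial-unwind pattern: the second failure label rolls back what the second loop reserved, resets the index, and falls through into the first label to roll back everything from the first loop. A tiny standalone model of that control flow, with plain array slots in place of MSR reservations:

/*
 * Minimal model of the reserve/rollback structure used by
 * reserve_pmc_hardware(); the "resources" here are just array slots.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR 4
static bool taken_a[NR], taken_b[NR];

static bool reserve_all(void)
{
	int i;

	for (i = 0; i < NR; i++) {
		if (taken_a[i])
			goto a_fail;
		taken_a[i] = true;
	}
	for (i = 0; i < NR; i++) {
		if (taken_b[i])
			goto b_fail;
		taken_b[i] = true;
	}
	return true;

b_fail:
	for (i--; i >= 0; i--)  /* undo the b's we did get */
		taken_b[i] = false;
	i = NR;
a_fail:
	for (i--; i >= 0; i--)  /* undo all of the a's */
		taken_a[i] = false;
	return false;
}

int main(void)
{
	printf("first reserve: %s\n", reserve_all() ? "ok" : "failed");
	return 0;
}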
@@ -360,8 +445,8 @@ static void release_pmc_hardware(void)
         int i;
 
         for (i = 0; i < x86_pmu.num_counters; i++) {
-                release_perfctr_nmi(x86_pmu.perfctr + i);
-                release_evntsel_nmi(x86_pmu.eventsel + i);
+                release_perfctr_nmi(x86_pmu_event_addr(i));
+                release_evntsel_nmi(x86_pmu_config_addr(i));
         }
 }
 
@@ -382,7 +467,7 @@ static bool check_hw_exists(void)
          * complain and bail.
          */
         for (i = 0; i < x86_pmu.num_counters; i++) {
-                reg = x86_pmu.eventsel + i;
+                reg = x86_pmu_config_addr(i);
                 ret = rdmsrl_safe(reg, &val);
                 if (ret)
                         goto msr_fail;
@@ -407,20 +492,25 @@ static bool check_hw_exists(void)
          * that don't trap on the MSR access and always return 0s.
          */
         val = 0xabcdUL;
-        ret = checking_wrmsrl(x86_pmu.perfctr, val);
-        ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+        ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+        ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
         if (ret || val != val_new)
                 goto msr_fail;
 
         return true;
 
 bios_fail:
-        printk(KERN_CONT "Broken BIOS detected, using software events only.\n");
+        /*
+         * We still allow the PMU driver to operate:
+         */
+        printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
         printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
-        return false;
+
+        return true;
 
 msr_fail:
         printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
+
         return false;
 }
 
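The probe above writes a known value to counter 0 and reads it back; if the value does not stick (for example under a hypervisor that silently ignores the MSRs), the PMU is treated as absent. A standalone model of that read-back check, with an array standing in for the MSR so the sketch runs anywhere:

/*
 * Model of the check_hw_exists() write/read-back test; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr[2];
static int msrs_work = 1;       /* flip to 0 to model writes being dropped */

static int model_wrmsr(int reg, uint64_t val)
{
	if (msrs_work)
		fake_msr[reg] = val;
	return 0;               /* no fault either way */
}

static int model_rdmsr(int reg, uint64_t *val)
{
	*val = msrs_work ? fake_msr[reg] : 0;
	return 0;
}

static int pmu_seems_present(void)
{
	uint64_t val = 0xabcdUL, val_new = 0;
	int ret;

	ret = model_wrmsr(0, val);
	ret |= model_rdmsr(0, &val_new);
	return !ret && val == val_new;
}

int main(void)
{
	printf("PMU present: %d\n", pmu_seems_present());
	msrs_work = 0;
	printf("PMU present: %d\n", pmu_seems_present());
	return 0;
}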
@@ -442,8 +532,9 @@ static inline int x86_pmu_initialized(void)
 }
 
 static inline int
-set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
+set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 {
+        struct perf_event_attr *attr = &event->attr;
         unsigned int cache_type, cache_op, cache_result;
         u64 config, val;
 
@@ -470,8 +561,8 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
                 return -EINVAL;
 
         hwc->config |= val;
-
-        return 0;
+        attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
+        return x86_pmu_extra_regs(val, event);
 }
 
 static int x86_setup_perfctr(struct perf_event *event)
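With this change the cache-event path looks up a per-(type, op, result) value in the new hw_cache_extra_regs table, stores it in attr->config1, and then runs the extra-register matcher on the event encoding. A small standalone sketch of that parallel table indexing; the dimensions and the one populated entry are made up for the example.

/*
 * Model of the hw_cache_event_ids / hw_cache_extra_regs parallel lookup;
 * not kernel code, values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

enum { C_TYPE_MAX = 7, C_OP_MAX = 3, C_RESULT_MAX = 2 };

static uint64_t cache_event_ids[C_TYPE_MAX][C_OP_MAX][C_RESULT_MAX];
static uint64_t cache_extra_regs[C_TYPE_MAX][C_OP_MAX][C_RESULT_MAX];

int main(void)
{
	/* hypothetical: cache type 2, read (op 0), miss (result 1) */
	unsigned int type = 2, op = 0, result = 1;

	cache_event_ids[type][op][result]  = 0x01b7;  /* event encoding */
	cache_extra_regs[type][op][result] = 0x4011;  /* value for the extra MSR */

	printf("config  = %#llx\n",
	       (unsigned long long)cache_event_ids[type][op][result]);
	printf("config1 = %#llx\n",
	       (unsigned long long)cache_extra_regs[type][op][result]);
	return 0;
}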
@@ -496,10 +587,10 @@ static int x86_setup_perfctr(struct perf_event *event)
         }
 
         if (attr->type == PERF_TYPE_RAW)
-                return 0;
+                return x86_pmu_extra_regs(event->attr.config, event);
 
         if (attr->type == PERF_TYPE_HW_CACHE)
-                return set_ext_hw_attr(hwc, attr);
+                return set_ext_hw_attr(hwc, event);
 
         if (attr->config >= x86_pmu.max_events)
                 return -EINVAL;
@@ -617,11 +708,11 @@ static void x86_pmu_disable_all(void)
 
                 if (!test_bit(idx, cpuc->active_mask))
                         continue;
-                rdmsrl(x86_pmu.eventsel + idx, val);
+                rdmsrl(x86_pmu_config_addr(idx), val);
                 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                         continue;
                 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-                wrmsrl(x86_pmu.eventsel + idx, val);
+                wrmsrl(x86_pmu_config_addr(idx), val);
         }
 }
 
@@ -642,21 +733,26 @@ static void x86_pmu_disable(struct pmu *pmu)
         x86_pmu.disable_all();
 }
 
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+                                          u64 enable_mask)
+{
+        if (hwc->extra_reg)
+                wrmsrl(hwc->extra_reg, hwc->extra_config);
+        wrmsrl(hwc->config_base, hwc->config | enable_mask);
+}
+
 static void x86_pmu_enable_all(int added)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         int idx;
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-                struct perf_event *event = cpuc->events[idx];
-                u64 val;
+                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
 
                 if (!test_bit(idx, cpuc->active_mask))
                         continue;
 
-                val = event->hw.config;
-                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-                wrmsrl(x86_pmu.eventsel + idx, val);
+                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
         }
 }
 
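The relocated __x86_pmu_enable_event() now programs the event's extra MSR, if it has one, before writing the event-select register with the enable bit, so the extra configuration is in place when counting starts. A standalone sketch of that ordering; the MSR writes only print, and the MSR numbers are illustrative.

/*
 * Model of the enable ordering: extra MSR first, then event select + enable.
 * Not kernel code; wrmsrl() here only prints.
 */
#include <stdint.h>
#include <stdio.h>

struct hw_event {
	unsigned int config_base;       /* event-select "MSR" */
	uint64_t config;
	unsigned int extra_reg;         /* 0 means no extra MSR */
	uint64_t extra_config;
};

#define EVENTSEL_ENABLE (1ULL << 22)    /* the architectural enable bit */

static void wrmsrl(unsigned int msr, uint64_t val)
{
	printf("wrmsr %#x <- %#llx\n", msr, (unsigned long long)val);
}

static void enable_event(const struct hw_event *hwc, uint64_t enable_mask)
{
	if (hwc->extra_reg)
		wrmsrl(hwc->extra_reg, hwc->extra_config);
	wrmsrl(hwc->config_base, hwc->config | enable_mask);
}

int main(void)
{
	struct hw_event ev = {
		.config_base  = 0x186,          /* illustrative select MSR */
		.config       = 0x01b7,
		.extra_reg    = 0x1a6,          /* illustrative extra MSR */
		.extra_config = 0x4011,
	};

	enable_event(&ev, EVENTSEL_ENABLE);
	return 0;
}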
@@ -821,15 +917,10 @@ static inline void x86_assign_hw_event(struct perf_event *event,
                 hwc->event_base = 0;
         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-                /*
-                 * We set it so that event_base + idx in wrmsr/rdmsr maps to
-                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
-                 */
-                hwc->event_base =
-                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
+                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
         } else {
-                hwc->config_base = x86_pmu.eventsel;
-                hwc->event_base  = x86_pmu.perfctr;
+                hwc->config_base = x86_pmu_config_addr(hwc->idx);
+                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
         }
 }
 
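Before this change, event_base held a biased base address so that the later "event_base + idx" reads and writes landed on the right fixed counter; now event_base holds the final MSR address and the callers drop the "+ idx". A short worked example of the two schemes; the constants are the conventional values (fixed counter 0 at MSR 0x309, fixed indices starting at 32) quoted for illustration, not taken from this file.

/*
 * Worked example of the fixed-counter base arithmetic; not kernel code.
 */
#include <stdio.h>

#define MSR_FIXED_CTR0  0x309
#define PMC_IDX_FIXED   32

int main(void)
{
	int idx;

	for (idx = PMC_IDX_FIXED; idx < PMC_IDX_FIXED + 3; idx++) {
		/* old scheme: base chosen so that (base + idx) is the MSR */
		unsigned int old_base = MSR_FIXED_CTR0 - PMC_IDX_FIXED;
		/* new scheme: event_base already is the final MSR address */
		unsigned int new_base = MSR_FIXED_CTR0 + (idx - PMC_IDX_FIXED);

		printf("idx %d: old base+idx = %#x, new event_base = %#x\n",
		       idx, old_base + idx, new_base);
	}
	return 0;
}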
@@ -915,17 +1006,11 @@ static void x86_pmu_enable(struct pmu *pmu)
         x86_pmu.enable_all(added);
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
-                                          u64 enable_mask)
-{
-        wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
-}
-
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
         struct hw_perf_event *hwc = &event->hw;
 
-        wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+        wrmsrl(hwc->config_base, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -978,7 +1063,7 @@ x86_perf_event_set_period(struct perf_event *event)
          */
         local64_set(&hwc->prev_count, (u64)-left);
 
-        wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
+        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
         /*
          * Due to erratum on certan cpu we need
@@ -986,7 +1071,7 @@ x86_perf_event_set_period(struct perf_event *event)
          * is updated properly
          */
         if (x86_pmu.perfctr_second_write) {
-                wrmsrl(hwc->event_base + idx,
+                wrmsrl(hwc->event_base,
                         (u64)(-left) & x86_pmu.cntval_mask);
         }
 
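The value written when arming the counter is the two's complement of the remaining period, truncated to the counter width, so the counter overflows after exactly "left" increments. A small standalone demo of that arithmetic, assuming a 48-bit counter width for illustration:

/*
 * Demo of the (u64)(-left) & cntval_mask arithmetic; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cntval_mask = (1ULL << 48) - 1;        /* assumed 48-bit counter */
	int64_t left = 100000;                          /* events until overflow */
	uint64_t programmed = (uint64_t)(-left) & cntval_mask;

	printf("programmed value: %#llx\n", (unsigned long long)programmed);
	printf("increments to wrap: %llu\n",
	       (unsigned long long)(cntval_mask + 1 - programmed));
	return 0;
}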
@@ -1029,7 +1114,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
 
         /*
          * If group events scheduling transaction was started,
-         * skip the schedulability test here, it will be peformed
+         * skip the schedulability test here, it will be performed
          * at commit time (->commit_txn) as a whole
          */
         if (cpuc->group_flag & PERF_EVENT_TXN)
@@ -1113,8 +1198,8 @@ void perf_event_print_debug(void)
         pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-                rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
-                rdmsrl(x86_pmu.perfctr + idx, pmc_count);
+                rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+                rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
@@ -1389,7 +1474,7 @@ static void __init pmu_check_apic(void)
         pr_info("no hardware sampling interrupt available.\n");
 }
 
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
         struct event_constraint *c;
         int err;
@@ -1608,7 +1693,7 @@ out:
         return ret;
 }
 
-int x86_pmu_event_init(struct perf_event *event)
+static int x86_pmu_event_init(struct perf_event *event)
 {
         struct pmu *tmp;
         int err;
@@ -1710,7 +1795,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
         perf_callchain_store(entry, regs->ip);
 
-        dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
+        dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
 }
 
 #ifdef CONFIG_COMPAT