Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 172
1 file changed, 126 insertions(+), 46 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9d977a2ea693..87eab4a27dfc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -30,6 +30,7 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 #include <asm/compat.h>
+#include <asm/smp.h>
 
 #if 0
 #undef wrmsrl
@@ -93,6 +94,8 @@ struct amd_nb {
 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
+struct intel_percore;
+
 #define MAX_LBR_ENTRIES		16
 
 struct cpu_hw_events {
@@ -128,6 +131,13 @@ struct cpu_hw_events {
 	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
 
 	/*
+	 * Intel percore register state.
+	 * Coordinate shared resources between HT threads.
+	 */
+	int				percore_used; /* Used by this CPU? */
+	struct intel_percore		*per_core;
+
+	/*
 	 * AMD specific bits
 	 */
 	struct amd_nb		*amd_nb;
@@ -166,7 +176,7 @@ struct cpu_hw_events {
 /*
  * Constraint on the Event code + UMask
  */
-#define PEBS_EVENT_CONSTRAINT(c, n)	\
+#define INTEL_UEVENT_CONSTRAINT(c, n)	\
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
 
 #define EVENT_CONSTRAINT_END		\
@@ -175,6 +185,28 @@ struct cpu_hw_events {
 #define for_each_event_constraint(e, c)	\
 	for ((e) = (c); (e)->weight; (e)++)
 
+/*
+ * Extra registers for specific events.
+ * Some events need large masks and require external MSRs.
+ * Define a mapping to these extra registers.
+ */
+struct extra_reg {
+	unsigned int	event;
+	unsigned int	msr;
+	u64		config_mask;
+	u64		valid_mask;
+};
+
+#define EVENT_EXTRA_REG(e, ms, m, vm) {	\
+	.event = (e),		\
+	.msr = (ms),		\
+	.config_mask = (m),	\
+	.valid_mask = (vm),	\
+	}
+#define INTEL_EVENT_EXTRA_REG(event, msr, vm)	\
+	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
+#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
+
 union perf_capabilities {
 	struct {
 		u64	lbr_format : 6;
@@ -219,6 +251,7 @@ struct x86_pmu {
 	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
 						 struct perf_event *event);
 	struct event_constraint *event_constraints;
+	struct event_constraint *percore_constraints;
 	void		(*quirks)(void);
 	int		perfctr_second_write;
 
@@ -247,6 +280,11 @@ struct x86_pmu {
 	 */
 	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
 	int		lbr_nr;			   /* hardware stack size */
+
+	/*
+	 * Extra registers for events
+	 */
+	struct extra_reg *extra_regs;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -271,6 +309,10 @@ static u64 __read_mostly hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
+static u64 __read_mostly hw_cache_extra_regs
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX];
 
 /*
  * Propagate event elapsed time into the generic event.
@@ -298,7 +340,7 @@ x86_perf_event_update(struct perf_event *event)
 	 */
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
-	rdmsrl(hwc->event_base + idx, new_raw_count);
+	rdmsrl(hwc->event_base, new_raw_count);
 
 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 			    new_raw_count) != prev_raw_count)
@@ -321,6 +363,49 @@ again:
 	return new_raw_count;
 }
 
+/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */
+static inline int x86_pmu_addr_offset(int index)
+{
+	if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+		return index << 1;
+	return index;
+}
+
+static inline unsigned int x86_pmu_config_addr(int index)
+{
+	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
+}
+
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
+}
+
+/*
+ * Find and validate any extra registers to set up.
+ */
+static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
+{
+	struct extra_reg *er;
+
+	event->hw.extra_reg = 0;
+	event->hw.extra_config = 0;
+
+	if (!x86_pmu.extra_regs)
+		return 0;
+
+	for (er = x86_pmu.extra_regs; er->msr; er++) {
+		if (er->event != (config & er->config_mask))
+			continue;
+		if (event->attr.config1 & ~er->valid_mask)
+			return -EINVAL;
+		event->hw.extra_reg = er->msr;
+		event->hw.extra_config = event->attr.config1;
+		break;
+	}
+	return 0;
+}
+
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
@@ -331,12 +416,12 @@ static bool reserve_pmc_hardware(void)
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
 			goto perfctr_fail;
 	}
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
 			goto eventsel_fail;
 	}
 
@@ -344,13 +429,13 @@ static bool reserve_pmc_hardware(void)
 
 eventsel_fail:
 	for (i--; i >= 0; i--)
-		release_evntsel_nmi(x86_pmu.eventsel + i);
+		release_evntsel_nmi(x86_pmu_config_addr(i));
 
 	i = x86_pmu.num_counters;
 
 perfctr_fail:
 	for (i--; i >= 0; i--)
-		release_perfctr_nmi(x86_pmu.perfctr + i);
+		release_perfctr_nmi(x86_pmu_event_addr(i));
 
 	return false;
 }
@@ -360,8 +445,8 @@ static void release_pmc_hardware(void)
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		release_perfctr_nmi(x86_pmu.perfctr + i);
-		release_evntsel_nmi(x86_pmu.eventsel + i);
+		release_perfctr_nmi(x86_pmu_event_addr(i));
+		release_evntsel_nmi(x86_pmu_config_addr(i));
 	}
 }
 
@@ -382,7 +467,7 @@ static bool check_hw_exists(void)
 	 * complain and bail.
 	 */
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		reg = x86_pmu.eventsel + i;
+		reg = x86_pmu_config_addr(i);
 		ret = rdmsrl_safe(reg, &val);
 		if (ret)
 			goto msr_fail;
@@ -407,8 +492,8 @@ static bool check_hw_exists(void)
 	 * that don't trap on the MSR access and always return 0s.
 	 */
 	val = 0xabcdUL;
-	ret = checking_wrmsrl(x86_pmu.perfctr, val);
-	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+	ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+	ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
 	if (ret || val != val_new)
 		goto msr_fail;
 
@@ -442,8 +527,9 @@ static inline int x86_pmu_initialized(void)
 }
 
 static inline int
-set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
+set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 {
+	struct perf_event_attr *attr = &event->attr;
 	unsigned int cache_type, cache_op, cache_result;
 	u64 config, val;
 
@@ -470,8 +556,8 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
 		return -EINVAL;
 
 	hwc->config |= val;
-
-	return 0;
+	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
+	return x86_pmu_extra_regs(val, event);
 }
 
 static int x86_setup_perfctr(struct perf_event *event)
@@ -496,10 +582,10 @@ static int x86_setup_perfctr(struct perf_event *event)
 	}
 
 	if (attr->type == PERF_TYPE_RAW)
-		return 0;
+		return x86_pmu_extra_regs(event->attr.config, event);
 
 	if (attr->type == PERF_TYPE_HW_CACHE)
-		return set_ext_hw_attr(hwc, attr);
+		return set_ext_hw_attr(hwc, event);
 
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
@@ -617,11 +703,11 @@ static void x86_pmu_disable_all(void)
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(x86_pmu.eventsel + idx, val);
+		rdmsrl(x86_pmu_config_addr(idx), val);
 		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 			continue;
 		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-		wrmsrl(x86_pmu.eventsel + idx, val);
+		wrmsrl(x86_pmu_config_addr(idx), val);
 	}
 }
 
@@ -642,21 +728,26 @@ static void x86_pmu_disable(struct pmu *pmu)
 	x86_pmu.disable_all();
 }
 
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+					  u64 enable_mask)
+{
+	if (hwc->extra_reg)
+		wrmsrl(hwc->extra_reg, hwc->extra_config);
+	wrmsrl(hwc->config_base, hwc->config | enable_mask);
+}
+
 static void x86_pmu_enable_all(int added)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		struct perf_event *event = cpuc->events[idx];
-		u64 val;
+		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
-		val = event->hw.config;
-		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-		wrmsrl(x86_pmu.eventsel + idx, val);
+		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 	}
 }
 
@@ -821,15 +912,10 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 		hwc->event_base = 0;
 	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-		/*
-		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
-		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
-		 */
-		hwc->event_base =
-			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
+		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
 	} else {
-		hwc->config_base = x86_pmu.eventsel;
-		hwc->event_base = x86_pmu.perfctr;
+		hwc->config_base = x86_pmu_config_addr(hwc->idx);
+		hwc->event_base = x86_pmu_event_addr(hwc->idx);
 	}
 }
 
@@ -915,17 +1001,11 @@ static void x86_pmu_enable(struct pmu *pmu)
 	x86_pmu.enable_all(added);
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
-					  u64 enable_mask)
-{
-	wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
-}
-
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
-	wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+	wrmsrl(hwc->config_base, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -978,7 +1058,7 @@ x86_perf_event_set_period(struct perf_event *event)
 	 */
 	local64_set(&hwc->prev_count, (u64)-left);
 
-	wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
+	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
 	/*
 	 * Due to erratum on certan cpu we need
@@ -986,7 +1066,7 @@ x86_perf_event_set_period(struct perf_event *event)
 	 * is updated properly
 	 */
 	if (x86_pmu.perfctr_second_write) {
-		wrmsrl(hwc->event_base + idx,
+		wrmsrl(hwc->event_base,
 			(u64)(-left) & x86_pmu.cntval_mask);
 	}
 
@@ -1029,7 +1109,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
 
 	/*
 	 * If group events scheduling transaction was started,
-	 * skip the schedulability test here, it will be peformed
+	 * skip the schedulability test here, it will be performed
 	 * at commit time (->commit_txn) as a whole
 	 */
 	if (cpuc->group_flag & PERF_EVENT_TXN)
@@ -1113,8 +1193,8 @@ void perf_event_print_debug(void)
 	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
-		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
+		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+		rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
 		prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
@@ -1389,7 +1469,7 @@ static void __init pmu_check_apic(void)
 	pr_info("no hardware sampling interrupt available.\n");
 }
 
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
 	struct event_constraint *c;
 	int err;
@@ -1608,7 +1688,7 @@ out:
 	return ret;
 }
 
-int x86_pmu_event_init(struct perf_event *event)
+static int x86_pmu_event_init(struct perf_event *event)
 {
 	struct pmu *tmp;
 	int err;
@@ -1710,7 +1790,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	perf_callchain_store(entry, regs->ip);
 
-	dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
+	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
 }
 
 #ifdef CONFIG_COMPAT