Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
 -rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 170
 1 file changed, 126 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9d977a2ea69..26604188aa4 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -30,6 +30,7 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 #include <asm/compat.h>
+#include <asm/smp.h>
 
 #if 0
 #undef wrmsrl
@@ -93,6 +94,8 @@ struct amd_nb {
         struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
+struct intel_percore;
+
 #define MAX_LBR_ENTRIES         16
 
 struct cpu_hw_events {
@@ -128,6 +131,13 @@ struct cpu_hw_events {
         struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
 
         /*
+         * Intel percore register state.
+         * Coordinate shared resources between HT threads.
+         */
+        int                             percore_used; /* Used by this CPU? */
+        struct intel_percore            *per_core;
+
+        /*
          * AMD specific bits
          */
         struct amd_nb           *amd_nb;
@@ -166,8 +176,10 @@ struct cpu_hw_events {
 /*
  * Constraint on the Event code + UMask
  */
-#define PEBS_EVENT_CONSTRAINT(c, n)     \
+#define INTEL_UEVENT_CONSTRAINT(c, n)   \
         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define PEBS_EVENT_CONSTRAINT(c, n)     \
+        INTEL_UEVENT_CONSTRAINT(c, n)
 
 #define EVENT_CONSTRAINT_END            \
         EVENT_CONSTRAINT(0, 0, 0)
@@ -175,6 +187,28 @@ struct cpu_hw_events {
 #define for_each_event_constraint(e, c) \
         for ((e) = (c); (e)->weight; (e)++)
 
+/*
+ * Extra registers for specific events.
+ * Some events need large masks and require external MSRs.
+ * Define a mapping to these extra registers.
+ */
+struct extra_reg {
+        unsigned int            event;
+        unsigned int            msr;
+        u64                     config_mask;
+        u64                     valid_mask;
+};
+
+#define EVENT_EXTRA_REG(e, ms, m, vm) { \
+        .event = (e),           \
+        .msr = (ms),            \
+        .config_mask = (m),     \
+        .valid_mask = (vm),     \
+        }
+#define INTEL_EVENT_EXTRA_REG(event, msr, vm)   \
+        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
+#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
+
 union perf_capabilities {
         struct {
                 u64     lbr_format    : 6;
@@ -219,6 +253,7 @@ struct x86_pmu {
         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                  struct perf_event *event);
         struct event_constraint *event_constraints;
+        struct event_constraint *percore_constraints;
         void            (*quirks)(void);
         int             perfctr_second_write;
 
@@ -247,6 +282,11 @@ struct x86_pmu {
          */
         unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
         int             lbr_nr;                    /* hardware stack size */
+
+        /*
+         * Extra registers for events
+         */
+        struct extra_reg *extra_regs;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -271,6 +311,10 @@ static u64 __read_mostly hw_cache_event_ids
                                 [PERF_COUNT_HW_CACHE_MAX]
                                 [PERF_COUNT_HW_CACHE_OP_MAX]
                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
+static u64 __read_mostly hw_cache_extra_regs
+                                [PERF_COUNT_HW_CACHE_MAX]
+                                [PERF_COUNT_HW_CACHE_OP_MAX]
+                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
 
 /*
  * Propagate event elapsed time into the generic event.
@@ -298,7 +342,7 @@ x86_perf_event_update(struct perf_event *event)
          */
 again:
         prev_raw_count = local64_read(&hwc->prev_count);
-        rdmsrl(hwc->event_base + idx, new_raw_count);
+        rdmsrl(hwc->event_base, new_raw_count);
 
         if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
@@ -321,6 +365,49 @@ again:
         return new_raw_count;
 }
 
+/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */
+static inline int x86_pmu_addr_offset(int index)
+{
+        if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+                return index << 1;
+        return index;
+}
+
+static inline unsigned int x86_pmu_config_addr(int index)
+{
+        return x86_pmu.eventsel + x86_pmu_addr_offset(index);
+}
+
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+        return x86_pmu.perfctr + x86_pmu_addr_offset(index);
+}
+
+/*
+ * Find and validate any extra registers to set up.
+ */
+static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
+{
+        struct extra_reg *er;
+
+        event->hw.extra_reg = 0;
+        event->hw.extra_config = 0;
+
+        if (!x86_pmu.extra_regs)
+                return 0;
+
+        for (er = x86_pmu.extra_regs; er->msr; er++) {
+                if (er->event != (config & er->config_mask))
+                        continue;
+                if (event->attr.config1 & ~er->valid_mask)
+                        return -EINVAL;
+                event->hw.extra_reg = er->msr;
+                event->hw.extra_config = event->attr.config1;
+                break;
+        }
+        return 0;
+}
+
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
@@ -331,12 +418,12 @@ static bool reserve_pmc_hardware(void)
         int i;
 
         for (i = 0; i < x86_pmu.num_counters; i++) {
-                if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+                if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                         goto perfctr_fail;
         }
 
         for (i = 0; i < x86_pmu.num_counters; i++) {
-                if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+                if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                         goto eventsel_fail;
         }
 
@@ -344,13 +431,13 @@ static bool reserve_pmc_hardware(void)
 
 eventsel_fail:
         for (i--; i >= 0; i--)
-                release_evntsel_nmi(x86_pmu.eventsel + i);
+                release_evntsel_nmi(x86_pmu_config_addr(i));
 
         i = x86_pmu.num_counters;
 
 perfctr_fail:
         for (i--; i >= 0; i--)
-                release_perfctr_nmi(x86_pmu.perfctr + i);
+                release_perfctr_nmi(x86_pmu_event_addr(i));
 
         return false;
 }
@@ -360,8 +447,8 @@ static void release_pmc_hardware(void)
         int i;
 
         for (i = 0; i < x86_pmu.num_counters; i++) {
-                release_perfctr_nmi(x86_pmu.perfctr + i);
-                release_evntsel_nmi(x86_pmu.eventsel + i);
+                release_perfctr_nmi(x86_pmu_event_addr(i));
+                release_evntsel_nmi(x86_pmu_config_addr(i));
         }
 }
 
@@ -382,7 +469,7 @@ static bool check_hw_exists(void)
          * complain and bail.
          */
         for (i = 0; i < x86_pmu.num_counters; i++) {
-                reg = x86_pmu.eventsel + i;
+                reg = x86_pmu_config_addr(i);
                 ret = rdmsrl_safe(reg, &val);
                 if (ret)
                         goto msr_fail;
@@ -407,8 +494,8 @@ static bool check_hw_exists(void)
          * that don't trap on the MSR access and always return 0s.
          */
         val = 0xabcdUL;
-        ret = checking_wrmsrl(x86_pmu.perfctr, val);
-        ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+        ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+        ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
         if (ret || val != val_new)
                 goto msr_fail;
 
@@ -442,8 +529,9 @@ static inline int x86_pmu_initialized(void)
 }
 
 static inline int
-set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
+set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 {
+        struct perf_event_attr *attr = &event->attr;
         unsigned int cache_type, cache_op, cache_result;
         u64 config, val;
 
@@ -470,8 +558,8 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
                 return -EINVAL;
 
         hwc->config |= val;
-
-        return 0;
+        attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
+        return x86_pmu_extra_regs(val, event);
 }
 
 static int x86_setup_perfctr(struct perf_event *event)
@@ -496,10 +584,10 @@ static int x86_setup_perfctr(struct perf_event *event)
         }
 
         if (attr->type == PERF_TYPE_RAW)
-                return 0;
+                return x86_pmu_extra_regs(event->attr.config, event);
 
         if (attr->type == PERF_TYPE_HW_CACHE)
-                return set_ext_hw_attr(hwc, attr);
+                return set_ext_hw_attr(hwc, event);
 
         if (attr->config >= x86_pmu.max_events)
                 return -EINVAL;
@@ -617,11 +705,11 @@ static void x86_pmu_disable_all(void)
 
                 if (!test_bit(idx, cpuc->active_mask))
                         continue;
-                rdmsrl(x86_pmu.eventsel + idx, val);
+                rdmsrl(x86_pmu_config_addr(idx), val);
                 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                         continue;
                 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-                wrmsrl(x86_pmu.eventsel + idx, val);
+                wrmsrl(x86_pmu_config_addr(idx), val);
         }
 }
 
@@ -642,21 +730,26 @@ static void x86_pmu_disable(struct pmu *pmu)
         x86_pmu.disable_all();
 }
 
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+                                          u64 enable_mask)
+{
+        if (hwc->extra_reg)
+                wrmsrl(hwc->extra_reg, hwc->extra_config);
+        wrmsrl(hwc->config_base, hwc->config | enable_mask);
+}
+
 static void x86_pmu_enable_all(int added)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         int idx;
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-                struct perf_event *event = cpuc->events[idx];
-                u64 val;
+                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
 
                 if (!test_bit(idx, cpuc->active_mask))
                         continue;
 
-                val = event->hw.config;
-                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-                wrmsrl(x86_pmu.eventsel + idx, val);
+                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
         }
 }
 
@@ -821,15 +914,10 @@ static inline void x86_assign_hw_event(struct perf_event *event,
                 hwc->event_base = 0;
         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-                /*
-                 * We set it so that event_base + idx in wrmsr/rdmsr maps to
-                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
-                 */
-                hwc->event_base =
-                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
+                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
         } else {
-                hwc->config_base = x86_pmu.eventsel;
-                hwc->event_base  = x86_pmu.perfctr;
+                hwc->config_base = x86_pmu_config_addr(hwc->idx);
+                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
         }
 }
 
@@ -915,17 +1003,11 @@ static void x86_pmu_enable(struct pmu *pmu)
         x86_pmu.enable_all(added);
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
-                                          u64 enable_mask)
-{
-        wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
-}
-
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
         struct hw_perf_event *hwc = &event->hw;
 
-        wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+        wrmsrl(hwc->config_base, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -978,7 +1060,7 @@ x86_perf_event_set_period(struct perf_event *event)
          */
         local64_set(&hwc->prev_count, (u64)-left);
 
-        wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
+        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
         /*
          * Due to erratum on certan cpu we need
@@ -986,7 +1068,7 @@ x86_perf_event_set_period(struct perf_event *event)
          * is updated properly
          */
         if (x86_pmu.perfctr_second_write) {
-                wrmsrl(hwc->event_base + idx,
+                wrmsrl(hwc->event_base,
                         (u64)(-left) & x86_pmu.cntval_mask);
         }
 
@@ -1113,8 +1195,8 @@ void perf_event_print_debug(void)
         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-                rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
-                rdmsrl(x86_pmu.perfctr + idx, pmc_count);
+                rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+                rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
@@ -1389,7 +1471,7 @@ static void __init pmu_check_apic(void)
         pr_info("no hardware sampling interrupt available.\n");
 }
 
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
         struct event_constraint *c;
         int err;
@@ -1608,7 +1690,7 @@ out:
         return ret;
 }
 
-int x86_pmu_event_init(struct perf_event *event)
+static int x86_pmu_event_init(struct perf_event *event)
 {
         struct pmu *tmp;
         int err;