author     Robert Richter <robert.richter@amd.com>    2009-04-29 06:47:18 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-04-29 08:51:11 -0400
commit     7c90cc45f89af4dd4617f97d452740ad95b800d5 (patch)
tree       51c76b2fbe59ddcde31098a83a57fef085764f92 /arch/x86/kernel/cpu/perf_counter.c
parent     6f00cada07bb5da7f751929d3173494dcc5446cc (diff)
perf_counter, x86: rework counter enable functions
There is vendor-specific code in generic x86 code, and there is
vendor-specific code that could be generic. This patch introduces
x86_pmu_enable_counter() for generic x86 code. Fixed-counter code for
Intel is moved into Intel-only functions. In the end, checks and calls
via function pointers are reduced to the necessary minimum, and the
internal function interface is changed. (An illustrative sketch of the
resulting dispatch follows the sign-off block below.)
[ Impact: refactor and generalize code ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-22-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
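
The hunks below change the x86_pmu.enable callback to take a struct
hw_perf_counter plus a counter index, and route both the Intel and AMD
wrappers through the new generic x86_pmu_enable_counter() helper. The
following is a minimal user-space sketch of the resulting dispatch,
written for illustration only: the struct is reduced to the two fields
the diff actually touches (config and config_base), the MSR write is
modeled with printf(), and the constants are placeholder examples, not
values taken from this patch.

    /* Illustrative sketch, not part of the patch: models the reworked
     * enable dispatch in user space.  The wrmsr is replaced by printf(). */
    #include <stdio.h>
    #include <stdint.h>

    #define ARCH_PERFMON_EVENTSEL0_ENABLE   (1ULL << 22)    /* EN bit */

    struct hw_perf_counter {
            uint64_t config;        /* event selector value */
            unsigned config_base;   /* base MSR of this counter's event select */
    };

    /* After the patch the vendor callback takes (hwc, idx), not (idx, config). */
    struct x86_pmu {
            void (*enable)(struct hw_perf_counter *hwc, int idx);
    };

    /* Generic helper introduced by the patch: the one place that forms
     * "config | ENABLE" and writes the per-counter event-select MSR. */
    static void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
    {
            printf("wrmsr(0x%x, 0x%llx)\n", hwc->config_base + idx,
                   (unsigned long long)(hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE));
    }

    /* Vendor wrapper keeps only the vendor-specific decision (the Intel
     * fixed-counter special case is elided in this sketch). */
    static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
    {
            x86_pmu_enable_counter(hwc, idx);
    }

    static struct x86_pmu x86_pmu = { .enable = intel_pmu_enable_counter };

    int main(void)
    {
            /* Placeholder values for illustration only. */
            struct hw_perf_counter hwc = { .config = 0x3c, .config_base = 0x186 };

            x86_pmu.enable(&hwc, 0);  /* generic code dispatches via the pointer */
            return 0;
    }

The AMD wrapper differs only in checking cpuc->enabled before calling
the generic helper, as the fourth hunk of the diff shows.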
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--   arch/x86/kernel/cpu/perf_counter.c   52
1 file changed, 24 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d8beebeb270f..ae55933ce79c 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -44,7 +44,7 @@ struct x86_pmu {
         int             (*handle_irq)(struct pt_regs *, int);
         u64             (*save_disable_all)(void);
         void            (*restore_all)(u64);
-        void            (*enable)(int, u64);
+        void            (*enable)(struct hw_perf_counter *, int);
         void            (*disable)(int, u64);
         unsigned        eventsel;
         unsigned        perfctr;
@@ -414,28 +414,15 @@ static inline void intel_pmu_ack_status(u64 ack)
         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }
 
-static void intel_pmu_enable_counter(int idx, u64 config)
+static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
-                        config | ARCH_PERFMON_EVENTSEL0_ENABLE);
-}
-
-static void amd_pmu_enable_counter(int idx, u64 config)
-{
-        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-
-        if (cpuc->enabled)
-                config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-
-        wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
-}
+        int err;
 
-static void hw_perf_enable(int idx, u64 config)
-{
         if (unlikely(!perf_counters_initialized))
                 return;
 
-        x86_pmu.enable(idx, config);
+        err = checking_wrmsrl(hwc->config_base + idx,
+                              hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static void intel_pmu_disable_counter(int idx, u64 config)
@@ -522,8 +509,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 }
 
 static inline void
-__pmc_fixed_enable(struct perf_counter *counter,
-                   struct hw_perf_counter *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
         int idx = __idx - X86_PMC_IDX_FIXED;
         u64 ctrl_val, bits, mask;
@@ -548,14 +534,24 @@ __pmc_fixed_enable(struct perf_counter *counter,
         err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void
-__x86_pmu_enable(struct perf_counter *counter,
-                 struct hw_perf_counter *hwc, int idx)
+static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
-                __pmc_fixed_enable(counter, hwc, idx);
+        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+                intel_pmu_enable_fixed(hwc, idx);
+                return;
+        }
+
+        x86_pmu_enable_counter(hwc, idx);
+}
+
+static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+        if (cpuc->enabled)
+                x86_pmu_enable_counter(hwc, idx);
         else
-                hw_perf_enable(idx, hwc->config);
+                amd_pmu_disable_counter(idx, hwc->config);
 }
 
 static int
@@ -632,7 +628,7 @@ try_generic:
         set_bit(idx, cpuc->active);
 
         x86_perf_counter_set_period(counter, hwc, idx);
-        __x86_pmu_enable(counter, hwc, idx);
+        x86_pmu.enable(hwc, idx);
 
         return 0;
 }
@@ -728,7 +724,7 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter)
         x86_perf_counter_set_period(counter, hwc, idx);
 
         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-                __x86_pmu_enable(counter, hwc, idx);
+                intel_pmu_enable_counter(hwc, idx);
 }
 
 /*
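
The Intel fixed counters handled by intel_pmu_enable_fixed() above are
not enabled through individual event-select MSRs; each one owns a small
control field inside a single shared control MSR, which is why that
function builds a bits/mask pair and read-modify-writes
hwc->config_base. Below is a minimal user-space sketch of that bit
manipulation; the 4-bit field layout is an assumption based on Intel's
architectural perfmon documentation, not something shown in this diff.

    /* Sketch: merging one fixed counter's 4-bit enable field into the shared
     * control value.  User-space stand-in; the kernel code reads the MSR,
     * edits it like this, and writes it back with checking_wrmsrl(). */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t ctrl_val = 0;  /* stands in for the current control MSR value */
            int idx = 1;            /* which fixed counter (example index) */

            /* Assumed per-counter field: bit 0 = count ring 0, bit 1 = count
             * ring 3, bit 3 = enable PMI; fields are packed 4 bits apart. */
            uint64_t bits = 0x8 | 0x2 | 0x1;
            uint64_t mask = 0xfULL << (idx * 4);

            bits <<= idx * 4;

            ctrl_val &= ~mask;      /* clear this counter's field ...        */
            ctrl_val |= bits;       /* ... then install the new enable bits  */

            printf("fixed-ctr control = 0x%llx\n", (unsigned long long)ctrl_val);
            return 0;
    }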