author     Robert Richter <robert.richter@amd.com>    2009-04-29 06:47:06 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-04-29 08:51:05 -0400
commit     b7f8859a8ed1937e2139c17b84878f1d413fa659
tree       97a4d28566ee26e3f1faf23f829c3519e7450655 /arch
parent     39d81eab2374d71b2d9c82f66258a1a4f57ddd2e
perf_counter, x86: remove get_status() from struct x86_pmu
This function is Intel-only and not needed on AMD CPUs. intel_pmu_get_status() becomes a static inline that the Intel interrupt handler calls directly, so the get_status callback can be dropped from struct x86_pmu.
[ Impact: simplify code ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-10-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
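
Editor's note on the pattern this patch applies: when only one backend ever implements an ops-table callback, the function pointer can be removed and the single implementation called directly as a static inline. A minimal user-space sketch of that idea, not kernel code; all names here (pmu_ops, intel_get_status, handle_irq) are hypothetical:

/*
 * Sketch of the cleanup pattern: drop a single-user callback from the
 * ops table and call the one implementation directly.
 */
#include <stdio.h>
#include <stdint.h>

struct pmu_ops {
	void	(*enable)(int idx);
	/* uint64_t (*get_status)(uint64_t mask);  -- removed: only one backend used it */
};

/* The single remaining implementation becomes a direct, inlinable call. */
static inline uint64_t intel_get_status(uint64_t mask)
{
	return mask & 0x3;	/* stand-in for the real MSR read */
}

static void handle_irq(uint64_t ctrl)
{
	/* before: status = ops->get_status(ctrl);  -- one indirection for one user */
	uint64_t status = intel_get_status(ctrl);

	printf("status: 0x%llx\n", (unsigned long long)status);
}

int main(void)
{
	handle_irq(0x7);
	return 0;
}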
Diffstat (limited to 'arch')
 arch/x86/kernel/cpu/perf_counter.c | 39
 1 file changed, 5 insertions(+), 34 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 9d90de0bd0b0..d0bb02919c63 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -51,7 +51,6 @@ struct x86_pmu {
 	int		(*handle_irq)(struct pt_regs *, int);
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
-	u64		(*get_status)(u64);
 	void		(*ack_status)(u64);
 	void		(*enable)(int, u64);
 	void		(*disable)(int, u64);
@@ -405,41 +404,15 @@ void hw_perf_restore(u64 ctrl)
  */
 EXPORT_SYMBOL_GPL(hw_perf_restore);
 
-static u64 intel_pmu_get_status(u64 mask)
+static inline u64 intel_pmu_get_status(u64 mask)
 {
 	u64 status;
 
-	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
-
-	return status;
-}
-
-static u64 amd_pmu_get_status(u64 mask)
-{
-	u64 status = 0;
-	int idx;
-
-	for (idx = 0; idx < nr_counters_generic; idx++) {
-		s64 val;
-
-		if (!(mask & (1 << idx)))
-			continue;
-
-		rdmsrl(MSR_K7_PERFCTR0 + idx, val);
-		val <<= (64 - counter_value_bits);
-		if (val >= 0)
-			status |= (1 << idx);
-	}
-
-	return status;
-}
-
-static u64 hw_perf_get_status(u64 mask)
-{
 	if (unlikely(!perf_counters_initialized))
 		return 0;
+	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 
-	return x86_pmu->get_status(mask);
+	return status;
 }
 
 static void intel_pmu_ack_status(u64 ack)
@@ -795,7 +768,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 
 	cpuc->throttle_ctrl = hw_perf_save_disable();
 
-	status = hw_perf_get_status(cpuc->throttle_ctrl);
+	status = intel_pmu_get_status(cpuc->throttle_ctrl);
 	if (!status)
 		goto out;
 
@@ -820,7 +793,7 @@ again:
 	/*
 	 * Repeat if there is more work to be done:
 	 */
-	status = hw_perf_get_status(cpuc->throttle_ctrl);
+	status = intel_pmu_get_status(cpuc->throttle_ctrl);
 	if (status)
 		goto again;
 out:
@@ -931,7 +904,6 @@ static struct x86_pmu intel_pmu = {
 	.handle_irq		= intel_pmu_handle_irq,
 	.save_disable_all	= intel_pmu_save_disable_all,
 	.restore_all		= intel_pmu_restore_all,
-	.get_status		= intel_pmu_get_status,
 	.ack_status		= intel_pmu_ack_status,
 	.enable			= intel_pmu_enable_counter,
 	.disable		= intel_pmu_disable_counter,
@@ -946,7 +918,6 @@ static struct x86_pmu amd_pmu = {
 	.handle_irq		= amd_pmu_handle_irq,
 	.save_disable_all	= amd_pmu_save_disable_all,
 	.restore_all		= amd_pmu_restore_all,
-	.get_status		= amd_pmu_get_status,
 	.ack_status		= amd_pmu_ack_status,
 	.enable			= amd_pmu_enable_counter,
 	.disable		= amd_pmu_disable_counter,