author     Robert Richter <robert.richter@amd.com>   2009-04-29 06:47:05 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-04-29 08:51:04 -0400
commit     39d81eab2374d71b2d9c82f66258a1a4f57ddd2e
tree       94900f94b500eb18bb5963a258740c660a33c3e3
parent     5f4ec28ffe77c840354cce1820a3436106e9e0f1
perf_counter, x86: make interrupt handler model specific
This separates the perf counter interrupt handler for AMD and Intel
CPUs. The AMD interrupt handler implementation arrives in a follow-on patch.
[ Impact: refactor and clean up code ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-9-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 arch/x86/kernel/cpu/perf_counter.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
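The heart of the change is ordinary method-table dispatch: the generic interrupt paths no longer call a hard-coded handler but go through the new handle_irq pointer in struct x86_pmu, which the vendor probe fills in at boot. Below is a minimal, self-contained sketch of that pattern as plain user-space C, not kernel code; the NOTIFY_* values are simplified stand-ins and main() plays the role of the vendor probe.

/* toy model of the vendor dispatch introduced by this patch */
#include <stdio.h>

struct pt_regs;                         /* opaque here, never dereferenced */

struct x86_pmu {
        int (*handle_irq)(struct pt_regs *, int);
};

static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
        (void)regs; (void)nmi;
        return 1;                       /* pretend an overflow was serviced */
}

/* AMD stub as in the patch: handles nothing until the follow-on patch */
static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
        (void)regs; (void)nmi;
        return 0;
}

static struct x86_pmu intel_pmu = { .handle_irq = intel_pmu_handle_irq };
static struct x86_pmu amd_pmu   = { .handle_irq = amd_pmu_handle_irq };

static struct x86_pmu *x86_pmu;         /* set once by the vendor probe */

#define NOTIFY_OK   1                   /* stand-in values, not the kernel's */
#define NOTIFY_STOP 2

/* mirrors perf_counter_nmi_handler(): a handled overflow stops the chain */
static int nmi_handler(struct pt_regs *regs)
{
        int ret = x86_pmu->handle_irq(regs, 1);

        return ret ? NOTIFY_STOP : NOTIFY_OK;
}

int main(void)
{
        x86_pmu = &amd_pmu;             /* as if booted on an AMD CPU */
        printf("amd:   %d\n", nmi_handler(NULL));

        x86_pmu = &intel_pmu;           /* as if booted on an Intel CPU */
        printf("intel: %d\n", nmi_handler(NULL));
        return 0;
}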
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 808a1a113463..9d90de0bd0b0 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -4,6 +4,7 @@
  * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2009 Jaswinder Singh Rajput
+ * Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter
  *
  * For licencing details see kernel-base/COPYING
  */
@@ -47,6 +48,7 @@ struct cpu_hw_counters {
  * struct x86_pmu - generic x86 pmu
  */
 struct x86_pmu {
+	int		(*handle_irq)(struct pt_regs *, int);
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
 	u64		(*get_status)(u64);
@@ -241,6 +243,10 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 	int err;
 
+	/* disable temporarily */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return -ENOSYS;
+
 	if (unlikely(!perf_counters_initialized))
 		return -EINVAL;
 
@@ -780,7 +786,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
  */
-static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
+static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
 	int bit, cpu = smp_processor_id();
 	u64 ack, status;
@@ -827,6 +833,8 @@ out:
 	return ret;
 }
 
+static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; }
+
 void perf_counter_unthrottle(void)
 {
 	struct cpu_hw_counters *cpuc;
@@ -851,7 +859,7 @@ void smp_perf_counter_interrupt(struct pt_regs *regs)
 	irq_enter();
 	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
 	ack_APIC_irq();
-	__smp_perf_counter_interrupt(regs, 0);
+	x86_pmu->handle_irq(regs, 0);
 	irq_exit();
 }
 
@@ -908,7 +916,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
 	regs = args->regs;
 
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	ret = __smp_perf_counter_interrupt(regs, 1);
+	ret = x86_pmu->handle_irq(regs, 1);
 
 	return ret ? NOTIFY_STOP : NOTIFY_OK;
 }
@@ -920,6 +928,7 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 };
 
 static struct x86_pmu intel_pmu = {
+	.handle_irq		= intel_pmu_handle_irq,
 	.save_disable_all	= intel_pmu_save_disable_all,
 	.restore_all		= intel_pmu_restore_all,
 	.get_status		= intel_pmu_get_status,
@@ -934,6 +943,7 @@ static struct x86_pmu intel_pmu = {
 };
 
 static struct x86_pmu amd_pmu = {
+	.handle_irq		= amd_pmu_handle_irq,
 	.save_disable_all	= amd_pmu_save_disable_all,
 	.restore_all		= amd_pmu_restore_all,
 	.get_status		= amd_pmu_get_status,
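Two details of the staging are worth noting: __hw_perf_counter_init() now refuses AMD CPUs with -ENOSYS, so no AMD counter can be created while the handler is still the empty amd_pmu_handle_irq() stub; and because the stub returns 0, the NMI notifier answers NOTIFY_OK rather than NOTIFY_STOP, passing unclaimed NMIs on to other handlers. Presumably both the guard and the stub are replaced once the AMD handler from the follow-on patch lands.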