author     Robert Richter <robert.richter@amd.com>   2009-04-29 06:47:09 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-04-29 08:51:06 -0400
commit     55de0f2e57994b525324bf0d04d242d9358a2417
tree       2467fa8dba81135fed5430c488685a12789ccc29 /arch
parent     26816c287e13eedc67bc4ed0cd40c138314b7c7d
perf_counter, x86: rename intel only functions
[ Impact: cleanup ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-13-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
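The rename belongs to a series that splits the x86 perf_counter code along vendor lines: vendor-neutral helpers keep a generic prefix, while routines that touch Intel-specific hardware (such as the global-control state saved and restored in the hunks below) get an intel_pmu_ prefix, with amd_pmu_ counterparts reached through a table of function pointers. Below is a minimal, compilable userspace sketch of that convention, not the kernel code itself: struct pmu_ops and its members are hypothetical stand-ins for the kernel's struct x86_pmu, and the MSR work is replaced by prints.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified dispatch table in the spirit of the kernel's
 * struct x86_pmu: generic callers only see the ops table, while each
 * implementation carries an explicit vendor prefix. */
struct pmu_ops {
	uint64_t (*save_disable_all)(void);
	void     (*restore_all)(uint64_t ctrl);
};

static uint64_t intel_pmu_save_disable_all(void)
{
	/* the real function saves and clears the Intel global-control MSR */
	puts("intel: save and disable all counters");
	return 0x3ULL;
}

static void intel_pmu_restore_all(uint64_t ctrl)
{
	printf("intel: restore global ctrl %#llx\n", (unsigned long long)ctrl);
}

static const struct pmu_ops intel_pmu = {
	.save_disable_all = intel_pmu_save_disable_all,
	.restore_all      = intel_pmu_restore_all,
};

int main(void)
{
	/* vendor-neutral caller: no intel_* name leaks out of the table */
	uint64_t ctrl = intel_pmu.save_disable_all();
	intel_pmu.restore_all(ctrl);
	return 0;
}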
Diffstat (limited to 'arch')
 arch/x86/kernel/cpu/perf_counter.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index fa6541d781bc..5a52d73ccfa7 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -725,7 +725,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
  */
-static void perf_save_and_restart(struct perf_counter *counter)
+static void intel_pmu_save_and_restart(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	int idx = hwc->idx;
@@ -753,7 +753,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
 	int ret = 0;
 
-	cpuc->throttle_ctrl = hw_perf_save_disable();
+	cpuc->throttle_ctrl = intel_pmu_save_disable_all();
 
 	status = intel_pmu_get_status(cpuc->throttle_ctrl);
 	if (!status)
@@ -770,7 +770,7 @@ again:
 		if (!counter)
 			continue;
 
-		perf_save_and_restart(counter);
+		intel_pmu_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
 			__x86_pmu_disable(counter, &counter->hw, bit);
 	}
@@ -788,7 +788,7 @@ out:
 	 * Restore - do not reenable when global enable is off or throttled:
 	 */
 	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
-		hw_perf_restore(cpuc->throttle_ctrl);
+		intel_pmu_restore_all(cpuc->throttle_ctrl);
 
 	return ret;
 }
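All four hunks sit inside the Intel overflow NMI path, whose shape the diff makes visible: save and disable all counters, read the overflow status, save-and-restart each expired counter, then re-enable only while the per-CPU interrupt count stays under PERFMON_MAX_INTERRUPTS. The following is a rough, self-contained sketch of that control flow under simplified assumptions: plain C, hardware accesses replaced by *_stub functions, and every name hypothetical except those taken from the diff.

#include <stdint.h>
#include <stdio.h>

#define PERFMON_MAX_INTERRUPTS 1000	/* illustrative; the kernel defines its own */
#define NUM_COUNTERS 4

struct cpu_hw_state {
	unsigned int interrupts;	/* NMIs seen since the last unthrottle */
	uint64_t     throttle_ctrl;	/* saved global-enable bits */
};

/* Stubs standing in for the MSR accesses in the real intel_pmu_* helpers. */
static uint64_t intel_pmu_save_disable_all_stub(void) { return 0xfULL; }
static void intel_pmu_restore_all_stub(uint64_t ctrl)
{
	printf("re-enable counters, ctrl=%#llx\n", (unsigned long long)ctrl);
}
static uint64_t intel_pmu_get_status_stub(void) { return 0x5ULL; }
static void intel_pmu_save_and_restart_stub(int idx)
{
	printf("counter %d: update count, re-arm period\n", idx);
}

static int handle_overflow_nmi(struct cpu_hw_state *cpuc)
{
	uint64_t status;
	int bit, ret = 0;

	/* quiesce the PMU while handling overflows, remembering its state */
	cpuc->throttle_ctrl = intel_pmu_save_disable_all_stub();

	status = intel_pmu_get_status_stub();
	for (bit = 0; bit < NUM_COUNTERS; bit++) {
		if (!(status & (1ULL << bit)))
			continue;
		intel_pmu_save_and_restart_stub(bit);
		ret = 1;
	}

	/* restore - do not reenable when throttled */
	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
		intel_pmu_restore_all_stub(cpuc->throttle_ctrl);

	return ret;
}

int main(void)
{
	struct cpu_hw_state cpuc = { 0, 0 };
	return !handle_overflow_nmi(&cpuc);
}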