author	Robert Richter <robert.richter@amd.com>	2009-04-29 06:47:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:51:10 -0400
commit	6f00cada07bb5da7f751929d3173494dcc5446cc (patch)
tree	fa7b03e4812a2847d54fd0443284b748f4ca0d62 /arch
parent	095342389e2ed8deed07b3076f990260ce3c7c9f (diff)
perf_counter, x86: consistent use of type int for counter index
The type of counter index is sometimes implemented as unsigned
int. This patch changes this to have a consistent usage of int.

[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-21-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
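The stated rationale is consistency. As a general illustration of why mixed signedness around index arithmetic such as __idx - X86_PMC_IDX_FIXED is worth avoiding, the following minimal, standalone C sketch (not part of the patch; the value 32 for X86_PMC_IDX_FIXED is assumed) shows how an unsigned index wraps instead of going negative:

#include <stdio.h>

/*
 * Standalone illustration, not kernel code.  X86_PMC_IDX_FIXED is
 * assumed to be 32, as in the perf_counter code of this period.
 */
#define X86_PMC_IDX_FIXED 32

int main(void)
{
	unsigned int uidx = 2;	/* generic counter index kept as unsigned */
	int idx = 2;		/* the same index kept as plain int */

	/*
	 * With an unsigned index the subtraction wraps around instead of
	 * going negative, so a later "below the fixed-counter base?"
	 * comparison against 0 can never trigger.
	 */
	printf("unsigned: %u\n", uidx - X86_PMC_IDX_FIXED);	/* 4294967266 */
	printf("int:      %d\n", idx - X86_PMC_IDX_FIXED);	/* -30 */

	return 0;
}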
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index f7fd4a355159..d8beebeb270f 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -459,7 +459,7 @@ static void hw_perf_disable(int idx, u64 config)
 
 static inline void
 __pmc_fixed_disable(struct perf_counter *counter,
-		    struct hw_perf_counter *hwc, unsigned int __idx)
+		    struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
@@ -474,7 +474,7 @@ __pmc_fixed_disable(struct perf_counter *counter,
 
 static inline void
 __x86_pmu_disable(struct perf_counter *counter,
-		  struct hw_perf_counter *hwc, unsigned int idx)
+		  struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
 		__pmc_fixed_disable(counter, hwc, idx);
@@ -523,7 +523,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 
 static inline void
 __pmc_fixed_enable(struct perf_counter *counter,
-		   struct hw_perf_counter *hwc, unsigned int __idx)
+		   struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
@@ -691,7 +691,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
-	unsigned int idx = hwc->idx;
+	int idx = hwc->idx;
 
 	/*
 	 * Must be done before we disable, otherwise the nmi handler