author     Jaswinder Singh Rajput <jaswinder@infradead.org>   2008-12-27 08:45:43 -0500
committer  Ingo Molnar <mingo@elte.hu>                        2008-12-27 10:00:51 -0500
commit     2b583d8bc8d7105b58d7481a4a0ceb718dac49c6 (patch)
tree       1bdae30cbcb2c51b168b96ee26809de07f226484
parent     01ea1ccaa24dea3552e103be13b7897211607a8b (diff)
x86: perf_counter remove unwanted hw_perf_enable_all
Impact: clean, reduce kernel size a bit, avoid sparse warnings

Fixes sparse warnings:

arch/x86/kernel/cpu/perf_counter.c:153:6: warning: symbol 'hw_perf_enable_all' was not declared. Should it be static?
arch/x86/kernel/cpu/perf_counter.c:279:3: warning: returning void-valued expression
arch/x86/kernel/cpu/perf_counter.c:206:3: warning: returning void-valued expression
arch/x86/kernel/cpu/perf_counter.c:206:3: warning: returning void-valued expression

Signed-off-by: Jaswinder Singh Rajput <jaswinderrajput@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  24
1 file changed, 7 insertions(+), 17 deletions(-)
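For readers who have not seen these sparse diagnostics before, the sketch below reproduces the "returning void-valued expression" pattern that sparse flags, and the plain if/else form this patch switches to. It is a minimal, standalone illustration: the function names (fixed_disable, generic_disable_before/after) are hypothetical stand-ins, not the kernel's __pmc_* helpers, and printf stands in for the wrmsr calls.

#include <stdio.h>

static void fixed_disable(int idx)
{
	printf("fixed counter %d disabled\n", idx);
}

/*
 * Before: "return <void expression>;" inside a void function.
 * gcc accepts this as an extension, but sparse (and ISO C) object,
 * hence "warning: returning void-valued expression".
 */
static void generic_disable_before(int idx, int is_fixed)
{
	if (is_fixed)
		return fixed_disable(idx);

	printf("generic counter %d disabled\n", idx);
}

/* After: an ordinary if/else; nothing is "returned" from a void function. */
static void generic_disable_after(int idx, int is_fixed)
{
	if (is_fixed)
		fixed_disable(idx);
	else
		printf("generic counter %d disabled\n", idx);
}

int main(void)
{
	generic_disable_before(0, 1);
	generic_disable_after(1, 0);
	return 0;
}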
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index da46eca12543..9376771f757b 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -150,14 +150,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	return 0;
 }
 
-void hw_perf_enable_all(void)
-{
-	if (unlikely(!perf_counters_initialized))
-		return;
-
-	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask);
-}
-
 u64 hw_perf_save_disable(void)
 {
 	u64 ctrl;
@@ -200,12 +192,10 @@ static inline void
 __pmc_generic_disable(struct perf_counter *counter,
 			   struct hw_perf_counter *hwc, unsigned int idx)
 {
-	int err;
-
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
-		return __pmc_fixed_disable(counter, hwc, idx);
-
-	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
+		__pmc_fixed_disable(counter, hwc, idx);
+	else
+		wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
 }
 
 static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
@@ -276,10 +266,10 @@ __pmc_generic_enable(struct perf_counter *counter,
 			  struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
-		return __pmc_fixed_enable(counter, hwc, idx);
-
-	wrmsr(hwc->config_base + idx,
-	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
+		__pmc_fixed_enable(counter, hwc, idx);
+	else
+		wrmsr(hwc->config_base + idx,
+		      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
 }
 
 static int