about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-12-09 06:18:18 -0500
committerIngo Molnar <mingo@elte.hu>2008-12-09 13:28:49 -0500
commit1e12567678054bc1d4c944ecfad17624b3e49345 (patch)
tree6f421d80b1bf3c68d0cc65092a86c8a9075f2db1 /arch
parent7e2ae34749edf19e76e594b9c4b2cdde1066afc5 (diff)
perfcounters, x86: clean up debug code
Impact: cleanup. Get rid of unused debug code. Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c35
1 files changed, 12 insertions, 23 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 7d528ffc2d26..919ec46679b2 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -214,13 +214,11 @@ static void __hw_perf_save_counter(struct perf_counter *counter,
214{ 214{
215 s64 raw = -1; 215 s64 raw = -1;
216 s64 delta; 216 s64 delta;
217 int err;
218 217
219 /* 218 /*
220 * Get the raw hw counter value: 219 * Get the raw hw counter value:
221 */ 220 */
222 err = rdmsrl_safe(hwc->counter_base + idx, &raw); 221 rdmsrl(hwc->counter_base + idx, raw);
223 WARN_ON_ONCE(err);
224 222
225 /* 223 /*
226 * Rebase it to zero (it started counting at -irq_period), 224 * Rebase it to zero (it started counting at -irq_period),
@@ -252,20 +250,18 @@ static void __hw_perf_save_counter(struct perf_counter *counter,
252void perf_counter_print_debug(void) 250void perf_counter_print_debug(void)
253{ 251{
254 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count; 252 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count;
255 int cpu, err, idx; 253 int cpu, idx;
254
255 if (!nr_hw_counters)
256 return;
256 257
257 local_irq_disable(); 258 local_irq_disable();
258 259
259 cpu = smp_processor_id(); 260 cpu = smp_processor_id();
260 261
261 err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_CTRL, &ctrl); 262 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
262 WARN_ON_ONCE(err); 263 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
263 264 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
264 err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_STATUS, &status);
265 WARN_ON_ONCE(err);
266
267 err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_OVF_CTRL, &overflow);
268 WARN_ON_ONCE(err);
269 265
270 printk(KERN_INFO "\n"); 266 printk(KERN_INFO "\n");
271 printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl); 267 printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl);
@@ -273,11 +269,8 @@ void perf_counter_print_debug(void)
273 printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); 269 printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);
274 270
275 for (idx = 0; idx < nr_hw_counters; idx++) { 271 for (idx = 0; idx < nr_hw_counters; idx++) {
276 err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl); 272 rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
277 WARN_ON_ONCE(err); 273 rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);
278
279 err = rdmsrl_safe(MSR_ARCH_PERFMON_PERFCTR0 + idx, &pmc_count);
280 WARN_ON_ONCE(err);
281 274
282 next_count = per_cpu(prev_next_count[idx], cpu); 275 next_count = per_cpu(prev_next_count[idx], cpu);
283 276
@@ -310,13 +303,11 @@ void hw_perf_counter_read(struct perf_counter *counter)
310 unsigned long addr = hwc->counter_base + hwc->idx; 303 unsigned long addr = hwc->counter_base + hwc->idx;
311 s64 offs, val = -1LL; 304 s64 offs, val = -1LL;
312 s32 val32; 305 s32 val32;
313 int err;
314 306
315 /* Careful: NMI might modify the counter offset */ 307 /* Careful: NMI might modify the counter offset */
316 do { 308 do {
317 offs = hwc->prev_count; 309 offs = hwc->prev_count;
318 err = rdmsrl_safe(addr, &val); 310 rdmsrl(addr, val);
319 WARN_ON_ONCE(err);
320 } while (offs != hwc->prev_count); 311 } while (offs != hwc->prev_count);
321 312
322 val32 = (s32) val; 313 val32 = (s32) val;
@@ -346,10 +337,8 @@ static void perf_save_and_restart(struct perf_counter *counter)
346 struct hw_perf_counter *hwc = &counter->hw; 337 struct hw_perf_counter *hwc = &counter->hw;
347 int idx = hwc->idx; 338 int idx = hwc->idx;
348 u64 pmc_ctrl; 339 u64 pmc_ctrl;
349 int err;
350 340
351 err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl); 341 rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
352 WARN_ON_ONCE(err);
353 342
354 __hw_perf_save_counter(counter, hwc, idx); 343 __hw_perf_save_counter(counter, hwc, idx);
355 __hw_perf_counter_set_period(hwc, idx); 344 __hw_perf_counter_set_period(hwc, idx);