author     Robert Richter <robert.richter@amd.com>    2009-04-29 06:47:01 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-04-29 08:51:02 -0400
commit     4295ee62660b13ddb87d41539f49b239e6e7d56f
tree       bdd7c1b1c1b4c1c4b05d42d4837c6fbf8ad5e5f8
parent     4138960a9251a265002b5cf07e671a49f8495381
perf_counter, x86: rework pmc_amd_save_disable_all() and pmc_amd_restore_all()
MSR reads and writes are expensive. This patch adds checks to avoid
their usage where possible.

[ Impact: micro-optimization on AMD CPUs ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-5-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
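The idea behind the rework: a counter that is not set in active_mask needs
no MSR access at all, and an eventsel whose enable bit already has the
desired value needs no write. The following minimal userspace sketch
illustrates that pattern; the fake_msr array, the rd()/wr() helpers, the
mask value and the access counter are illustrative stand-ins, not kernel
code.

#include <stdio.h>
#include <stdint.h>

#define NR_COUNTERS 4
#define ENABLE_BIT  (1ULL << 22)           /* stands in for ARCH_PERFMON_EVENTSEL0_ENABLE */

static uint64_t fake_msr[NR_COUNTERS];     /* stands in for MSR_K7_EVNTSEL0 + idx */
static unsigned long active_mask = 0x5;    /* counters 0 and 2 are in use */
static int msr_accesses;                   /* counts simulated rdmsr/wrmsr operations */

static uint64_t rd(int idx)             { msr_accesses++; return fake_msr[idx]; }
static void     wr(int idx, uint64_t v) { msr_accesses++; fake_msr[idx] = v; }

static void disable_all(void)
{
	int idx;

	for (idx = 0; idx < NR_COUNTERS; idx++) {
		uint64_t val;

		if (!(active_mask & (1UL << idx)))
			continue;               /* inactive: no MSR access at all */
		val = rd(idx);
		if (!(val & ENABLE_BIT))
			continue;               /* already disabled: skip the write */
		wr(idx, val & ~ENABLE_BIT);
	}
}

int main(void)
{
	fake_msr[0] = ENABLE_BIT;               /* counter 0 enabled, counter 2 already off */
	disable_all();                          /* 2 reads + 1 write */
	disable_all();                          /* 2 reads, nothing left to write */
	printf("simulated MSR accesses: %d\n", msr_accesses);  /* prints 5 */
	return 0;
}

A naive loop that unconditionally did rdmsrl/wrmsrl on every counter would
perform 16 accesses in this scenario instead of 5; on real hardware each
avoided access saves a slow MSR operation, which is the point of the patch.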
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d6d6529349dd..75a090394b6d 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -334,11 +334,13 @@ static u64 pmc_amd_save_disable_all(void)
 	for (idx = 0; idx < nr_counters_generic; idx++) {
 		u64 val;
 
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-			val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+			continue;
+		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 
 	return enabled;
@@ -372,13 +374,15 @@ static void pmc_amd_restore_all(u64 ctrl)
 		return;
 
 	for (idx = 0; idx < nr_counters_generic; idx++) {
-		if (test_bit(idx, cpuc->active_mask)) {
-			u64 val;
+		u64 val;
 
-			rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+			continue;
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 }
 