author		Cyrill Gorcunov <gorcunov@gmail.com>	2008-06-24 16:52:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-08 06:51:39 -0400
commit		47a486cc110fe77518c79a566b50a5c785c813ae (patch)
tree		a7e6849bd5a0a651400b1c0a3f9de9064c29c862 /arch/x86/kernel/cpu
parent		2bc0d2615a15a93d344abbe8cb1b9056122bce9d (diff)
x86: perfctr-watchdog.c - coding style cleanup

Just some code beautification. Nothing else.

Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: macro@linux-mips.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu')
 arch/x86/kernel/cpu/perfctr-watchdog.c | 202 +++++++++++++++++--------------
 1 file changed, 112 insertions(+), 90 deletions(-)
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index ddda4b64f545..2e9bef6e3aa3 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -1,11 +1,15 @@
-/* local apic based NMI watchdog for various CPUs.
-   This file also handles reservation of performance counters for coordination
-   with other users (like oprofile).
-
-   Note that these events normally don't tick when the CPU idles. This means
-   the frequency varies with CPU load.
-
-   Original code for K7/P6 written by Keith Owens */
+/*
+ * local apic based NMI watchdog for various CPUs.
+ *
+ * This file also handles reservation of performance counters for coordination
+ * with other users (like oprofile).
+ *
+ * Note that these events normally don't tick when the CPU idles. This means
+ * the frequency varies with CPU load.
+ *
+ * Original code for K7/P6 written by Keith Owens
+ *
+ */
 
 #include <linux/percpu.h>
 #include <linux/module.h>
@@ -36,12 +40,16 @@ struct wd_ops {
 
 static const struct wd_ops *wd_ops;
 
-/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
- * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
- */
+/*
+ * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
+ * offset from MSR_P4_BSU_ESCR0.
+ *
+ * It will be the max for all platforms (for now)
+ */
 #define NMI_MAX_COUNTER_BITS 66
 
-/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
+/*
+ * perfctr_nmi_owner tracks the ownership of the perfctr registers:
  * evtsel_nmi_owner tracks the ownership of the event selection
  * - different performance counters/ event selection may be reserved for
  *   different subsystems this reservation system just tries to coordinate
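
The ownership scheme this comment describes boils down to a bitmap claimed with atomic bit operations: test_and_set_bit() either grants a counter to the caller or reports it taken. A rough standalone sketch of that idea in plain C11 atomics (hypothetical names; the kernel itself uses DECLARE_BITMAP() plus test_and_set_bit()/clear_bit()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_COUNTER_BITS 66	/* mirrors NMI_MAX_COUNTER_BITS above */

static _Atomic uint64_t owner_bitmap[(MAX_COUNTER_BITS + 63) / 64];

/* Try to claim a counter; true means the bit was free and is now ours. */
static bool reserve_counter(unsigned int bit)
{
	uint64_t mask = UINT64_C(1) << (bit % 64);
	uint64_t old = atomic_fetch_or(&owner_bitmap[bit / 64], mask);

	return !(old & mask);
}

/* Give a claimed counter back. */
static void release_counter(unsigned int bit)
{
	atomic_fetch_and(&owner_bitmap[bit / 64], ~(UINT64_C(1) << (bit % 64)));
}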
@@ -73,8 +81,10 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
 	return 0;
 }
 
-/* converts an msr to an appropriate reservation bit */
-/* returns the bit offset of the event selection register */
+/*
+ * converts an msr to an appropriate reservation bit
+ * returns the bit offset of the event selection register
+ */
 static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 {
 	/* returns the bit offset of the event selection register */
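
Both helpers in this hunk rely on the perfctr/evntsel MSRs sitting in contiguous blocks, so an MSR address minus the per-vendor base yields a stable bitmap index. A simplified sketch of that mapping (the MSR base values are the real ones; the helper itself is an illustrative stand-in, not the kernel's exact nmi_perfctr_msr_to_bit()):

/* Real MSR bases: AMD K7 counters start at 0xc0010004, Intel P6 at 0xc1. */
#define MSR_K7_PERFCTR0	0xc0010004
#define MSR_P6_PERFCTR0	0x000000c1

/* Map a perfctr MSR address to its reservation-bitmap bit. */
static unsigned int perfctr_msr_to_bit(unsigned int msr, int is_amd)
{
	return is_amd ? msr - MSR_K7_PERFCTR0 : msr - MSR_P6_PERFCTR0;
}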
@@ -114,6 +124,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr)
 
 	return (!test_bit(counter, perfctr_nmi_owner));
 }
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
 
 int reserve_perfctr_nmi(unsigned int msr)
 {
@@ -128,6 +139,7 @@ int reserve_perfctr_nmi(unsigned int msr)
 		return 1;
 	return 0;
 }
+EXPORT_SYMBOL(reserve_perfctr_nmi);
 
 void release_perfctr_nmi(unsigned int msr)
 {
@@ -140,6 +152,7 @@ void release_perfctr_nmi(unsigned int msr)
 
 	clear_bit(counter, perfctr_nmi_owner);
 }
+EXPORT_SYMBOL(release_perfctr_nmi);
 
 int reserve_evntsel_nmi(unsigned int msr)
 {
@@ -154,6 +167,7 @@ int reserve_evntsel_nmi(unsigned int msr)
 		return 1;
 	return 0;
 }
+EXPORT_SYMBOL(reserve_evntsel_nmi);
 
 void release_evntsel_nmi(unsigned int msr)
 {
@@ -166,11 +180,6 @@ void release_evntsel_nmi(unsigned int msr)
 
 	clear_bit(counter, evntsel_nmi_owner);
 }
-
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
-EXPORT_SYMBOL(reserve_perfctr_nmi);
-EXPORT_SYMBOL(release_perfctr_nmi);
-EXPORT_SYMBOL(reserve_evntsel_nmi);
 EXPORT_SYMBOL(release_evntsel_nmi);
 
 void disable_lapic_nmi_watchdog(void)
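
With the EXPORT_SYMBOL() lines now sitting directly under their functions, the exported API reads as a unit. A consumer such as oprofile would coordinate with the watchdog along these lines (a hedged sketch, not oprofile's actual code; claim_counter_pair() is a hypothetical helper):

#include <linux/errno.h>
#include <asm/nmi.h>	/* reserve_perfctr_nmi() and friends */

/* Claim a perfctr/evntsel MSR pair, backing out cleanly on conflict. */
static int claim_counter_pair(unsigned int perfctr, unsigned int evntsel)
{
	if (!reserve_perfctr_nmi(perfctr))
		return -EBUSY;	/* the watchdog (or another user) owns it */
	if (!reserve_evntsel_nmi(evntsel)) {
		release_perfctr_nmi(perfctr);
		return -EBUSY;
	}
	return 0;
}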
@@ -234,8 +243,8 @@ static unsigned int adjust_for_32bit_ctr(unsigned int hz)
 	return retval;
 }
 
-static void
-write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz)
+static void write_watchdog_counter(unsigned int perfctr_msr,
+				   const char *descr, unsigned nmi_hz)
 {
 	u64 count = (u64)cpu_khz * 1000;
 
@@ -246,7 +255,7 @@ write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi
 }
 
 static void write_watchdog_counter32(unsigned int perfctr_msr,
-			const char *descr, unsigned nmi_hz)
+				     const char *descr, unsigned nmi_hz)
 {
 	u64 count = (u64)cpu_khz * 1000;
 
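
Both write_watchdog_counter() variants touched above program the counter with a negated tick count, so the hardware counts up to overflow and raises the NMI nmi_hz times per second. The arithmetic as a standalone sketch (the cpu_khz value is an example; the kernel uses the calibrated one, and for 32-bit counters it first caps nmi_hz via adjust_for_32bit_ctr() so the count fits):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_khz = 2000000;	/* example: a 2 GHz CPU */
	unsigned nmi_hz = 1;		/* one watchdog NMI per second */
	uint64_t count = cpu_khz * 1000 / nmi_hz;

	/* 64-bit MSR write takes the full negated count... */
	printf("wrmsrl value: 0x%016llx\n", (unsigned long long)(0 - count));

	/* ...32-bit counters (P6/ARCH_PERFMON) truncate it to 32 bits */
	printf("wrmsr value:  0x%08x\n", (unsigned)(uint32_t)(0 - count));
	return 0;
}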
@@ -256,9 +265,10 @@ static void write_watchdog_counter32(unsigned int perfctr_msr,
 	wrmsr(perfctr_msr, (u32)(-count), 0);
 }
 
-/* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface
-   nicely stable so there is not much variety */
-
+/*
+ * AMD K7/K8/Family10h/Family11h support.
+ * AMD keeps this interface nicely stable so there is not much variety
+ */
 #define K7_EVNTSEL_ENABLE	(1 << 22)
 #define K7_EVNTSEL_INT		(1 << 20)
 #define K7_EVNTSEL_OS		(1 << 17)
@@ -291,7 +301,7 @@ static int setup_k7_watchdog(unsigned nmi_hz)
 
 	wd->perfctr_msr = perfctr_msr;
 	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = 0;  //unused
+	wd->cccr_msr = 0;  /* unused */
 	return 1;
 }
 
@@ -327,18 +337,19 @@ static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
 }
 
 static const struct wd_ops k7_wd_ops = {
-	.reserve = single_msr_reserve,
-	.unreserve = single_msr_unreserve,
-	.setup = setup_k7_watchdog,
-	.rearm = single_msr_rearm,
-	.stop = single_msr_stop_watchdog,
-	.perfctr = MSR_K7_PERFCTR0,
-	.evntsel = MSR_K7_EVNTSEL0,
-	.checkbit = 1ULL<<47,
+	.reserve	= single_msr_reserve,
+	.unreserve	= single_msr_unreserve,
+	.setup		= setup_k7_watchdog,
+	.rearm		= single_msr_rearm,
+	.stop		= single_msr_stop_watchdog,
+	.perfctr	= MSR_K7_PERFCTR0,
+	.evntsel	= MSR_K7_EVNTSEL0,
+	.checkbit	= 1ULL << 47,
 };
 
-/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */
-
+/*
+ * Intel Model 6 (PPro+,P2,P3,P-M,Core1)
+ */
 #define P6_EVNTSEL0_ENABLE	(1 << 22)
 #define P6_EVNTSEL_INT		(1 << 20)
 #define P6_EVNTSEL_OS		(1 << 17)
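
The realigned k7_wd_ops above is one instance of this file's central pattern: a const table of function pointers per CPU family, chosen once in probe_nmi_watchdog() and called through wd_ops afterwards. The shape of that dispatch, sketched with illustrative stand-in names:

struct watchdog_ops {
	int (*setup)(unsigned nmi_hz);
	void (*rearm)(unsigned nmi_hz);
	void (*stop)(void);
	unsigned long long checkbit;	/* top implemented counter bit */
};

static int k7_setup(unsigned nmi_hz) { (void)nmi_hz; return 1; }
static void k7_rearm(unsigned nmi_hz) { (void)nmi_hz; }
static void k7_stop(void) { }

static const struct watchdog_ops k7_ops = {
	.setup		= k7_setup,
	.rearm		= k7_rearm,
	.stop		= k7_stop,
	.checkbit	= 1ULL << 47,
};

static const struct watchdog_ops *ops;

static void probe(int vendor_is_amd)
{
	if (vendor_is_amd)
		ops = &k7_ops;	/* the real probe also picks p6/p4/arch-perfmon */
}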
@@ -374,52 +385,58 @@ static int setup_p6_watchdog(unsigned nmi_hz)
 
 	wd->perfctr_msr = perfctr_msr;
 	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = 0;  //unused
+	wd->cccr_msr = 0;  /* unused */
 	return 1;
 }
 
 static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
 {
-	/* P6 based Pentium M need to re-unmask
+	/*
+	 * P6 based Pentium M need to re-unmask
 	 * the apic vector but it doesn't hurt
 	 * other P6 variant.
-	 * ArchPerfom/Core Duo also needs this */
+	 * ArchPerfom/Core Duo also needs this
+	 */
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+
 	/* P6/ARCH_PERFMON has 32 bit counter write */
 	write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz);
 }
 
 static const struct wd_ops p6_wd_ops = {
-	.reserve = single_msr_reserve,
-	.unreserve = single_msr_unreserve,
-	.setup = setup_p6_watchdog,
-	.rearm = p6_rearm,
-	.stop = single_msr_stop_watchdog,
-	.perfctr = MSR_P6_PERFCTR0,
-	.evntsel = MSR_P6_EVNTSEL0,
-	.checkbit = 1ULL<<39,
+	.reserve	= single_msr_reserve,
+	.unreserve	= single_msr_unreserve,
+	.setup		= setup_p6_watchdog,
+	.rearm		= p6_rearm,
+	.stop		= single_msr_stop_watchdog,
+	.perfctr	= MSR_P6_PERFCTR0,
+	.evntsel	= MSR_P6_EVNTSEL0,
+	.checkbit	= 1ULL << 39,
 };
 
-/* Intel P4 performance counters. By far the most complicated of all. */
-
-#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
-#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
-#define P4_ESCR_OS		(1<<3)
-#define P4_ESCR_USR		(1<<2)
-#define P4_CCCR_OVF_PMI0	(1<<26)
-#define P4_CCCR_OVF_PMI1	(1<<27)
-#define P4_CCCR_THRESHOLD(N)	((N)<<20)
-#define P4_CCCR_COMPLEMENT	(1<<19)
-#define P4_CCCR_COMPARE		(1<<18)
-#define P4_CCCR_REQUIRED	(3<<16)
-#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
-#define P4_CCCR_ENABLE		(1<<12)
-#define P4_CCCR_OVF		(1<<31)
+/*
+ * Intel P4 performance counters.
+ * By far the most complicated of all.
+ */
+#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1 << 7)
+#define P4_ESCR_EVENT_SELECT(N)	((N) << 25)
+#define P4_ESCR_OS		(1 << 3)
+#define P4_ESCR_USR		(1 << 2)
+#define P4_CCCR_OVF_PMI0	(1 << 26)
+#define P4_CCCR_OVF_PMI1	(1 << 27)
+#define P4_CCCR_THRESHOLD(N)	((N) << 20)
+#define P4_CCCR_COMPLEMENT	(1 << 19)
+#define P4_CCCR_COMPARE		(1 << 18)
+#define P4_CCCR_REQUIRED	(3 << 16)
+#define P4_CCCR_ESCR_SELECT(N)	((N) << 13)
+#define P4_CCCR_ENABLE		(1 << 12)
+#define P4_CCCR_OVF		(1 << 31)
 
-/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
-   CRU_ESCR0 (with any non-null event selector) through a complemented
-   max threshold. [IA32-Vol3, Section 14.9.9] */
-
+/*
+ * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
+ * CRU_ESCR0 (with any non-null event selector) through a complemented
+ * max threshold. [IA32-Vol3, Section 14.9.9]
+ */
 static int setup_p4_watchdog(unsigned nmi_hz)
 {
 	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
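
The reformatted comment block explains the trick setup_p4_watchdog() uses: compare every cycle's event count against a complemented maximum threshold, so the comparison always passes and the counter ticks like a clock. Composed with the macros from the hunk above (repeated here so the fragment stands alone), the CCCR value is built up roughly like this:

#define P4_CCCR_OVF_PMI0	(1 << 26)
#define P4_CCCR_THRESHOLD(N)	((N) << 20)
#define P4_CCCR_COMPLEMENT	(1 << 19)
#define P4_CCCR_COMPARE		(1 << 18)
#define P4_CCCR_REQUIRED	(3 << 16)
#define P4_CCCR_ESCR_SELECT(N)	((N) << 13)
#define P4_CCCR_ENABLE		(1 << 12)

/*
 * "Count when the event count is NOT above 15" is always true, so the
 * counter advances every cycle; ESCR_SELECT(4) routes CRU_ESCR0.
 */
unsigned int cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4)
		      | P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT
		      | P4_CCCR_COMPARE | P4_CCCR_REQUIRED
		      | P4_CCCR_ENABLE;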
@@ -444,7 +461,8 @@ static int setup_p4_watchdog(unsigned nmi_hz)
 #endif
 	ht_num = 0;
 
-	/* performance counters are shared resources
+	/*
+	 * performance counters are shared resources
 	 * assign each hyperthread its own set
 	 * (re-use the ESCR0 register, seems safe
 	 * and keeps the cccr_val the same)
@@ -542,20 +560,21 @@ static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
 }
 
 static const struct wd_ops p4_wd_ops = {
-	.reserve = p4_reserve,
-	.unreserve = p4_unreserve,
-	.setup = setup_p4_watchdog,
-	.rearm = p4_rearm,
-	.stop = stop_p4_watchdog,
+	.reserve	= p4_reserve,
+	.unreserve	= p4_unreserve,
+	.setup		= setup_p4_watchdog,
+	.rearm		= p4_rearm,
+	.stop		= stop_p4_watchdog,
 	/* RED-PEN this is wrong for the other sibling */
-	.perfctr = MSR_P4_BPU_PERFCTR0,
-	.evntsel = MSR_P4_BSU_ESCR0,
-	.checkbit = 1ULL<<39,
+	.perfctr	= MSR_P4_BPU_PERFCTR0,
+	.evntsel	= MSR_P4_BSU_ESCR0,
+	.checkbit	= 1ULL << 39,
 };
 
-/* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully
-   all future Intel CPUs. */
-
+/*
+ * Watchdog using the Intel architected PerfMon.
+ * Used for Core2 and hopefully all future Intel CPUs.
+ */
 #define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
 #define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
 
@@ -601,19 +620,19 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
 
 	wd->perfctr_msr = perfctr_msr;
 	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = 0;  //unused
+	wd->cccr_msr = 0;  /* unused */
 	intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
 	return 1;
 }
 
 static struct wd_ops intel_arch_wd_ops __read_mostly = {
-	.reserve = single_msr_reserve,
-	.unreserve = single_msr_unreserve,
-	.setup = setup_intel_arch_watchdog,
-	.rearm = p6_rearm,
-	.stop = single_msr_stop_watchdog,
-	.perfctr = MSR_ARCH_PERFMON_PERFCTR1,
-	.evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
+	.reserve	= single_msr_reserve,
+	.unreserve	= single_msr_unreserve,
+	.setup		= setup_intel_arch_watchdog,
+	.rearm		= p6_rearm,
+	.stop		= single_msr_stop_watchdog,
+	.perfctr	= MSR_ARCH_PERFMON_PERFCTR1,
+	.evntsel	= MSR_ARCH_PERFMON_EVENTSEL1,
 };
 
 static void probe_nmi_watchdog(void)
@@ -626,8 +645,10 @@ static void probe_nmi_watchdog(void)
 		wd_ops = &k7_wd_ops;
 		break;
 	case X86_VENDOR_INTEL:
-		/* Work around Core Duo (Yonah) errata AE49 where perfctr1
-		   doesn't have a working enable bit. */
+		/*
+		 * Work around Core Duo (Yonah) errata AE49 where perfctr1
+		 * doesn't have a working enable bit.
+		 */
 		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
 			intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
 			intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
@@ -638,7 +659,7 @@ static void probe_nmi_watchdog(void)
 		}
 		switch (boot_cpu_data.x86) {
 		case 6:
-			if (boot_cpu_data.x86_model > 0xd)
+			if (boot_cpu_data.x86_model > 13)
 				return;
 
 			wd_ops = &p6_wd_ops;
@@ -699,10 +720,11 @@ int lapic_wd_event(unsigned nmi_hz)
 {
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 	u64 ctr;
+
 	rdmsrl(wd->perfctr_msr, ctr);
-	if (ctr & wd_ops->checkbit) { /* perfctr still running? */
+	if (ctr & wd_ops->checkbit) /* perfctr still running? */
 		return 0;
-	}
+
 	wd_ops->rearm(wd, nmi_hz);
 	return 1;
 }
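
The reworked lapic_wd_event() is the per-NMI test: read the running counter and check its top implemented bit. Because the counter was loaded with a negative value, that bit stays set until the counter wraps; a clear bit means it overflowed and this NMI belongs to the watchdog. The test in isolation:

#include <stdbool.h>
#include <stdint.h>

/* checkbit is e.g. 1ULL << 47 on K7, 1ULL << 39 on P6/P4. */
static bool watchdog_fired(uint64_t ctr, uint64_t checkbit)
{
	return (ctr & checkbit) == 0;	/* bit cleared => counter wrapped */
}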