path: root/arch/x86_64/kernel/nmi.c
author	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>	2006-06-26 07:59:59 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-26 13:48:22 -0400
commit	0080e667550db5ae8c9318181500c413b99ff164 (patch)
tree	3d6d162f0017aaa7c6049e785bbcdfc18dbc23ae /arch/x86_64/kernel/nmi.c
parent	e77deacb7b078156fcadf27b838a4ce1a65eda04 (diff)
[PATCH] x86_64: i386/x86-64 Add nmi watchdog support for new Intel CPUs

Intel now has support for Architectural Performance Monitoring Counters
(refer to the IA-32 Intel Architecture Software Developer's Manual,
http://www.intel.com/design/pentium4/manuals/253669.htm). This feature is
present starting with the Intel Core Duo and Intel Core Solo processors.

This means the performance monitoring counters, and some performance
monitoring events, are now defined in an architectural way (discoverable
through cpuid), so there is no need to check family/model for these
architectural events.

Below is the patch to use these performance counters in the nmi watchdog
driver. The patch handles both i386 and x86-64 kernels.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/kernel/nmi.c')
 arch/x86_64/kernel/nmi.c | 81
 1 file changed, 76 insertions(+), 5 deletions(-)
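The feature the patch keys on is advertised through CPUID leaf 0xA, which
describes performance monitoring architecturally instead of per family/model.
A minimal userspace sketch of reading that leaf (my illustration, not part of
the patch; assumes GCC/Clang's <cpuid.h>):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned eax, ebx, ecx, edx;

	/* Leaf 0xA describes architectural performance monitoring. */
	if (!__get_cpuid(0xA, &eax, &ebx, &ecx, &edx) || (eax & 0xff) == 0) {
		puts("no architectural perfmon");
		return 1;
	}
	printf("version id:      %u\n", eax & 0xff);
	printf("counters/core:   %u\n", (eax >> 8) & 0xff);
	printf("counter width:   %u bits\n", (eax >> 16) & 0xff);
	/* In ebx, a bit set to 0 means the event IS available. */
	printf("unhalted core cycles event %savailable\n",
	       (ebx & 1) ? "not " : "");
	return 0;
}

The X86_FEATURE_ARCH_PERFMON flag tested in the hunks below is derived from
this same leaf by the kernel's CPU identification code.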
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index ab421e22fa67..399489c93132 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -27,6 +27,7 @@
 #include <asm/proto.h>
 #include <asm/kdebug.h>
 #include <asm/mce.h>
+#include <asm/intel_arch_perfmon.h>
 
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -66,6 +67,9 @@ static unsigned int nmi_p4_cccr_val;
 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
 #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
 
+#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
 #define MSR_P4_MISC_ENABLE	0x1A0
 #define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
 #define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
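For context on the two defines added above: the architectural Unhalted Core
Cycles event is event-select 0x3c with unit mask 0x00, and the
ARCH_PERFMON_EVENTSEL_* flags used later in the patch follow the standard
IA32_PERFEVTSELx bit layout. An illustrative spelling of that encoding (mine,
not from the patch):

#include <stdio.h>

/* IA32_PERFEVTSELx layout, per the SDM (illustration only) */
#define EVTSEL_EVENT(e)	((e) & 0xffu)		/* bits  7:0  event select    */
#define EVTSEL_UMASK(u)	(((u) & 0xffu) << 8)	/* bits 15:8  unit mask       */
#define EVTSEL_USR	(1u << 16)		/* count in ring 3            */
#define EVTSEL_OS	(1u << 17)		/* count in ring 0            */
#define EVTSEL_INT	(1u << 20)		/* APIC interrupt on overflow */
#define EVTSEL_ENABLE	(1u << 22)		/* enable this counter        */

int main(void)
{
	/* Unhalted Core Cycles: event 0x3c, umask 0x00 */
	unsigned evntsel = EVTSEL_EVENT(0x3c) | EVTSEL_UMASK(0x00)
			 | EVTSEL_USR | EVTSEL_OS | EVTSEL_INT;

	printf("evntsel = 0x%08x\n", evntsel);	/* prints 0x0013003c */
	return 0;
}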
@@ -97,7 +101,10 @@ static __cpuinit inline int nmi_known_cpu(void)
 	case X86_VENDOR_AMD:
 		return boot_cpu_data.x86 == 15;
 	case X86_VENDOR_INTEL:
-		return boot_cpu_data.x86 == 15;
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+			return 1;
+		else
+			return (boot_cpu_data.x86 == 15);
 	}
 	return 0;
 }
@@ -203,6 +210,8 @@ int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
+static void disable_intel_arch_watchdog(void);
+
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -215,6 +224,8 @@ static void disable_lapic_nmi_watchdog(void)
 		if (boot_cpu_data.x86 == 15) {
 			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
 			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
+		} else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			disable_intel_arch_watchdog();
 		}
 		break;
 	}
@@ -367,6 +378,53 @@ static void setup_k7_watchdog(void)
 	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
 }
 
+static void disable_intel_arch_watchdog(void)
+{
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
+}
+
+static int setup_intel_arch_watchdog(void)
+{
+	unsigned int evntsel;
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		return 0;
+
+	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
+
+	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
+	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
+
+	evntsel = ARCH_PERFMON_EVENTSEL_INT
+		| ARCH_PERFMON_EVENTSEL_OS
+		| ARCH_PERFMON_EVENTSEL_USR
+		| ARCH_PERFMON_NMI_EVENT_SEL
+		| ARCH_PERFMON_NMI_EVENT_UMASK;
+
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	return 1;
+}
+
 
 static int setup_p4_watchdog(void)
 {
@@ -420,10 +478,16 @@ void setup_apic_nmi_watchdog(void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
-		if (boot_cpu_data.x86 != 15)
-			return;
-		if (!setup_p4_watchdog())
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			if (!setup_intel_arch_watchdog())
+				return;
+		} else if (boot_cpu_data.x86 == 15) {
+			if (!setup_p4_watchdog())
+				return;
+		} else {
 			return;
+		}
+
 		break;
 
 	default:
@@ -508,7 +572,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 		 */
 		wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
-	}
+	} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+		/*
+		 * For Intel based architectural perfmon
+		 * - LVTPC is masked on interrupt and must be
+		 *   unmasked by the LVTPC handler.
+		 */
+		apic_write(APIC_LVTPC, APIC_DM_NMI);
+	}
 		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
 	}
 }
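The new else-if branch in nmi_watchdog_tick() is needed because the APIC sets
the mask bit in the LVTPC entry when the counter-overflow interrupt is
delivered, so the handler must rewrite the register each tick before
re-arming the counter. Illustrative field values (assumed from Linux's APIC
definitions, not from this file):

#include <stdio.h>

/* LVTPC fields involved in the rearm (illustration only) */
#define LVT_MASKED	(1u << 16)	/* set by hardware when the PMI is delivered */
#define DM_NMI		(0x4u << 8)	/* delivery mode: NMI (Linux's APIC_DM_NMI)  */

int main(void)
{
	printf("after PMI delivery: 0x%05x (masked)\n", DM_NMI | LVT_MASKED);
	printf("after rearm write:  0x%05x (unmasked)\n", DM_NMI);
	return 0;
}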