author    Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>    2006-06-26 07:59:59 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-06-26 13:48:22 -0400
commit    0080e667550db5ae8c9318181500c413b99ff164
tree      3d6d162f0017aaa7c6049e785bbcdfc18dbc23ae
parent    e77deacb7b078156fcadf27b838a4ce1a65eda04
[PATCH] x86_64: i386/x86-64 Add nmi watchdog support for new Intel CPUs
Intel now has support for Architectural Performance Monitoring Counters
(refer to the IA-32 Intel Architecture Software Developer's Manual,
http://www.intel.com/design/pentium4/manuals/253669.htm). This feature is
present starting with the Intel Core Duo and Intel Core Solo processors.

What this means is that the performance monitoring counters and some
performance monitoring events are now defined in an architectural way
(discoverable through cpuid), so there is no need to check family/model
etc. for these architectural events.

Below is the patch to use these performance counters in the nmi watchdog
driver. The patch handles both i386 and x86-64 kernels.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
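As a quick illustration of the cpuid check the patch relies on, here is a minimal
user-space sketch (not part of the patch; it uses GCC's __get_cpuid() helper from
<cpuid.h> rather than the kernel's cpuid_eax()/cpuid_ebx()) that reads CPUID leaf
0xA the same way init_intel() does:

/* Hypothetical user-space probe for architectural perfmon support. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if leaf 0xA is above the CPU's highest basic leaf */
	if (!__get_cpuid(0xA, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 0xA not supported\n");
		return 1;
	}

	unsigned int version   = eax & 0xff;        /* architectural PMU version ID */
	unsigned int ncounters = (eax >> 8) & 0xff; /* general-purpose counters per core */

	/* Same test the patch uses before setting X86_FEATURE_ARCH_PERFMON */
	if (version && ncounters > 1)
		printf("arch perfmon: version %u, %u counters\n", version, ncounters);
	else
		printf("arch perfmon not usable for the NMI watchdog\n");

	/* A clear bit 0 in EBX means the Unhalted Core Cycles event is available */
	printf("unhalted core cycles event %savailable\n", (ebx & 1) ? "not " : "");
	return 0;
}

Bits 7:0 of EAX give the architectural PMU version, bits 15:8 the number of
general-purpose counters, and EBX bit 0 (clear = present) reports the Unhalted
Core Cycles event that the watchdog programs below.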
Diffstat (limited to 'arch')
-rw-r--r--   arch/i386/kernel/cpu/intel.c    6
-rw-r--r--   arch/i386/kernel/nmi.c         65
-rw-r--r--   arch/x86_64/kernel/nmi.c       81
-rw-r--r--   arch/x86_64/kernel/setup.c      7
4 files changed, 153 insertions(+), 6 deletions(-)
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 5386b29bb5a5..10afc645c540 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -122,6 +122,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 
 	select_idle_routine(c);
 	l2 = init_intel_cacheinfo(c);
+	if (c->cpuid_level > 9 ) {
+		unsigned eax = cpuid_eax(10);
+		/* Check for version and the number of counters */
+		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
+	}
 
 	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
 	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index bd3875419630..a76e93146585 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -24,6 +24,7 @@
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
+#include <asm/intel_arch_perfmon.h>
 
 #include "mach_traps.h"
 
@@ -95,6 +96,9 @@ int nmi_active;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
+#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -207,6 +211,8 @@ static int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
+static void disable_intel_arch_watchdog(void);
+
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -216,6 +222,10 @@ static void disable_lapic_nmi_watchdog(void)
 		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
 		break;
 	case X86_VENDOR_INTEL:
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			disable_intel_arch_watchdog();
+			break;
+		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -444,6 +454,53 @@ static int setup_p4_watchdog(void)
 	return 1;
 }
 
+static void disable_intel_arch_watchdog(void)
+{
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
+}
+
+static int setup_intel_arch_watchdog(void)
+{
+	unsigned int evntsel;
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		return 0;
+
+	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
+
+	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
+	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
+
+	evntsel = ARCH_PERFMON_EVENTSEL_INT
+		| ARCH_PERFMON_EVENTSEL_OS
+		| ARCH_PERFMON_EVENTSEL_USR
+		| ARCH_PERFMON_NMI_EVENT_SEL
+		| ARCH_PERFMON_NMI_EVENT_UMASK;
+
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	write_watchdog_counter("INTEL_ARCH_PERFCTR0");
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	return 1;
+}
+
 void setup_apic_nmi_watchdog (void)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -453,6 +510,11 @@ void setup_apic_nmi_watchdog (void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			if (!setup_intel_arch_watchdog())
+				return;
+			break;
+		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -556,7 +618,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 		wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	}
-	else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
+	else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
+		 nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
 		/* Only P6 based Pentium M need to re-unmask
 		 * the apic vector but it doesn't hurt
 		 * other P6 variant */
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index ab421e22fa67..399489c93132 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -27,6 +27,7 @@
 #include <asm/proto.h>
 #include <asm/kdebug.h>
 #include <asm/mce.h>
+#include <asm/intel_arch_perfmon.h>
 
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -66,6 +67,9 @@ static unsigned int nmi_p4_cccr_val;
 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
 #define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
 
+#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
 #define MSR_P4_MISC_ENABLE	0x1A0
 #define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
 #define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
@@ -97,7 +101,10 @@ static __cpuinit inline int nmi_known_cpu(void)
 	case X86_VENDOR_AMD:
 		return boot_cpu_data.x86 == 15;
 	case X86_VENDOR_INTEL:
-		return boot_cpu_data.x86 == 15;
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+			return 1;
+		else
+			return (boot_cpu_data.x86 == 15);
 	}
 	return 0;
 }
@@ -203,6 +210,8 @@ int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
+static void disable_intel_arch_watchdog(void);
+
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -215,6 +224,8 @@ static void disable_lapic_nmi_watchdog(void)
 		if (boot_cpu_data.x86 == 15) {
 			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
 			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
+		} else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			disable_intel_arch_watchdog();
 		}
 		break;
 	}
@@ -367,6 +378,53 @@ static void setup_k7_watchdog(void)
 	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
 }
 
+static void disable_intel_arch_watchdog(void)
+{
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
+}
+
+static int setup_intel_arch_watchdog(void)
+{
+	unsigned int evntsel;
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		return 0;
+
+	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
+
+	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
+	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
+
+	evntsel = ARCH_PERFMON_EVENTSEL_INT
+		| ARCH_PERFMON_EVENTSEL_OS
+		| ARCH_PERFMON_EVENTSEL_USR
+		| ARCH_PERFMON_NMI_EVENT_SEL
+		| ARCH_PERFMON_NMI_EVENT_UMASK;
+
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	return 1;
+}
+
 
 static int setup_p4_watchdog(void)
 {
@@ -420,10 +478,16 @@ void setup_apic_nmi_watchdog(void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
-		if (boot_cpu_data.x86 != 15)
-			return;
-		if (!setup_p4_watchdog())
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			if (!setup_intel_arch_watchdog())
+				return;
+		} else if (boot_cpu_data.x86 == 15) {
+			if (!setup_p4_watchdog())
+				return;
+		} else {
 			return;
+		}
+
 		break;
 
 	default:
@@ -508,7 +572,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 		 */
 		wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
-	}
+	} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+		/*
+		 * For Intel based architectural perfmon
+		 * - LVTPC is masked on interrupt and must be
+		 *   unmasked by the LVTPC handler.
+		 */
+		apic_write(APIC_LVTPC, APIC_DM_NMI);
+	}
 		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
 	}
 }
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 2a5fce0fd1c4..594642a6c037 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -988,6 +988,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	unsigned n;
 
 	init_intel_cacheinfo(c);
+	if (c->cpuid_level > 9 ) {
+		unsigned eax = cpuid_eax(10);
+		/* Check for version and the number of counters */
+		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
+	}
+
 	n = c->extended_cpuid_level;
 	if (n >= 0x80000008) {
 		unsigned eax = cpuid_eax(0x80000008);