path: root/arch/x86_64/kernel/nmi.c
author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-05 17:55:20 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-05 17:55:20 -0400
commit	ea62ccd00fd0b6720b033adfc9984f31130ce195 (patch)
tree	9837b797b2466fffcb0af96c388b06eae9c3df18 /arch/x86_64/kernel/nmi.c
parent	886a0768affe9a32f18c45f8e1393bca9ece5392 (diff)
parent	35060b6a9a4e1c89bc6fbea61090e302dbc61847 (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (231 commits)
  [PATCH] i386: Don't delete cpu_devs data to identify different x86 types in late_initcall
  [PATCH] i386: type may be unused
  [PATCH] i386: Some additional chipset register values validation.
  [PATCH] i386: Add missing !X86_PAE dependincy to the 2G/2G split.
  [PATCH] x86-64: Don't exclude asm-offsets.c in Documentation/dontdiff
  [PATCH] i386: avoid redundant preempt_disable in __unlazy_fpu
  [PATCH] i386: white space fixes in i387.h
  [PATCH] i386: Drop noisy e820 debugging printks
  [PATCH] x86-64: Fix allnoconfig error in genapic_flat.c
  [PATCH] x86-64: Shut up warnings for vfat compat ioctls on other file systems
  [PATCH] x86-64: Share identical video.S between i386 and x86-64
  [PATCH] x86-64: Remove CONFIG_REORDER
  [PATCH] x86-64: Print type and size correctly for unknown compat ioctls
  [PATCH] i386: Remove copy_*_user BUG_ONs for (size < 0)
  [PATCH] i386: Little cleanups in smpboot.c
  [PATCH] x86-64: Don't enable NUMA for a single node in K8 NUMA scanning
  [PATCH] x86: Use RDTSCP for synchronous get_cycles if possible
  [PATCH] i386: Add X86_FEATURE_RDTSCP
  [PATCH] i386: Implement X86_FEATURE_SYNC_RDTSC on i386
  [PATCH] i386: Implement alternative_io for i386
  ...

Fix up trivial conflict in include/linux/highmem.h manually.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86_64/kernel/nmi.c')
-rw-r--r--	arch/x86_64/kernel/nmi.c	678
1 file changed, 33 insertions, 645 deletions
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index dfab9f167366..6cd2b30e2ffc 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -27,28 +27,11 @@
 #include <asm/proto.h>
 #include <asm/kdebug.h>
 #include <asm/mce.h>
-#include <asm/intel_arch_perfmon.h>
 
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 int panic_on_unrecovered_nmi;
 
-/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
- * evtsel_nmi_owner tracks the ownership of the event selection
- * - different performance counters/ event selection may be reserved for
- *   different subsystems this reservation system just tries to coordinate
- *   things a little
- */
-
-/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
- * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
- */
-#define NMI_MAX_COUNTER_BITS 66
-#define NMI_MAX_COUNTER_LONGS BITS_TO_LONGS(NMI_MAX_COUNTER_BITS)
-
-static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner[NMI_MAX_COUNTER_LONGS]);
-static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[NMI_MAX_COUNTER_LONGS]);
-
 static cpumask_t backtrace_mask = CPU_MASK_NONE;
 
 /* nmi_active:
@@ -63,191 +46,11 @@ int panic_on_timeout;
 unsigned int nmi_watchdog = NMI_DEFAULT;
 static unsigned int nmi_hz = HZ;
 
-struct nmi_watchdog_ctlblk {
-	int enabled;
-	u64 check_bit;
-	unsigned int cccr_msr;
-	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
-	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
-};
-static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
+static DEFINE_PER_CPU(short, wd_enabled);
 
 /* local prototypes */
 static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
 
-/* converts an msr to an appropriate reservation bit */
-static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
-{
-	/* returns the bit offset of the performance counter register */
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		return (msr - MSR_K7_PERFCTR0);
-	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return (msr - MSR_ARCH_PERFMON_PERFCTR0);
-		else
-			return (msr - MSR_P4_BPU_PERFCTR0);
-	}
-	return 0;
-}
-
-/* converts an msr to an appropriate reservation bit */
-static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
-{
-	/* returns the bit offset of the event selection register */
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		return (msr - MSR_K7_EVNTSEL0);
-	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
-		else
-			return (msr - MSR_P4_BSU_ESCR0);
-	}
-	return 0;
-}
-
-/* checks for a bit availability (hack for oprofile) */
-int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
-{
-	int cpu;
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-	for_each_possible_cpu (cpu) {
-		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
-			return 0;
-	}
-	return 1;
-}
-
-/* checks the an msr for availability */
-int avail_to_resrv_perfctr_nmi(unsigned int msr)
-{
-	unsigned int counter;
-	int cpu;
-
-	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	for_each_possible_cpu (cpu) {
-		if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
-			return 0;
-	}
-	return 1;
-}
-
-static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
-{
-	unsigned int counter;
-	if (cpu < 0)
-		cpu = smp_processor_id();
-
-	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
-		return 1;
-	return 0;
-}
-
-static void __release_perfctr_nmi(int cpu, unsigned int msr)
-{
-	unsigned int counter;
-	if (cpu < 0)
-		cpu = smp_processor_id();
-
-	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
-}
-
-int reserve_perfctr_nmi(unsigned int msr)
-{
-	int cpu, i;
-	for_each_possible_cpu (cpu) {
-		if (!__reserve_perfctr_nmi(cpu, msr)) {
-			for_each_possible_cpu (i) {
-				if (i >= cpu)
-					break;
-				__release_perfctr_nmi(i, msr);
-			}
-			return 0;
-		}
-	}
-	return 1;
-}
-
-void release_perfctr_nmi(unsigned int msr)
-{
-	int cpu;
-	for_each_possible_cpu (cpu)
-		__release_perfctr_nmi(cpu, msr);
-}
-
-int __reserve_evntsel_nmi(int cpu, unsigned int msr)
-{
-	unsigned int counter;
-	if (cpu < 0)
-		cpu = smp_processor_id();
-
-	counter = nmi_evntsel_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
-		return 1;
-	return 0;
-}
-
-static void __release_evntsel_nmi(int cpu, unsigned int msr)
-{
-	unsigned int counter;
-	if (cpu < 0)
-		cpu = smp_processor_id();
-
-	counter = nmi_evntsel_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-	clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
-}
-
-int reserve_evntsel_nmi(unsigned int msr)
-{
-	int cpu, i;
-	for_each_possible_cpu (cpu) {
-		if (!__reserve_evntsel_nmi(cpu, msr)) {
-			for_each_possible_cpu (i) {
-				if (i >= cpu)
-					break;
-				__release_evntsel_nmi(i, msr);
-			}
-			return 0;
-		}
-	}
-	return 1;
-}
-
-void release_evntsel_nmi(unsigned int msr)
-{
-	int cpu;
-	for_each_possible_cpu (cpu) {
-		__release_evntsel_nmi(cpu, msr);
-	}
-}
-
-static __cpuinit inline int nmi_known_cpu(void)
-{
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		return boot_cpu_data.x86 == 15 || boot_cpu_data.x86 == 16;
-	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return 1;
-		else
-			return (boot_cpu_data.x86 == 15);
-	}
-	return 0;
-}
-
 /* Run after command line and cpu_init init, but before all other checks */
 void nmi_watchdog_default(void)
 {
@@ -277,23 +80,6 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
-static unsigned int adjust_for_32bit_ctr(unsigned int hz)
-{
-	unsigned int retval = hz;
-
-	/*
-	 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
-	 * are writable, with higher bits sign extending from bit 31.
-	 * So, we can only program the counter with 31 bit values and
-	 * 32nd bit should be 1, for 33.. to be 1.
-	 * Find the appropriate nmi_hz
-	 */
-	if ((((u64)cpu_khz * 1000) / retval) > 0x7fffffffULL) {
-		retval = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
-	}
-	return retval;
-}
-
 int __init check_nmi_watchdog (void)
 {
 	int *counts;
@@ -322,14 +108,14 @@ int __init check_nmi_watchdog (void)
 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
 	for_each_online_cpu(cpu) {
-		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
+		if (!per_cpu(wd_enabled, cpu))
 			continue;
 		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
 			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
 				cpu,
 				counts[cpu],
 				cpu_pda(cpu)->__nmi_count);
-			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
+			per_cpu(wd_enabled, cpu) = 0;
 			atomic_dec(&nmi_active);
 		}
 	}
@@ -344,13 +130,8 @@ int __init check_nmi_watchdog (void)
 
 	/* now that we know it works we can reduce NMI frequency to
 	   something more reasonable; makes a difference in some configs */
-	if (nmi_watchdog == NMI_LOCAL_APIC) {
-		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-		nmi_hz = 1;
-		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0)
-			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
-	}
+	if (nmi_watchdog == NMI_LOCAL_APIC)
+		nmi_hz = lapic_adjust_nmi_hz(1);
 
 	kfree(counts);
 	return 0;
@@ -379,57 +160,6 @@ int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-static void disable_lapic_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
-
-	if (atomic_read(&nmi_active) <= 0)
-		return;
-
-	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
-
-	BUG_ON(atomic_read(&nmi_active) != 0);
-}
-
-static void enable_lapic_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
-
-	/* are we already enabled */
-	if (atomic_read(&nmi_active) != 0)
-		return;
-
-	/* are we lapic aware */
-	if (nmi_known_cpu() <= 0)
-		return;
-
-	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
-	touch_nmi_watchdog();
-}
-
-void disable_timer_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_IO_APIC);
-
-	if (atomic_read(&nmi_active) <= 0)
-		return;
-
-	disable_irq(0);
-	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
-
-	BUG_ON(atomic_read(&nmi_active) != 0);
-}
-
-void enable_timer_nmi_watchdog(void)
-{
-	BUG_ON(nmi_watchdog != NMI_IO_APIC);
-
-	if (atomic_read(&nmi_active) == 0) {
-		touch_nmi_watchdog();
-		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
-		enable_irq(0);
-	}
-}
 
 static void __acpi_nmi_disable(void *__unused)
 {
@@ -515,275 +245,9 @@ late_initcall(init_lapic_nmi_sysfs);
 
 #endif /* CONFIG_PM */
 
-/*
- * Activate the NMI watchdog via the local APIC.
- * Original code written by Keith Owens.
- */
-
-/* Note that these events don't tick when the CPU idles. This means
-   the frequency varies with CPU load. */
-
-#define K7_EVNTSEL_ENABLE	(1 << 22)
-#define K7_EVNTSEL_INT		(1 << 20)
-#define K7_EVNTSEL_OS		(1 << 17)
-#define K7_EVNTSEL_USR		(1 << 16)
-#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
-#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
-
-static int setup_k7_watchdog(void)
-{
-	unsigned int perfctr_msr, evntsel_msr;
-	unsigned int evntsel;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	perfctr_msr = MSR_K7_PERFCTR0;
-	evntsel_msr = MSR_K7_EVNTSEL0;
-	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
-		goto fail;
-
-	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
-		goto fail1;
-
-	/* Simulator may not support it */
-	if (checking_wrmsrl(evntsel_msr, 0UL))
-		goto fail2;
-	wrmsrl(perfctr_msr, 0UL);
-
-	evntsel = K7_EVNTSEL_INT
-		| K7_EVNTSEL_OS
-		| K7_EVNTSEL_USR
-		| K7_NMI_EVENT;
-
-	/* setup the timer */
-	wrmsr(evntsel_msr, evntsel, 0);
-	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= K7_EVNTSEL_ENABLE;
-	wrmsr(evntsel_msr, evntsel, 0);
-
-	wd->perfctr_msr = perfctr_msr;
-	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = 0;  //unused
-	wd->check_bit = 1ULL<<63;
-	return 1;
-fail2:
-	__release_evntsel_nmi(-1, evntsel_msr);
-fail1:
-	__release_perfctr_nmi(-1, perfctr_msr);
-fail:
-	return 0;
-}
-
-static void stop_k7_watchdog(void)
-{
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	wrmsr(wd->evntsel_msr, 0, 0);
-
-	__release_evntsel_nmi(-1, wd->evntsel_msr);
-	__release_perfctr_nmi(-1, wd->perfctr_msr);
-}
-
-/* Note that these events don't tick when the CPU idles. This means
-   the frequency varies with CPU load. */
-
-#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
-#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
-#define P4_ESCR_OS		(1<<3)
-#define P4_ESCR_USR		(1<<2)
-#define P4_CCCR_OVF_PMI0	(1<<26)
-#define P4_CCCR_OVF_PMI1	(1<<27)
-#define P4_CCCR_THRESHOLD(N)	((N)<<20)
-#define P4_CCCR_COMPLEMENT	(1<<19)
-#define P4_CCCR_COMPARE		(1<<18)
-#define P4_CCCR_REQUIRED	(3<<16)
-#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
-#define P4_CCCR_ENABLE		(1<<12)
-#define P4_CCCR_OVF		(1<<31)
-/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
-   CRU_ESCR0 (with any non-null event selector) through a complemented
-   max threshold. [IA32-Vol3, Section 14.9.9] */
-
-static int setup_p4_watchdog(void)
-{
-	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
-	unsigned int evntsel, cccr_val;
-	unsigned int misc_enable, dummy;
-	unsigned int ht_num;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
-	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
-		return 0;
-
-#ifdef CONFIG_SMP
-	/* detect which hyperthread we are on */
-	if (smp_num_siblings == 2) {
-		unsigned int ebx, apicid;
-
-		ebx = cpuid_ebx(1);
-		apicid = (ebx >> 24) & 0xff;
-		ht_num = apicid & 1;
-	} else
-#endif
-		ht_num = 0;
-
-	/* performance counters are shared resources
-	 * assign each hyperthread its own set
-	 * (re-use the ESCR0 register, seems safe
-	 * and keeps the cccr_val the same)
-	 */
-	if (!ht_num) {
-		/* logical cpu 0 */
-		perfctr_msr = MSR_P4_IQ_PERFCTR0;
-		evntsel_msr = MSR_P4_CRU_ESCR0;
-		cccr_msr = MSR_P4_IQ_CCCR0;
-		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
-	} else {
-		/* logical cpu 1 */
-		perfctr_msr = MSR_P4_IQ_PERFCTR1;
-		evntsel_msr = MSR_P4_CRU_ESCR0;
-		cccr_msr = MSR_P4_IQ_CCCR1;
-		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
-	}
-
-	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
-		goto fail;
-
-	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
-		goto fail1;
-
-	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
-		| P4_ESCR_OS
-		| P4_ESCR_USR;
-
-	cccr_val |= P4_CCCR_THRESHOLD(15)
-		 | P4_CCCR_COMPLEMENT
-		 | P4_CCCR_COMPARE
-		 | P4_CCCR_REQUIRED;
-
-	wrmsr(evntsel_msr, evntsel, 0);
-	wrmsr(cccr_msr, cccr_val, 0);
-	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	cccr_val |= P4_CCCR_ENABLE;
-	wrmsr(cccr_msr, cccr_val, 0);
-
-	wd->perfctr_msr = perfctr_msr;
-	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = cccr_msr;
-	wd->check_bit = 1ULL<<39;
-	return 1;
-fail1:
-	__release_perfctr_nmi(-1, perfctr_msr);
-fail:
-	return 0;
-}
-
-static void stop_p4_watchdog(void)
-{
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	wrmsr(wd->cccr_msr, 0, 0);
-	wrmsr(wd->evntsel_msr, 0, 0);
-
-	__release_evntsel_nmi(-1, wd->evntsel_msr);
-	__release_perfctr_nmi(-1, wd->perfctr_msr);
-}
-
-#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
-static int setup_intel_arch_watchdog(void)
-{
-	unsigned int ebx;
-	union cpuid10_eax eax;
-	unsigned int unused;
-	unsigned int perfctr_msr, evntsel_msr;
-	unsigned int evntsel;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
-	 */
-	cpuid(10, &(eax.full), &ebx, &unused, &unused);
-	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
-	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		goto fail;
-
-	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
-	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
-
-	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
-		goto fail;
-
-	if (!__reserve_evntsel_nmi(-1, evntsel_msr))
-		goto fail1;
-
-	wrmsrl(perfctr_msr, 0UL);
-
-	evntsel = ARCH_PERFMON_EVENTSEL_INT
-		| ARCH_PERFMON_EVENTSEL_OS
-		| ARCH_PERFMON_EVENTSEL_USR
-		| ARCH_PERFMON_NMI_EVENT_SEL
-		| ARCH_PERFMON_NMI_EVENT_UMASK;
-
-	/* setup the timer */
-	wrmsr(evntsel_msr, evntsel, 0);
-
-	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
-	wrmsr(perfctr_msr, (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
-
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-	wrmsr(evntsel_msr, evntsel, 0);
-
-	wd->perfctr_msr = perfctr_msr;
-	wd->evntsel_msr = evntsel_msr;
-	wd->cccr_msr = 0;  //unused
-	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
-	return 1;
-fail1:
-	__release_perfctr_nmi(-1, perfctr_msr);
-fail:
-	return 0;
-}
-
-static void stop_intel_arch_watchdog(void)
-{
-	unsigned int ebx;
-	union cpuid10_eax eax;
-	unsigned int unused;
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
-	 */
-	cpuid(10, &(eax.full), &ebx, &unused, &unused);
-	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
-	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		return;
-
-	wrmsr(wd->evntsel_msr, 0, 0);
-
-	__release_evntsel_nmi(-1, wd->evntsel_msr);
-	__release_perfctr_nmi(-1, wd->perfctr_msr);
-}
-
 void setup_apic_nmi_watchdog(void *unused)
 {
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-	/* only support LOCAL and IO APICs for now */
-	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
-	    (nmi_watchdog != NMI_IO_APIC))
-		return;
-
-	if (wd->enabled == 1)
+	if (__get_cpu_var(wd_enabled) == 1)
 		return;
 
 	/* cheap hack to support suspend/resume */
@@ -791,62 +255,31 @@ void setup_apic_nmi_watchdog(void *unused)
 	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
 		return;
 
-	if (nmi_watchdog == NMI_LOCAL_APIC) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_AMD:
-			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
-				return;
-			if (!setup_k7_watchdog())
-				return;
-			break;
-		case X86_VENDOR_INTEL:
-			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-				if (!setup_intel_arch_watchdog())
-					return;
-				break;
-			}
-			if (!setup_p4_watchdog())
-				return;
-			break;
-		default:
+	switch (nmi_watchdog) {
+	case NMI_LOCAL_APIC:
+		__get_cpu_var(wd_enabled) = 1;
+		if (lapic_watchdog_init(nmi_hz) < 0) {
+			__get_cpu_var(wd_enabled) = 0;
 			return;
 		}
+		/* FALL THROUGH */
+	case NMI_IO_APIC:
+		__get_cpu_var(wd_enabled) = 1;
+		atomic_inc(&nmi_active);
 	}
-	wd->enabled = 1;
-	atomic_inc(&nmi_active);
 }
 
 void stop_apic_nmi_watchdog(void *unused)
 {
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
 	/* only support LOCAL and IO APICs for now */
 	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
 	    (nmi_watchdog != NMI_IO_APIC))
 		return;
-
-	if (wd->enabled == 0)
+	if (__get_cpu_var(wd_enabled) == 0)
 		return;
-
-	if (nmi_watchdog == NMI_LOCAL_APIC) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_AMD:
-			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
-				return;
-			stop_k7_watchdog();
-			break;
-		case X86_VENDOR_INTEL:
-			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-				stop_intel_arch_watchdog();
-				break;
-			}
-			stop_p4_watchdog();
-			break;
-		default:
-			return;
-		}
-	}
-	wd->enabled = 0;
+	if (nmi_watchdog == NMI_LOCAL_APIC)
+		lapic_watchdog_stop();
+	__get_cpu_var(wd_enabled) = 0;
 	atomic_dec(&nmi_active);
 }
 
@@ -885,9 +318,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 	int sum;
 	int touched = 0;
 	int cpu = smp_processor_id();
-	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-	u64 dummy;
-	int rc=0;
+	int rc = 0;
 
 	/* check for other users first */
 	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
@@ -934,55 +365,20 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 	}
 
 	/* see if the nmi watchdog went off */
-	if (wd->enabled) {
-		if (nmi_watchdog == NMI_LOCAL_APIC) {
-			rdmsrl(wd->perfctr_msr, dummy);
-			if (dummy & wd->check_bit){
-				/* this wasn't a watchdog timer interrupt */
-				goto done;
-			}
-
-			/* only Intel uses the cccr msr */
-			if (wd->cccr_msr != 0) {
-				/*
-				 * P4 quirks:
-				 * - An overflown perfctr will assert its interrupt
-				 *   until the OVF flag in its CCCR is cleared.
-				 * - LVTPC is masked on interrupt and must be
-				 *   unmasked by the LVTPC handler.
-				 */
-				rdmsrl(wd->cccr_msr, dummy);
-				dummy &= ~P4_CCCR_OVF;
-				wrmsrl(wd->cccr_msr, dummy);
-				apic_write(APIC_LVTPC, APIC_DM_NMI);
-				/* start the cycle over again */
-				wrmsrl(wd->perfctr_msr,
-				       -((u64)cpu_khz * 1000 / nmi_hz));
-			} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
-				/*
-				 * ArchPerfom/Core Duo needs to re-unmask
-				 * the apic vector
-				 */
-				apic_write(APIC_LVTPC, APIC_DM_NMI);
-				/* ARCH_PERFMON has 32 bit counter writes */
-				wrmsr(wd->perfctr_msr,
-				      (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
-			} else {
-				/* start the cycle over again */
-				wrmsrl(wd->perfctr_msr,
-				       -((u64)cpu_khz * 1000 / nmi_hz));
-			}
-			rc = 1;
-		} else if (nmi_watchdog == NMI_IO_APIC) {
-			/* don't know how to accurately check for this.
-			 * just assume it was a watchdog timer interrupt
-			 * This matches the old behaviour.
-			 */
-			rc = 1;
-		} else
-			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
+	if (!__get_cpu_var(wd_enabled))
+		return rc;
+	switch (nmi_watchdog) {
+	case NMI_LOCAL_APIC:
+		rc |= lapic_wd_event(nmi_hz);
+		break;
+	case NMI_IO_APIC:
+		/* don't know how to accurately check for this.
+		 * just assume it was a watchdog timer interrupt
+		 * This matches the old behaviour.
+		 */
+		rc = 1;
+		break;
 	}
-done:
 	return rc;
 }
 
@@ -1067,12 +463,4 @@ void __trigger_all_cpu_backtrace(void)
 
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
-EXPORT_SYMBOL(reserve_perfctr_nmi);
-EXPORT_SYMBOL(release_perfctr_nmi);
-EXPORT_SYMBOL(reserve_evntsel_nmi);
-EXPORT_SYMBOL(release_evntsel_nmi);
-EXPORT_SYMBOL(disable_timer_nmi_watchdog);
-EXPORT_SYMBOL(enable_timer_nmi_watchdog);
 EXPORT_SYMBOL(touch_nmi_watchdog);
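
For reference, a minimal standalone sketch of the watchdog period arithmetic that appears throughout the code removed above: the perfctr is preloaded with -((u64)cpu_khz * 1000 / nmi_hz) so it overflows (and raises an NMI) nmi_hz times per second, and adjust_for_32bit_ctr() raises nmi_hz until that preload fits in 31 writable bits on ARCH_PERFMON counters. The cpu_khz value below is a made-up example; the program is not part of the patch.

/*
 * Sketch of the NMI watchdog reload computation (userspace, illustrative only).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cpu_khz = 2400000;	/* hypothetical 2.4 GHz CPU */
	unsigned int nmi_hz = 1;	/* desired watchdog NMIs per second */

	/* same clamp as adjust_for_32bit_ctr(): keep the count within 31 bits */
	if ((cpu_khz * 1000) / nmi_hz > 0x7fffffffULL)
		nmi_hz = (unsigned int)((cpu_khz * 1000) / 0x7fffffffULL + 1);

	/* value the setup code writes into the performance counter MSR */
	uint64_t preload = (uint64_t)-(cpu_khz * 1000 / nmi_hz);

	printf("nmi_hz = %u, counts to overflow = %llu, preload = 0x%llx\n",
	       nmi_hz,
	       (unsigned long long)(cpu_khz * 1000 / nmi_hz),
	       (unsigned long long)preload);
	return 0;
}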