author		Don Zickus <dzickus@redhat.com>	2006-09-26 04:52:26 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2006-09-26 04:52:26 -0400
commit		828f0afda123a96ff4e8078f057a302f4b4232ae
tree		a6f7398e0037f5c8f4cbd95ff11c5e4bf78a4c4d /arch
parent		b07f8915cda3fcd73b8b68075ba1e6cd0673365d
[PATCH] x86: Add performance counter reservation framework for UP kernels
Adds basic infrastructure to allow subsystems to reserve performance
counters on the x86 chips. Only UP kernels are supported in this patch
to make reviewing easier. The SMP portion makes a lot more changes.

Think of this as a locking mechanism where each bit represents a
different counter. In addition, each subsystem should also reserve an
appropriate event selection register that will correspond to the
performance counter it will be using (this is mainly necessary for the
Pentium 4 chips, as they break the 1:1 relationship to performance
counters). This will help prevent subsystems like oprofile from
interfering with the nmi watchdog.

Signed-off-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: Andi Kleen <ak@suse.de>
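[Editor's note] The intended calling convention is the one the watchdog setup paths below follow: reserve the performance counter first, then the matching event selection register, and release in reverse order on failure. A minimal sketch of a hypothetical client, assuming a K7-class CPU; the function name example_reserve_counter0 is illustrative and not part of this patch:

	/* sketch only: claim counter 0 and its event select, as oprofile might */
	static int example_reserve_counter0(void)
	{
		/* reserve_* return 1 on success, 0 if another subsystem owns the bit */
		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
			goto fail;
		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
			goto fail1;
		/* ... program MSR_K7_EVNTSEL0 and MSR_K7_PERFCTR0 here ... */
		return 1;
	fail1:
		release_perfctr_nmi(MSR_K7_PERFCTR0);
	fail:
		return 0;
	}

Note the reservation is purely advisory: nothing stops code from writing the MSRs directly; the per-CPU bitmask only coordinates well-behaved callers.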
Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/kernel/nmi.c		| 188
-rw-r--r--	arch/x86_64/kernel/nmi.c	| 178
2 files changed, 294 insertions(+), 72 deletions(-)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 1282d70ff971..5d58dfeacd59 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -34,6 +34,20 @@ static unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
 static unsigned int nmi_p4_cccr_val;
 extern void show_registers(struct pt_regs *regs);
 
+/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
+ * evtsel_nmi_owner tracks the ownership of the event selection
+ * - different performance counters/ event selection may be reserved for
+ *   different subsystems this reservation system just tries to coordinate
+ *   things a little
+ */
+static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
+static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
+
+/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
+ * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
+ */
+#define NMI_MAX_COUNTER_BITS 66
+
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
  * - it may be reserved by some other driver, or not
@@ -95,6 +109,105 @@ int nmi_active;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
+/* converts an msr to an appropriate reservation bit */
+static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
+{
+	/* returns the bit offset of the performance counter register */
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+		return (msr - MSR_K7_PERFCTR0);
+	case X86_VENDOR_INTEL:
+		switch (boot_cpu_data.x86) {
+		case 6:
+			return (msr - MSR_P6_PERFCTR0);
+		case 15:
+			return (msr - MSR_P4_BPU_PERFCTR0);
+		}
+	}
+	return 0;
+}
+
+/* converts an msr to an appropriate reservation bit */
+static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
+{
+	/* returns the bit offset of the event selection register */
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+		return (msr - MSR_K7_EVNTSEL0);
+	case X86_VENDOR_INTEL:
+		switch (boot_cpu_data.x86) {
+		case 6:
+			return (msr - MSR_P6_EVNTSEL0);
+		case 15:
+			return (msr - MSR_P4_BSU_ESCR0);
+		}
+	}
+	return 0;
+}
+
+/* checks for a bit availability (hack for oprofile) */
+int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
+{
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+}
+
+/* checks the an msr for availability */
+int avail_to_resrv_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+}
+
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+		return 1;
+	return 0;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_evntsel_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+		return 1;
+	return 0;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_evntsel_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+}
+
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -344,14 +457,6 @@ late_initcall(init_lapic_nmi_sysfs);
  * Original code written by Keith Owens.
  */
 
-static void clear_msr_range(unsigned int base, unsigned int n)
-{
-	unsigned int i;
-
-	for(i = 0; i < n; ++i)
-		wrmsr(base+i, 0, 0);
-}
-
 static void write_watchdog_counter(const char *descr)
 {
 	u64 count = (u64)cpu_khz * 1000;
@@ -362,14 +467,19 @@ static void write_watchdog_counter(const char *descr)
 	wrmsrl(nmi_perfctr_msr, 0 - count);
 }
 
-static void setup_k7_watchdog(void)
+static int setup_k7_watchdog(void)
 {
 	unsigned int evntsel;
 
 	nmi_perfctr_msr = MSR_K7_PERFCTR0;
 
-	clear_msr_range(MSR_K7_EVNTSEL0, 4);
-	clear_msr_range(MSR_K7_PERFCTR0, 4);
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
+		goto fail1;
+
+	wrmsrl(MSR_K7_PERFCTR0, 0UL);
 
 	evntsel = K7_EVNTSEL_INT
 		| K7_EVNTSEL_OS
@@ -381,16 +491,24 @@ static void setup_k7_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= K7_EVNTSEL_ENABLE;
 	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
+	return 1;
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
-static void setup_p6_watchdog(void)
+static int setup_p6_watchdog(void)
 {
 	unsigned int evntsel;
 
 	nmi_perfctr_msr = MSR_P6_PERFCTR0;
 
-	clear_msr_range(MSR_P6_EVNTSEL0, 2);
-	clear_msr_range(MSR_P6_PERFCTR0, 2);
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0))
+		goto fail1;
 
 	evntsel = P6_EVNTSEL_INT
 		| P6_EVNTSEL_OS
@@ -402,6 +520,11 @@ static void setup_p6_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= P6_EVNTSEL0_ENABLE;
 	wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
+	return 1;
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
 static int setup_p4_watchdog(void)
@@ -419,22 +542,11 @@ static int setup_p4_watchdog(void)
 		nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
 #endif
 
-	if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
-		clear_msr_range(0x3F1, 2);
-	/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
-	   docs doesn't fully define it, so leave it alone for now. */
-	if (boot_cpu_data.x86_model >= 0x3) {
-		/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
-		clear_msr_range(0x3A0, 26);
-		clear_msr_range(0x3BC, 3);
-	} else {
-		clear_msr_range(0x3A0, 31);
-	}
-	clear_msr_range(0x3C0, 6);
-	clear_msr_range(0x3C8, 6);
-	clear_msr_range(0x3E0, 2);
-	clear_msr_range(MSR_P4_CCCR0, 18);
-	clear_msr_range(MSR_P4_PERFCTR0, 18);
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
+		goto fail1;
 
 	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
 	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
@@ -442,6 +554,10 @@ static int setup_p4_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 	return 1;
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
 void setup_apic_nmi_watchdog (void)
@@ -450,7 +566,8 @@ void setup_apic_nmi_watchdog (void)
 	case X86_VENDOR_AMD:
 		if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
 			return;
-		setup_k7_watchdog();
+		if (!setup_k7_watchdog())
+			return;
 		break;
 	case X86_VENDOR_INTEL:
 		switch (boot_cpu_data.x86) {
@@ -458,7 +575,8 @@ void setup_apic_nmi_watchdog (void)
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
 				return;
 
-			setup_p6_watchdog();
+			if(!setup_p6_watchdog())
+				return;
 			break;
 		case 15:
 			if (boot_cpu_data.x86_model > 0x4)
@@ -612,6 +730,12 @@ int proc_unknown_nmi_panic(ctl_table *table, int write, struct file *file,
 
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
+EXPORT_SYMBOL(reserve_perfctr_nmi);
+EXPORT_SYMBOL(release_perfctr_nmi);
+EXPORT_SYMBOL(reserve_evntsel_nmi);
+EXPORT_SYMBOL(release_evntsel_nmi);
 EXPORT_SYMBOL(reserve_lapic_nmi);
 EXPORT_SYMBOL(release_lapic_nmi);
 EXPORT_SYMBOL(disable_timer_nmi_watchdog);
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 42c05d6907b9..b7a7c9973849 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -27,6 +27,20 @@
 #include <asm/kdebug.h>
 #include <asm/mce.h>
 
+/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
+ * evtsel_nmi_owner tracks the ownership of the event selection
+ * - different performance counters/ event selection may be reserved for
+ *   different subsystems this reservation system just tries to coordinate
+ *   things a little
+ */
+static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
+static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
+
+/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
+ * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
+ */
+#define NMI_MAX_COUNTER_BITS 66
+
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
  * - it may be reserved by some other driver, or not
@@ -90,6 +104,95 @@ static unsigned int nmi_p4_cccr_val;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
+/* converts an msr to an appropriate reservation bit */
+static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
+{
+	/* returns the bit offset of the performance counter register */
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+		return (msr - MSR_K7_PERFCTR0);
+	case X86_VENDOR_INTEL:
+		return (msr - MSR_P4_BPU_PERFCTR0);
+	}
+	return 0;
+}
+
+/* converts an msr to an appropriate reservation bit */
+static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
+{
+	/* returns the bit offset of the event selection register */
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_AMD:
+		return (msr - MSR_K7_EVNTSEL0);
+	case X86_VENDOR_INTEL:
+		return (msr - MSR_P4_BSU_ESCR0);
+	}
+	return 0;
+}
+
+/* checks for a bit availability (hack for oprofile) */
+int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
+{
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+}
+
+/* checks the an msr for availability */
+int avail_to_resrv_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+}
+
+int reserve_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+		return 1;
+	return 0;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_perfctr_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_evntsel_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
+		return 1;
+	return 0;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+	unsigned int counter;
+
+	counter = nmi_evntsel_msr_to_bit(msr);
+	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+
+	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
+}
+
 static __cpuinit inline int nmi_known_cpu(void)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -325,34 +428,22 @@ late_initcall(init_lapic_nmi_sysfs);
 
 #endif /* CONFIG_PM */
 
-/*
- * Activate the NMI watchdog via the local APIC.
- * Original code written by Keith Owens.
- */
-
-static void clear_msr_range(unsigned int base, unsigned int n)
+static int setup_k7_watchdog(void)
 {
-	unsigned int i;
-
-	for(i = 0; i < n; ++i)
-		wrmsr(base+i, 0, 0);
-}
-
-static void setup_k7_watchdog(void)
-{
-	int i;
 	unsigned int evntsel;
 
 	nmi_perfctr_msr = MSR_K7_PERFCTR0;
 
-	for(i = 0; i < 4; ++i) {
-		/* Simulator may not support it */
-		if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL)) {
-			nmi_perfctr_msr = 0;
-			return;
-		}
-		wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
-	}
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
+		goto fail1;
+
+	/* Simulator may not support it */
+	if (checking_wrmsrl(MSR_K7_EVNTSEL0, 0UL))
+		goto fail2;
+	wrmsrl(MSR_K7_PERFCTR0, 0UL);
 
 	evntsel = K7_EVNTSEL_INT
 		| K7_EVNTSEL_OS
@@ -364,6 +455,13 @@ static void setup_k7_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= K7_EVNTSEL_ENABLE;
 	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
+	return 1;
+fail2:
+	release_evntsel_nmi(MSR_K7_EVNTSEL0);
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
 
@@ -382,22 +480,11 @@ static int setup_p4_watchdog(void)
 		nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
 #endif
 
-	if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
-		clear_msr_range(0x3F1, 2);
-	/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
-	   docs doesn't fully define it, so leave it alone for now. */
-	if (boot_cpu_data.x86_model >= 0x3) {
-		/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
-		clear_msr_range(0x3A0, 26);
-		clear_msr_range(0x3BC, 3);
-	} else {
-		clear_msr_range(0x3A0, 31);
-	}
-	clear_msr_range(0x3C0, 6);
-	clear_msr_range(0x3C8, 6);
-	clear_msr_range(0x3E0, 2);
-	clear_msr_range(MSR_P4_CCCR0, 18);
-	clear_msr_range(MSR_P4_PERFCTR0, 18);
+	if (!reserve_perfctr_nmi(nmi_perfctr_msr))
+		goto fail;
+
+	if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
+		goto fail1;
 
 	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
 	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
@@ -406,6 +493,10 @@ static int setup_p4_watchdog(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 	return 1;
+fail1:
+	release_perfctr_nmi(nmi_perfctr_msr);
+fail:
+	return 0;
 }
 
 void setup_apic_nmi_watchdog(void)
@@ -416,7 +507,8 @@ void setup_apic_nmi_watchdog(void)
 			return;
 		if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
 			return;
-		setup_k7_watchdog();
+		if (!setup_k7_watchdog())
+			return;
 		break;
 	case X86_VENDOR_INTEL:
 		if (boot_cpu_data.x86 != 15)
@@ -588,6 +680,12 @@ int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file
 
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
+EXPORT_SYMBOL(reserve_perfctr_nmi);
+EXPORT_SYMBOL(release_perfctr_nmi);
+EXPORT_SYMBOL(reserve_evntsel_nmi);
+EXPORT_SYMBOL(release_evntsel_nmi);
 EXPORT_SYMBOL(reserve_lapic_nmi);
 EXPORT_SYMBOL(release_lapic_nmi);
 EXPORT_SYMBOL(disable_timer_nmi_watchdog);