aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86_64/kernel/nmi.c
diff options
context:
space:
mode:
authorDon Zickus <dzickus@redhat.com>2006-09-26 04:52:26 -0400
committerAndi Kleen <andi@basil.nowhere.org>2006-09-26 04:52:26 -0400
commit828f0afda123a96ff4e8078f057a302f4b4232ae (patch)
treea6f7398e0037f5c8f4cbd95ff11c5e4bf78a4c4d /arch/x86_64/kernel/nmi.c
parentb07f8915cda3fcd73b8b68075ba1e6cd0673365d (diff)
[PATCH] x86: Add performance counter reservation framework for UP kernels
Adds basic infrastructure to allow subsystems to reserve performance counters on the x86 chips. Only UP kernels are supported in this patch to make reviewing easier. The SMP portion makes a lot more changes. Think of this as a locking mechanism where each bit represents a different counter. In addition, each subsystem should also reserve an appropriate event selection register that will correspond to the performance counter it will be using (this is mainly necessary for the Pentium 4 chips as they break the 1:1 relationship to performance counters). This will help prevent subsystems like oprofile from interfering with the nmi watchdog. Signed-off-by: Don Zickus <dzickus@redhat.com> Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/x86_64/kernel/nmi.c')
-rw-r--r--arch/x86_64/kernel/nmi.c178
1 files changed, 138 insertions, 40 deletions
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 42c05d6907b..b7a7c997384 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -27,6 +27,20 @@
27#include <asm/kdebug.h> 27#include <asm/kdebug.h>
28#include <asm/mce.h> 28#include <asm/mce.h>
29 29
30/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
31 * evtsel_nmi_owner tracks the ownership of the event selection
32 * - different performance counters/ event selection may be reserved for
33 * different subsystems this reservation system just tries to coordinate
34 * things a little
35 */
36static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
37static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
38
39/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
40 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
41 */
42#define NMI_MAX_COUNTER_BITS 66
43
30/* 44/*
31 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware: 45 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
32 * - it may be reserved by some other driver, or not 46 * - it may be reserved by some other driver, or not
@@ -90,6 +104,95 @@ static unsigned int nmi_p4_cccr_val;
90 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ 104 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
91 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) 105 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
92 106
107/* converts an msr to an appropriate reservation bit */
108static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
109{
110 /* returns the bit offset of the performance counter register */
111 switch (boot_cpu_data.x86_vendor) {
112 case X86_VENDOR_AMD:
113 return (msr - MSR_K7_PERFCTR0);
114 case X86_VENDOR_INTEL:
115 return (msr - MSR_P4_BPU_PERFCTR0);
116 }
117 return 0;
118}
119
120/* converts an msr to an appropriate reservation bit */
121static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
122{
123 /* returns the bit offset of the event selection register */
124 switch (boot_cpu_data.x86_vendor) {
125 case X86_VENDOR_AMD:
126 return (msr - MSR_K7_EVNTSEL0);
127 case X86_VENDOR_INTEL:
128 return (msr - MSR_P4_BSU_ESCR0);
129 }
130 return 0;
131}
132
133/* checks for a bit availability (hack for oprofile) */
134int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
135{
136 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
137
138 return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
139}
140
141/* checks the an msr for availability */
142int avail_to_resrv_perfctr_nmi(unsigned int msr)
143{
144 unsigned int counter;
145
146 counter = nmi_perfctr_msr_to_bit(msr);
147 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
148
149 return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
150}
151
152int reserve_perfctr_nmi(unsigned int msr)
153{
154 unsigned int counter;
155
156 counter = nmi_perfctr_msr_to_bit(msr);
157 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
158
159 if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
160 return 1;
161 return 0;
162}
163
164void release_perfctr_nmi(unsigned int msr)
165{
166 unsigned int counter;
167
168 counter = nmi_perfctr_msr_to_bit(msr);
169 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
170
171 clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
172}
173
174int reserve_evntsel_nmi(unsigned int msr)
175{
176 unsigned int counter;
177
178 counter = nmi_evntsel_msr_to_bit(msr);
179 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
180
181 if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
182 return 1;
183 return 0;
184}
185
186void release_evntsel_nmi(unsigned int msr)
187{
188 unsigned int counter;
189
190 counter = nmi_evntsel_msr_to_bit(msr);
191 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
192
193 clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
194}
195
93static __cpuinit inline int nmi_known_cpu(void) 196static __cpuinit inline int nmi_known_cpu(void)
94{ 197{
95 switch (boot_cpu_data.x86_vendor) { 198 switch (boot_cpu_data.x86_vendor) {
@@ -325,34 +428,22 @@ late_initcall(init_lapic_nmi_sysfs);
325 428
326#endif /* CONFIG_PM */ 429#endif /* CONFIG_PM */
327 430
328/* 431static int setup_k7_watchdog(void)
329 * Activate the NMI watchdog via the local APIC.
330 * Original code written by Keith Owens.
331 */
332
333static void clear_msr_range(unsigned int base, unsigned int n)
334{ 432{
335 unsigned int i;
336
337 for(i = 0; i < n; ++i)
338 wrmsr(base+i, 0, 0);
339}
340
341static void setup_k7_watchdog(void)
342{
343 int i;
344 unsigned int evntsel; 433 unsigned int evntsel;
345 434
346 nmi_perfctr_msr = MSR_K7_PERFCTR0; 435 nmi_perfctr_msr = MSR_K7_PERFCTR0;
347 436
348 for(i = 0; i < 4; ++i) { 437 if (!reserve_perfctr_nmi(nmi_perfctr_msr))
349 /* Simulator may not support it */ 438 goto fail;
350 if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL)) { 439
351 nmi_perfctr_msr = 0; 440 if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
352 return; 441 goto fail1;
353 } 442
354 wrmsrl(MSR_K7_PERFCTR0+i, 0UL); 443 /* Simulator may not support it */
355 } 444 if (checking_wrmsrl(MSR_K7_EVNTSEL0, 0UL))
445 goto fail2;
446 wrmsrl(MSR_K7_PERFCTR0, 0UL);
356 447
357 evntsel = K7_EVNTSEL_INT 448 evntsel = K7_EVNTSEL_INT
358 | K7_EVNTSEL_OS 449 | K7_EVNTSEL_OS
@@ -364,6 +455,13 @@ static void setup_k7_watchdog(void)
364 apic_write(APIC_LVTPC, APIC_DM_NMI); 455 apic_write(APIC_LVTPC, APIC_DM_NMI);
365 evntsel |= K7_EVNTSEL_ENABLE; 456 evntsel |= K7_EVNTSEL_ENABLE;
366 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); 457 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
458 return 1;
459fail2:
460 release_evntsel_nmi(MSR_K7_EVNTSEL0);
461fail1:
462 release_perfctr_nmi(nmi_perfctr_msr);
463fail:
464 return 0;
367} 465}
368 466
369 467
@@ -382,22 +480,11 @@ static int setup_p4_watchdog(void)
382 nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1; 480 nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
383#endif 481#endif
384 482
385 if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL)) 483 if (!reserve_perfctr_nmi(nmi_perfctr_msr))
386 clear_msr_range(0x3F1, 2); 484 goto fail;
387 /* MSR 0x3F0 seems to have a default value of 0xFC00, but current 485
388 docs doesn't fully define it, so leave it alone for now. */ 486 if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
389 if (boot_cpu_data.x86_model >= 0x3) { 487 goto fail1;
390 /* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
391 clear_msr_range(0x3A0, 26);
392 clear_msr_range(0x3BC, 3);
393 } else {
394 clear_msr_range(0x3A0, 31);
395 }
396 clear_msr_range(0x3C0, 6);
397 clear_msr_range(0x3C8, 6);
398 clear_msr_range(0x3E0, 2);
399 clear_msr_range(MSR_P4_CCCR0, 18);
400 clear_msr_range(MSR_P4_PERFCTR0, 18);
401 488
402 wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); 489 wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
403 wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); 490 wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
@@ -406,6 +493,10 @@ static int setup_p4_watchdog(void)
406 apic_write(APIC_LVTPC, APIC_DM_NMI); 493 apic_write(APIC_LVTPC, APIC_DM_NMI);
407 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); 494 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
408 return 1; 495 return 1;
496fail1:
497 release_perfctr_nmi(nmi_perfctr_msr);
498fail:
499 return 0;
409} 500}
410 501
411void setup_apic_nmi_watchdog(void) 502void setup_apic_nmi_watchdog(void)
@@ -416,7 +507,8 @@ void setup_apic_nmi_watchdog(void)
416 return; 507 return;
417 if (strstr(boot_cpu_data.x86_model_id, "Screwdriver")) 508 if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
418 return; 509 return;
419 setup_k7_watchdog(); 510 if (!setup_k7_watchdog())
511 return;
420 break; 512 break;
421 case X86_VENDOR_INTEL: 513 case X86_VENDOR_INTEL:
422 if (boot_cpu_data.x86 != 15) 514 if (boot_cpu_data.x86 != 15)
@@ -588,6 +680,12 @@ int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file
588 680
589EXPORT_SYMBOL(nmi_active); 681EXPORT_SYMBOL(nmi_active);
590EXPORT_SYMBOL(nmi_watchdog); 682EXPORT_SYMBOL(nmi_watchdog);
683EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
684EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
685EXPORT_SYMBOL(reserve_perfctr_nmi);
686EXPORT_SYMBOL(release_perfctr_nmi);
687EXPORT_SYMBOL(reserve_evntsel_nmi);
688EXPORT_SYMBOL(release_evntsel_nmi);
591EXPORT_SYMBOL(reserve_lapic_nmi); 689EXPORT_SYMBOL(reserve_lapic_nmi);
592EXPORT_SYMBOL(release_lapic_nmi); 690EXPORT_SYMBOL(release_lapic_nmi);
593EXPORT_SYMBOL(disable_timer_nmi_watchdog); 691EXPORT_SYMBOL(disable_timer_nmi_watchdog);