author	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>	2007-02-13 07:26:22 -0500
committer	Andi Kleen <andi@basil.nowhere.org>	2007-02-13 07:26:22 -0500
commit	90ce4bc4542c10b63dc6482ac920ff1226a6e5ff (patch)
tree	2fe3baa98028ddb28ffca83930d0bc63d50c5792 /arch
parent	1676193937a538fdb92a2916a86a705093cfd613 (diff)
[PATCH] i386: Handle 32 bit PerfMon Counter writes cleanly in i386 nmi_watchdog
Change i386 nmi handler to handle 32 bit perfmon counter MSR writes cleanly.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
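To make the arithmetic behind the change concrete: on P6/ARCH_PERFMON only the low 32 bits of the counter MSR are writable and bit 31 sign-extends upward, so the programmed period (cpu_khz * 1000) / nmi_hz must fit in 31 bits; if it does not, nmi_hz is raised until it does. The following standalone user-space sketch mirrors that adjustment. It is illustrative only and not part of the patch: the 3 GHz cpu_khz value is an assumption, and plain 64-bit division stands in for the kernel's do_div().

/* Illustrative sketch only -- not part of the patch.  Mirrors the
 * adjust_for_32bit_ctr() calculation with ordinary 64-bit division. */
#include <stdio.h>
#include <stdint.h>

static unsigned int adjust_for_32bit_ctr(uint64_t cpu_khz, unsigned int hz)
{
	uint64_t counter_val = cpu_khz * 1000 / hz;

	/* The counter period must fit in 31 bits; otherwise fire NMIs more often. */
	if (counter_val > 0x7fffffffULL)
		hz = cpu_khz * 1000 / 0x7fffffffULL + 1;
	return hz;
}

int main(void)
{
	uint64_t cpu_khz = 3000000;	/* assumed 3 GHz CPU */

	/* 3e9 cycles between NMIs overflows 31 bits, so the result is 2. */
	printf("nmi_hz = %u\n", adjust_for_32bit_ctr(cpu_khz, 1));
	return 0;
}

On such a CPU the old code already bumped nmi_hz for MSR_ARCH_PERFMON_PERFCTR0; the patch factors that logic into adjust_for_32bit_ctr() and applies it to MSR_P6_PERFCTR0 as well.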
Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/kernel/nmi.c	64
1 file changed, 48 insertions, 16 deletions
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 1a6f8bb8881..7d3f4e22d6f 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -216,6 +216,28 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
+static unsigned int adjust_for_32bit_ctr(unsigned int hz)
+{
+	u64 counter_val;
+	unsigned int retval = hz;
+
+	/*
+	 * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
+	 * are writable, with higher bits sign extending from bit 31.
+	 * So, we can only program the counter with 31 bit values and
+	 * 32nd bit should be 1, for 33.. to be 1.
+	 * Find the appropriate nmi_hz
+	 */
+	counter_val = (u64)cpu_khz * 1000;
+	do_div(counter_val, retval);
+	if (counter_val > 0x7fffffffULL) {
+		u64 count = (u64)cpu_khz * 1000;
+		do_div(count, 0x7fffffffUL);
+		retval = count + 1;
+	}
+	return retval;
+}
+
 static int __init check_nmi_watchdog(void)
 {
 	unsigned int *prev_nmi_count;
@@ -281,18 +303,10 @@ static int __init check_nmi_watchdog(void)
 		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
 		nmi_hz = 1;
-		/*
-		 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
-		 * are writable, with higher bits sign extending from bit 31.
-		 * So, we can only program the counter with 31 bit values and
-		 * 32nd bit should be 1, for 33.. to be 1.
-		 * Find the appropriate nmi_hz
-		 */
-		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 &&
-			((u64)cpu_khz * 1000) > 0x7fffffffULL) {
-			u64 count = (u64)cpu_khz * 1000;
-			do_div(count, 0x7fffffffUL);
-			nmi_hz = count + 1;
+
+		if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
+			wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
 		}
 	}
 
@@ -442,6 +456,17 @@ static void write_watchdog_counter(unsigned int perfctr_msr, const char *descr)
 	wrmsrl(perfctr_msr, 0 - count);
 }
 
+static void write_watchdog_counter32(unsigned int perfctr_msr,
+		const char *descr)
+{
+	u64 count = (u64)cpu_khz * 1000;
+
+	do_div(count, nmi_hz);
+	if(descr)
+		Dprintk("setting %s to -0x%08Lx\n", descr, count);
+	wrmsr(perfctr_msr, (u32)(-count), 0);
+}
+
 /* Note that these events don't tick when the CPU idles. This means
    the frequency varies with CPU load. */
 
@@ -531,7 +556,8 @@ static int setup_p6_watchdog(void)
 
 	/* setup the timer */
 	wrmsr(evntsel_msr, evntsel, 0);
-	write_watchdog_counter(perfctr_msr, "P6_PERFCTR0");
+	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
+	write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0");
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= P6_EVNTSEL0_ENABLE;
 	wrmsr(evntsel_msr, evntsel, 0);
@@ -704,7 +730,8 @@ static int setup_intel_arch_watchdog(void)
 
 	/* setup the timer */
 	wrmsr(evntsel_msr, evntsel, 0);
-	write_watchdog_counter(perfctr_msr, "INTEL_ARCH_PERFCTR0");
+	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
+	write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0");
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 	wrmsr(evntsel_msr, evntsel, 0);
@@ -956,6 +983,8 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 			dummy &= ~P4_CCCR_OVF;
 			wrmsrl(wd->cccr_msr, dummy);
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
+			/* start the cycle over again */
+			write_watchdog_counter(wd->perfctr_msr, NULL);
 		}
 		else if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
			 wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
@@ -964,9 +993,12 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 			 * other P6 variant.
 			 * ArchPerfom/Core Duo also needs this */
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
+			/* P6/ARCH_PERFMON has 32 bit counter write */
+			write_watchdog_counter32(wd->perfctr_msr, NULL);
+		} else {
+			/* start the cycle over again */
+			write_watchdog_counter(wd->perfctr_msr, NULL);
 		}
-		/* start the cycle over again */
-		write_watchdog_counter(wd->perfctr_msr, NULL);
 		rc = 1;
 	} else if (nmi_watchdog == NMI_IO_APIC) {
 		/* don't know how to accurately check for this.