Diffstat (limited to 'arch/x86_64/kernel/nmi.c')
-rw-r--r--	arch/x86_64/kernel/nmi.c	46
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 9cb42ecb7f89..e59cda134166 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -214,6 +214,23 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
+static unsigned int adjust_for_32bit_ctr(unsigned int hz)
+{
+	unsigned int retval = hz;
+
+	/*
+	 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
+	 * are writable, with higher bits sign extending from bit 31.
+	 * So, we can only program the counter with 31 bit values and
+	 * 32nd bit should be 1, for 33.. to be 1.
+	 * Find the appropriate nmi_hz
+	 */
+	if ((((u64)cpu_khz * 1000) / retval) > 0x7fffffffULL) {
+		retval = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
+	}
+	return retval;
+}
+
 int __init check_nmi_watchdog (void)
 {
 	int *counts;
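The helper added above caps the programmed count at 31 bits. The same arithmetic, pulled out as a standalone user-space sketch (illustration only, not part of the patch; the 2.8 GHz cpu_khz value is an assumption):

#include <stdio.h>

typedef unsigned long long u64;

/* mirror of adjust_for_32bit_ctr(); cpu_khz is passed in explicitly here */
static unsigned int adjust_for_32bit_ctr(unsigned int hz, unsigned int cpu_khz)
{
	unsigned int retval = hz;

	/* the counter period is cpu_khz * 1000 / hz and must fit in 31 bits */
	if ((((u64)cpu_khz * 1000) / retval) > 0x7fffffffULL)
		retval = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
	return retval;
}

int main(void)
{
	/* 2.8 GHz: 2.8e9 cycles/s exceeds 0x7fffffff (~2.147e9), so one
	 * NMI per second cannot be programmed; the helper bumps hz to 2,
	 * i.e. 1.4e9 cycles per NMI, which fits in 31 bits. */
	printf("nmi_hz = %u\n", adjust_for_32bit_ctr(1, 2800000));
	return 0;
}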
@@ -268,17 +285,8 @@ int __init check_nmi_watchdog (void)
 		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
 		nmi_hz = 1;
-		/*
-		 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
-		 * are writable, with higher bits sign extending from bit 31.
-		 * So, we can only program the counter with 31 bit values and
-		 * 32nd bit should be 1, for 33.. to be 1.
-		 * Find the appropriate nmi_hz
-		 */
-		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 &&
-			((u64)cpu_khz * 1000) > 0x7fffffffULL) {
-			nmi_hz = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
-		}
+		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0)
+			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
 	}
 
 	kfree(counts);
@@ -634,7 +642,9 @@ static int setup_intel_arch_watchdog(void)
 
 	/* setup the timer */
 	wrmsr(evntsel_msr, evntsel, 0);
-	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
+
+	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
+	wrmsr(perfctr_msr, (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
 
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
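The counter is now armed with only the low 32 bits of the negated period; the hardware sign-extends bit 31, so the counter still starts at -period and overflows (raising the NMI) after exactly that many increments. A small sketch of the truncation (illustration only; the cpu_khz and nmi_hz values are assumptions, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cpu_khz = 2800000, nmi_hz = 2;		/* assumed 2.8 GHz, 2 NMIs/s */
	uint64_t period = cpu_khz * 1000 / nmi_hz;	/* 1.4e9, fits in 31 bits */

	/* what the patch hands to wrmsr(): the low 32 bits of -period */
	uint32_t written = (uint32_t)(0 - period);

	/* the counter sign-extends bit 31, so it starts at -period and
	 * counts up toward overflow, which is when the NMI fires */
	int64_t start = (int32_t)written;

	printf("written=0x%08x, counter starts at %lld, NMI after %llu increments\n",
	       written, (long long)start, (unsigned long long)-start);
	return 0;
}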
@@ -855,15 +865,23 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 			dummy &= ~P4_CCCR_OVF;
 			wrmsrl(wd->cccr_msr, dummy);
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
+			/* start the cycle over again */
+			wrmsrl(wd->perfctr_msr,
+			       -((u64)cpu_khz * 1000 / nmi_hz));
 		} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
 			/*
 			 * ArchPerfom/Core Duo needs to re-unmask
 			 * the apic vector
 			 */
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
+			/* ARCH_PERFMON has 32 bit counter writes */
+			wrmsr(wd->perfctr_msr,
+			      (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
+		} else {
+			/* start the cycle over again */
+			wrmsrl(wd->perfctr_msr,
+			       -((u64)cpu_khz * 1000 / nmi_hz));
 		}
-		/* start the cycle over again */
-		wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
 		rc = 1;
 	} else if (nmi_watchdog == NMI_IO_APIC) {
 		/* don't know how to accurately check for this.