 arch/x86/kernel/cpu/perfctr-watchdog.c | 41 +++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 05cc22dbd4ff..62c010063974 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -432,6 +432,27 @@ static const struct wd_ops p6_wd_ops = {
 #define P4_CCCR_ENABLE		(1 << 12)
 #define P4_CCCR_OVF		(1 << 31)
 
+#define P4_CONTROLS 18
+static unsigned int p4_controls[18] = {
+	MSR_P4_BPU_CCCR0,
+	MSR_P4_BPU_CCCR1,
+	MSR_P4_BPU_CCCR2,
+	MSR_P4_BPU_CCCR3,
+	MSR_P4_MS_CCCR0,
+	MSR_P4_MS_CCCR1,
+	MSR_P4_MS_CCCR2,
+	MSR_P4_MS_CCCR3,
+	MSR_P4_FLAME_CCCR0,
+	MSR_P4_FLAME_CCCR1,
+	MSR_P4_FLAME_CCCR2,
+	MSR_P4_FLAME_CCCR3,
+	MSR_P4_IQ_CCCR0,
+	MSR_P4_IQ_CCCR1,
+	MSR_P4_IQ_CCCR2,
+	MSR_P4_IQ_CCCR3,
+	MSR_P4_IQ_CCCR4,
+	MSR_P4_IQ_CCCR5,
+};
 /*
  * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
  * CRU_ESCR0 (with any non-null event selector) through a complemented
@@ -473,6 +494,26 @@ static int setup_p4_watchdog(unsigned nmi_hz)
 		evntsel_msr = MSR_P4_CRU_ESCR0;
 		cccr_msr = MSR_P4_IQ_CCCR0;
 		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
+
+		/*
+		 * If we're on the kdump kernel or other situation, we may
+		 * still have other performance counter registers set to
+		 * interrupt and they'll keep interrupting forever because
+		 * of the P4_CCCR_OVF quirk. So we need to ACK all the
+		 * pending interrupts and disable all the registers here,
+		 * before reenabling the NMI delivery. Refer to p4_rearm()
+		 * about the P4_CCCR_OVF quirk.
+		 */
+		if (reset_devices) {
+			unsigned int low, high;
+			int i;
+
+			for (i = 0; i < P4_CONTROLS; i++) {
+				rdmsr(p4_controls[i], low, high);
+				low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
+				wrmsr(p4_controls[i], low, high);
+			}
+		}
 	} else {
 		/* logical cpu 1 */
 		perfctr_msr = MSR_P4_IQ_PERFCTR1;