about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorShaohua Li <shaohua.li@intel.com>2006-09-26 04:52:27 -0400
committerAndi Kleen <andi@basil.nowhere.org>2006-09-26 04:52:27 -0400
commit4038f901cf102a40715b900984ed7540a9fa637f (patch)
treee73261bee0e0856dba5a8bc447b18779a61fe235 /arch
parentc41c5cd3b20a2d81c30498f13b1527847a8fdf69 (diff)
[PATCH] i386/x86-64: Fix NMI watchdog suspend/resume
Making NMI suspend/resume work with SMP. We use CPU hotplug to offline APs in SMP suspend/resume. Only BSP executes sysdev's .suspend/.resume method. APs should follow CPU hotplug code path. And: +From: Don Zickus <dzickus@redhat.com> Makes the start/stop paths of nmi watchdog more robust to handle the suspend/resume cases more gracefully. AK: I merged the two patches together Signed-off-by: Shaohua Li <shaohua.li@intel.com> Signed-off-by: Andi Kleen <ak@suse.de> Cc: Don Zickus <dzickus@redhat.com> Cc: Andi Kleen <ak@muc.de> Signed-off-by: Andrew Morton <akpm@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/i386/kernel/nmi.c33
-rw-r--r--arch/i386/kernel/smpboot.c3
-rw-r--r--arch/x86_64/kernel/nmi.c33
-rw-r--r--arch/x86_64/kernel/smpboot.c2
4 files changed, 56 insertions(+), 15 deletions(-)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 6241e4448cab..8e4ed930ce6b 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -63,7 +63,6 @@ struct nmi_watchdog_ctlblk {
63static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk); 63static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
64 64
65/* local prototypes */ 65/* local prototypes */
66static void stop_apic_nmi_watchdog(void *unused);
67static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); 66static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
68 67
69extern void show_registers(struct pt_regs *regs); 68extern void show_registers(struct pt_regs *regs);
@@ -341,15 +340,20 @@ static int nmi_pm_active; /* nmi_active before suspend */
341 340
342static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state) 341static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
343{ 342{
343 /* only CPU0 goes here, other CPUs should be offline */
344 nmi_pm_active = atomic_read(&nmi_active); 344 nmi_pm_active = atomic_read(&nmi_active);
345 disable_lapic_nmi_watchdog(); 345 stop_apic_nmi_watchdog(NULL);
346 BUG_ON(atomic_read(&nmi_active) != 0);
346 return 0; 347 return 0;
347} 348}
348 349
349static int lapic_nmi_resume(struct sys_device *dev) 350static int lapic_nmi_resume(struct sys_device *dev)
350{ 351{
351 if (nmi_pm_active > 0) 352 /* only CPU0 goes here, other CPUs should be offline */
352 enable_lapic_nmi_watchdog(); 353 if (nmi_pm_active > 0) {
354 setup_apic_nmi_watchdog(NULL);
355 touch_nmi_watchdog();
356 }
353 return 0; 357 return 0;
354} 358}
355 359
@@ -626,11 +630,21 @@ static void stop_p4_watchdog(void)
626 630
627void setup_apic_nmi_watchdog (void *unused) 631void setup_apic_nmi_watchdog (void *unused)
628{ 632{
633 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
634
629 /* only support LOCAL and IO APICs for now */ 635 /* only support LOCAL and IO APICs for now */
630 if ((nmi_watchdog != NMI_LOCAL_APIC) && 636 if ((nmi_watchdog != NMI_LOCAL_APIC) &&
631 (nmi_watchdog != NMI_IO_APIC)) 637 (nmi_watchdog != NMI_IO_APIC))
632 return; 638 return;
633 639
640 if (wd->enabled == 1)
641 return;
642
643 /* cheap hack to support suspend/resume */
644 /* if cpu0 is not active neither should the other cpus */
645 if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
646 return;
647
634 if (nmi_watchdog == NMI_LOCAL_APIC) { 648 if (nmi_watchdog == NMI_LOCAL_APIC) {
635 switch (boot_cpu_data.x86_vendor) { 649 switch (boot_cpu_data.x86_vendor) {
636 case X86_VENDOR_AMD: 650 case X86_VENDOR_AMD:
@@ -663,17 +677,22 @@ void setup_apic_nmi_watchdog (void *unused)
663 return; 677 return;
664 } 678 }
665 } 679 }
666 __get_cpu_var(nmi_watchdog_ctlblk.enabled) = 1; 680 wd->enabled = 1;
667 atomic_inc(&nmi_active); 681 atomic_inc(&nmi_active);
668} 682}
669 683
670static void stop_apic_nmi_watchdog(void *unused) 684void stop_apic_nmi_watchdog(void *unused)
671{ 685{
686 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
687
672 /* only support LOCAL and IO APICs for now */ 688 /* only support LOCAL and IO APICs for now */
673 if ((nmi_watchdog != NMI_LOCAL_APIC) && 689 if ((nmi_watchdog != NMI_LOCAL_APIC) &&
674 (nmi_watchdog != NMI_IO_APIC)) 690 (nmi_watchdog != NMI_IO_APIC))
675 return; 691 return;
676 692
693 if (wd->enabled == 0)
694 return;
695
677 if (nmi_watchdog == NMI_LOCAL_APIC) { 696 if (nmi_watchdog == NMI_LOCAL_APIC) {
678 switch (boot_cpu_data.x86_vendor) { 697 switch (boot_cpu_data.x86_vendor) {
679 case X86_VENDOR_AMD: 698 case X86_VENDOR_AMD:
@@ -697,7 +716,7 @@ static void stop_apic_nmi_watchdog(void *unused)
697 return; 716 return;
698 } 717 }
699 } 718 }
700 __get_cpu_var(nmi_watchdog_ctlblk.enabled) = 0; 719 wd->enabled = 0;
701 atomic_dec(&nmi_active); 720 atomic_dec(&nmi_active);
702} 721}
703 722
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index efe07990e7fc..9367af76ce37 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1376,7 +1376,8 @@ int __cpu_disable(void)
1376 */ 1376 */
1377 if (cpu == 0) 1377 if (cpu == 0)
1378 return -EBUSY; 1378 return -EBUSY;
1379 1379 if (nmi_watchdog == NMI_LOCAL_APIC)
1380 stop_apic_nmi_watchdog(NULL);
1380 clear_local_APIC(); 1381 clear_local_APIC();
1381 /* Allow any queued timer interrupts to get serviced */ 1382 /* Allow any queued timer interrupts to get serviced */
1382 local_irq_enable(); 1383 local_irq_enable();
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index dd57410dad51..5a35975e5763 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -63,7 +63,6 @@ struct nmi_watchdog_ctlblk {
63static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk); 63static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
64 64
65/* local prototypes */ 65/* local prototypes */
66static void stop_apic_nmi_watchdog(void *unused);
67static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); 66static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
68 67
69/* converts an msr to an appropriate reservation bit */ 68/* converts an msr to an appropriate reservation bit */
@@ -337,15 +336,20 @@ static int nmi_pm_active; /* nmi_active before suspend */
337 336
338static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state) 337static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
339{ 338{
339 /* only CPU0 goes here, other CPUs should be offline */
340 nmi_pm_active = atomic_read(&nmi_active); 340 nmi_pm_active = atomic_read(&nmi_active);
341 disable_lapic_nmi_watchdog(); 341 stop_apic_nmi_watchdog(NULL);
342 BUG_ON(atomic_read(&nmi_active) != 0);
342 return 0; 343 return 0;
343} 344}
344 345
345static int lapic_nmi_resume(struct sys_device *dev) 346static int lapic_nmi_resume(struct sys_device *dev)
346{ 347{
347 if (nmi_pm_active > 0) 348 /* only CPU0 goes here, other CPUs should be offline */
348 enable_lapic_nmi_watchdog(); 349 if (nmi_pm_active > 0) {
350 setup_apic_nmi_watchdog(NULL);
351 touch_nmi_watchdog();
352 }
349 return 0; 353 return 0;
350} 354}
351 355
@@ -561,11 +565,21 @@ static void stop_p4_watchdog(void)
561 565
562void setup_apic_nmi_watchdog(void *unused) 566void setup_apic_nmi_watchdog(void *unused)
563{ 567{
568 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
569
564 /* only support LOCAL and IO APICs for now */ 570 /* only support LOCAL and IO APICs for now */
565 if ((nmi_watchdog != NMI_LOCAL_APIC) && 571 if ((nmi_watchdog != NMI_LOCAL_APIC) &&
566 (nmi_watchdog != NMI_IO_APIC)) 572 (nmi_watchdog != NMI_IO_APIC))
567 return; 573 return;
568 574
575 if (wd->enabled == 1)
576 return;
577
578 /* cheap hack to support suspend/resume */
579 /* if cpu0 is not active neither should the other cpus */
580 if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
581 return;
582
569 if (nmi_watchdog == NMI_LOCAL_APIC) { 583 if (nmi_watchdog == NMI_LOCAL_APIC) {
570 switch (boot_cpu_data.x86_vendor) { 584 switch (boot_cpu_data.x86_vendor) {
571 case X86_VENDOR_AMD: 585 case X86_VENDOR_AMD:
@@ -582,17 +596,22 @@ void setup_apic_nmi_watchdog(void *unused)
582 return; 596 return;
583 } 597 }
584 } 598 }
585 __get_cpu_var(nmi_watchdog_ctlblk.enabled) = 1; 599 wd->enabled = 1;
586 atomic_inc(&nmi_active); 600 atomic_inc(&nmi_active);
587} 601}
588 602
589static void stop_apic_nmi_watchdog(void *unused) 603void stop_apic_nmi_watchdog(void *unused)
590{ 604{
605 struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
606
591 /* only support LOCAL and IO APICs for now */ 607 /* only support LOCAL and IO APICs for now */
592 if ((nmi_watchdog != NMI_LOCAL_APIC) && 608 if ((nmi_watchdog != NMI_LOCAL_APIC) &&
593 (nmi_watchdog != NMI_IO_APIC)) 609 (nmi_watchdog != NMI_IO_APIC))
594 return; 610 return;
595 611
612 if (wd->enabled == 0)
613 return;
614
596 if (nmi_watchdog == NMI_LOCAL_APIC) { 615 if (nmi_watchdog == NMI_LOCAL_APIC) {
597 switch (boot_cpu_data.x86_vendor) { 616 switch (boot_cpu_data.x86_vendor) {
598 case X86_VENDOR_AMD: 617 case X86_VENDOR_AMD:
@@ -607,7 +626,7 @@ static void stop_apic_nmi_watchdog(void *unused)
607 return; 626 return;
608 } 627 }
609 } 628 }
610 __get_cpu_var(nmi_watchdog_ctlblk.enabled) = 0; 629 wd->enabled = 0;
611 atomic_dec(&nmi_active); 630 atomic_dec(&nmi_active);
612} 631}
613 632
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 975380207b46..15555879ce95 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -1233,6 +1233,8 @@ int __cpu_disable(void)
1233 if (cpu == 0) 1233 if (cpu == 0)
1234 return -EBUSY; 1234 return -EBUSY;
1235 1235
1236 if (nmi_watchdog == NMI_LOCAL_APIC)
1237 stop_apic_nmi_watchdog(NULL);
1236 clear_local_APIC(); 1238 clear_local_APIC();
1237 1239
1238 /* 1240 /*