diff options
author | Shaohua Li <shaohua.li@intel.com> | 2006-09-26 04:52:27 -0400 |
---|---|---|
committer | Andi Kleen <andi@basil.nowhere.org> | 2006-09-26 04:52:27 -0400 |
commit | 4038f901cf102a40715b900984ed7540a9fa637f (patch) | |
tree | e73261bee0e0856dba5a8bc447b18779a61fe235 /arch/i386/kernel/nmi.c | |
parent | c41c5cd3b20a2d81c30498f13b1527847a8fdf69 (diff) |
[PATCH] i386/x86-64: Fix NMI watchdog suspend/resume
Making NMI suspend/resume work with SMP. We use CPU hotplug to offline
APs in SMP suspend/resume. Only BSP executes sysdev's .suspend/.resume
method. APs should follow CPU hotplug code path.
And:
From: Don Zickus <dzickus@redhat.com>
Makes the start/stop paths of the NMI watchdog more robust, so that they handle the
suspend/resume cases more gracefully.
AK: I merged the two patches together
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Diffstat (limited to 'arch/i386/kernel/nmi.c')
-rw-r--r-- | arch/i386/kernel/nmi.c | 33 |
1 file changed, 26 insertions, 7 deletions
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index 6241e4448cab..8e4ed930ce6b 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c | |||
@@ -63,7 +63,6 @@ struct nmi_watchdog_ctlblk { | |||
63 | static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk); | 63 | static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk); |
64 | 64 | ||
65 | /* local prototypes */ | 65 | /* local prototypes */ |
66 | static void stop_apic_nmi_watchdog(void *unused); | ||
67 | static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); | 66 | static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu); |
68 | 67 | ||
69 | extern void show_registers(struct pt_regs *regs); | 68 | extern void show_registers(struct pt_regs *regs); |
@@ -341,15 +340,20 @@ static int nmi_pm_active; /* nmi_active before suspend */ | |||
341 | 340 | ||
342 | static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state) | 341 | static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state) |
343 | { | 342 | { |
343 | /* only CPU0 goes here, other CPUs should be offline */ | ||
344 | nmi_pm_active = atomic_read(&nmi_active); | 344 | nmi_pm_active = atomic_read(&nmi_active); |
345 | disable_lapic_nmi_watchdog(); | 345 | stop_apic_nmi_watchdog(NULL); |
346 | BUG_ON(atomic_read(&nmi_active) != 0); | ||
346 | return 0; | 347 | return 0; |
347 | } | 348 | } |
348 | 349 | ||
349 | static int lapic_nmi_resume(struct sys_device *dev) | 350 | static int lapic_nmi_resume(struct sys_device *dev) |
350 | { | 351 | { |
351 | if (nmi_pm_active > 0) | 352 | /* only CPU0 goes here, other CPUs should be offline */ |
352 | enable_lapic_nmi_watchdog(); | 353 | if (nmi_pm_active > 0) { |
354 | setup_apic_nmi_watchdog(NULL); | ||
355 | touch_nmi_watchdog(); | ||
356 | } | ||
353 | return 0; | 357 | return 0; |
354 | } | 358 | } |
355 | 359 | ||
@@ -626,11 +630,21 @@ static void stop_p4_watchdog(void) | |||
626 | 630 | ||
627 | void setup_apic_nmi_watchdog (void *unused) | 631 | void setup_apic_nmi_watchdog (void *unused) |
628 | { | 632 | { |
633 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
634 | |||
629 | /* only support LOCAL and IO APICs for now */ | 635 | /* only support LOCAL and IO APICs for now */ |
630 | if ((nmi_watchdog != NMI_LOCAL_APIC) && | 636 | if ((nmi_watchdog != NMI_LOCAL_APIC) && |
631 | (nmi_watchdog != NMI_IO_APIC)) | 637 | (nmi_watchdog != NMI_IO_APIC)) |
632 | return; | 638 | return; |
633 | 639 | ||
640 | if (wd->enabled == 1) | ||
641 | return; | ||
642 | |||
643 | /* cheap hack to support suspend/resume */ | ||
644 | /* if cpu0 is not active neither should the other cpus */ | ||
645 | if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0)) | ||
646 | return; | ||
647 | |||
634 | if (nmi_watchdog == NMI_LOCAL_APIC) { | 648 | if (nmi_watchdog == NMI_LOCAL_APIC) { |
635 | switch (boot_cpu_data.x86_vendor) { | 649 | switch (boot_cpu_data.x86_vendor) { |
636 | case X86_VENDOR_AMD: | 650 | case X86_VENDOR_AMD: |
@@ -663,17 +677,22 @@ void setup_apic_nmi_watchdog (void *unused) | |||
663 | return; | 677 | return; |
664 | } | 678 | } |
665 | } | 679 | } |
666 | __get_cpu_var(nmi_watchdog_ctlblk.enabled) = 1; | 680 | wd->enabled = 1; |
667 | atomic_inc(&nmi_active); | 681 | atomic_inc(&nmi_active); |
668 | } | 682 | } |
669 | 683 | ||
670 | static void stop_apic_nmi_watchdog(void *unused) | 684 | void stop_apic_nmi_watchdog(void *unused) |
671 | { | 685 | { |
686 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
687 | |||
672 | /* only support LOCAL and IO APICs for now */ | 688 | /* only support LOCAL and IO APICs for now */ |
673 | if ((nmi_watchdog != NMI_LOCAL_APIC) && | 689 | if ((nmi_watchdog != NMI_LOCAL_APIC) && |
674 | (nmi_watchdog != NMI_IO_APIC)) | 690 | (nmi_watchdog != NMI_IO_APIC)) |
675 | return; | 691 | return; |
676 | 692 | ||
693 | if (wd->enabled == 0) | ||
694 | return; | ||
695 | |||
677 | if (nmi_watchdog == NMI_LOCAL_APIC) { | 696 | if (nmi_watchdog == NMI_LOCAL_APIC) { |
678 | switch (boot_cpu_data.x86_vendor) { | 697 | switch (boot_cpu_data.x86_vendor) { |
679 | case X86_VENDOR_AMD: | 698 | case X86_VENDOR_AMD: |
@@ -697,7 +716,7 @@ static void stop_apic_nmi_watchdog(void *unused) | |||
697 | return; | 716 | return; |
698 | } | 717 | } |
699 | } | 718 | } |
700 | __get_cpu_var(nmi_watchdog_ctlblk.enabled) = 0; | 719 | wd->enabled = 0; |
701 | atomic_dec(&nmi_active); | 720 | atomic_dec(&nmi_active); |
702 | } | 721 | } |
703 | 722 | ||