author:    Shaohua Li <shaohua.li@intel.com>    2006-09-26 04:52:27 -0400
committer: Andi Kleen <andi@basil.nowhere.org>  2006-09-26 04:52:27 -0400
commit:    4038f901cf102a40715b900984ed7540a9fa637f
tree:      e73261bee0e0856dba5a8bc447b18779a61fe235 /arch/x86_64/kernel/nmi.c
parent:    c41c5cd3b20a2d81c30498f13b1527847a8fdf69
[PATCH] i386/x86-64: Fix NMI watchdog suspend/resume
Make NMI suspend/resume work with SMP. CPU hotplug is used to offline the
APs during SMP suspend/resume; only the BSP executes the sysdev
.suspend/.resume methods, while the APs follow the CPU hotplug code path.
And:
From: Don Zickus <dzickus@redhat.com>
Make the start/stop paths of the NMI watchdog more robust so that they
handle the suspend/resume cases more gracefully.
AK: I merged the two patches together
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Diffstat (limited to 'arch/x86_64/kernel/nmi.c')
-rw-r--r--  arch/x86_64/kernel/nmi.c | 33
1 file changed, 26 insertions(+), 7 deletions(-)
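For orientation, a minimal sketch (not part of this patch) of how the two per-CPU entry points touched below are normally driven: the ordinary enable/disable paths cross-call setup_apic_nmi_watchdog()/stop_apic_nmi_watchdog() onto every online CPU, while the suspend/resume handlers changed in this patch call them directly on the BSP after the APs have been offlined. The bodies below are illustrative only and assume the four-argument on_each_cpu() of this kernel era; they are not copied from the tree.

/* Sketch only: simplified cross-call callers (assumed shape, not verbatim). */
void enable_lapic_nmi_watchdog(void)
{
        if (atomic_read(&nmi_active) != 0)
                return;

        /* run the per-CPU setup on every online CPU */
        on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
        touch_nmi_watchdog();
}

void disable_lapic_nmi_watchdog(void)
{
        if (atomic_read(&nmi_active) <= 0)
                return;

        /* run the per-CPU teardown on every online CPU */
        on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
}

Because both this cross-call path and the BSP-only suspend/resume path can reach the same per-CPU functions, the patch adds the wd->enabled guards so that a CPU is never enabled or disabled twice.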
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index dd57410dad5..5a35975e576 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -63,7 +63,6 @@ struct nmi_watchdog_ctlblk {
 static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
 
 /* local prototypes */
-static void stop_apic_nmi_watchdog(void *unused);
 static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
 
 /* converts an msr to an appropriate reservation bit */
@@ -337,15 +336,20 @@ static int nmi_pm_active; /* nmi_active before suspend */
 
 static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
 {
+        /* only CPU0 goes here, other CPUs should be offline */
         nmi_pm_active = atomic_read(&nmi_active);
-        disable_lapic_nmi_watchdog();
+        stop_apic_nmi_watchdog(NULL);
+        BUG_ON(atomic_read(&nmi_active) != 0);
         return 0;
 }
 
 static int lapic_nmi_resume(struct sys_device *dev)
 {
-        if (nmi_pm_active > 0)
-                enable_lapic_nmi_watchdog();
+        /* only CPU0 goes here, other CPUs should be offline */
+        if (nmi_pm_active > 0) {
+                setup_apic_nmi_watchdog(NULL);
+                touch_nmi_watchdog();
+        }
         return 0;
 }
 
@@ -561,11 +565,21 @@ static void stop_p4_watchdog(void)
 
 void setup_apic_nmi_watchdog(void *unused)
 {
+        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+
         /* only support LOCAL and IO APICs for now */
         if ((nmi_watchdog != NMI_LOCAL_APIC) &&
             (nmi_watchdog != NMI_IO_APIC))
                 return;
 
+        if (wd->enabled == 1)
+                return;
+
+        /* cheap hack to support suspend/resume */
+        /* if cpu0 is not active neither should the other cpus */
+        if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
+                return;
+
         if (nmi_watchdog == NMI_LOCAL_APIC) {
                 switch (boot_cpu_data.x86_vendor) {
                 case X86_VENDOR_AMD:
@@ -582,17 +596,22 @@ void setup_apic_nmi_watchdog(void *unused)
                         return;
                 }
         }
-        __get_cpu_var(nmi_watchdog_ctlblk.enabled) = 1;
+        wd->enabled = 1;
         atomic_inc(&nmi_active);
 }
 
-static void stop_apic_nmi_watchdog(void *unused)
+void stop_apic_nmi_watchdog(void *unused)
 {
+        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+
         /* only support LOCAL and IO APICs for now */
         if ((nmi_watchdog != NMI_LOCAL_APIC) &&
             (nmi_watchdog != NMI_IO_APIC))
                 return;
 
+        if (wd->enabled == 0)
+                return;
+
         if (nmi_watchdog == NMI_LOCAL_APIC) {
                 switch (boot_cpu_data.x86_vendor) {
                 case X86_VENDOR_AMD:
@@ -607,7 +626,7 @@ static void stop_apic_nmi_watchdog(void *unused)
                         return;
                 }
         }
-        __get_cpu_var(nmi_watchdog_ctlblk.enabled) = 0;
+        wd->enabled = 0;
         atomic_dec(&nmi_active);
 }
 
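The reason lapic_nmi_suspend()/lapic_nmi_resume() run only on CPU0 is that they are registered as sysdev callbacks, and sysdev .suspend/.resume are invoked once, on the processor performing the suspend, after the other CPUs have already been taken down by CPU hotplug. Roughly how that registration looks in this era's nmi.c, reconstructed from memory rather than quoted from the tree:

/* Illustrative sketch of the sysdev hookup (2.6.18-era API; not verbatim). */
static struct sysdev_class nmi_sysclass = {
        set_kset_name("lapic_nmi"),
        .resume  = lapic_nmi_resume,
        .suspend = lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
        .id  = 0,
        .cls = &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
        int error;

        /* only meaningful when the local APIC watchdog is in use */
        if (nmi_watchdog != NMI_LOCAL_APIC)
                return 0;

        error = sysdev_class_register(&nmi_sysclass);
        if (!error)
                error = sysdev_register(&device_lapic_nmi);
        return error;
}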