path: root/arch/x86
author    Eduardo Habkost <ehabkost@redhat.com>    2008-11-12 08:34:42 -0500
committer Ingo Molnar <mingo@elte.hu>              2008-11-12 12:55:47 -0500
commit    2ddded213895e41b9cfe1c084127e6c01632ac1a (patch)
tree      6748a409d82579834a7bab780939a932ae7db5a6 /arch/x86
parent    c370e5e089adfd5b1b863f3464cccae9ebf33cca (diff)
x86: move nmi_shootdown_cpus() to reboot.c
Impact: make nmi_shootdown_cpus() available to the rest of the x86 platform

Now nmi_shootdown_cpus() is ready to be used by non-kdump code also.
Move it to reboot.c.

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
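For illustration only, a minimal sketch of how a non-kdump caller could use the
relocated helper after this change. Everything except nmi_shootdown_cpus() and
the callback signature (int cpu, struct die_args *) is hypothetical and not part
of this patch:

    /* Hypothetical non-kdump user of nmi_shootdown_cpus(); assumes the
     * declaration is visible via a shared header (not shown in this diff). */
    static void quiesce_cpu(int cpu, struct die_args *args)
    {
            /* Runs in NMI context on every CPU except the one that called
             * nmi_shootdown_cpus(); do per-CPU shutdown work here. */
    }

    static void my_emergency_stop(void)
    {
            /* NMI all other CPUs and wait up to ~1s for them to check in,
             * then continue single-threaded on the calling CPU. */
            nmi_shootdown_cpus(quiesce_cpu);
    }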
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/crash.c    76
-rw-r--r--  arch/x86/kernel/reboot.c   83
2 files changed, 83 insertions, 76 deletions
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index fb298d1daac9..d84a852e4cd7 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -32,12 +32,6 @@
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
-/* This keeps a track of which one is crashing cpu. */
-static int crashing_cpu;
-static nmi_shootdown_cb shootdown_callback;
-
-static atomic_t waiting_for_crash_ipi;
-
 static void kdump_nmi_callback(int cpu, struct die_args *args)
 {
 	struct pt_regs *regs;
@@ -58,76 +52,6 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
 	disable_local_APIC();
 }
 
-static int crash_nmi_callback(struct notifier_block *self,
-			unsigned long val, void *data)
-{
-	int cpu;
-
-	if (val != DIE_NMI_IPI)
-		return NOTIFY_OK;
-
-	cpu = raw_smp_processor_id();
-
-	/* Don't do anything if this handler is invoked on crashing cpu.
-	 * Otherwise, system will completely hang. Crashing cpu can get
-	 * an NMI if system was initially booted with nmi_watchdog parameter.
-	 */
-	if (cpu == crashing_cpu)
-		return NOTIFY_STOP;
-	local_irq_disable();
-
-	shootdown_callback(cpu, (struct die_args *)data);
-
-	atomic_dec(&waiting_for_crash_ipi);
-	/* Assume hlt works */
-	halt();
-	for (;;)
-		cpu_relax();
-
-	return 1;
-}
-
-static void smp_send_nmi_allbutself(void)
-{
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(safe_smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, NMI_VECTOR);
-}
-
-static struct notifier_block crash_nmi_nb = {
-	.notifier_call = crash_nmi_callback,
-};
-
-void nmi_shootdown_cpus(nmi_shootdown_cb callback)
-{
-	unsigned long msecs;
-
-	/* Make a note of crashing cpu. Will be used in NMI callback.*/
-	crashing_cpu = safe_smp_processor_id();
-
-	shootdown_callback = callback;
-
-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
-	/* Would it be better to replace the trap vector here? */
-	if (register_die_notifier(&crash_nmi_nb))
-		return;		/* return what? */
-	/* Ensure the new callback function is set before sending
-	 * out the NMI
-	 */
-	wmb();
-
-	smp_send_nmi_allbutself();
-
-	msecs = 1000; /* Wait at most a second for the other cpus to stop */
-	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
-		mdelay(1);
-		msecs--;
-	}
-
-	/* Leave the nmi callback set */
-}
-
 static void kdump_nmi_shootdown_cpus(void)
 {
 	nmi_shootdown_cpus(kdump_nmi_callback);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 724adfc63cb9..364edeecc235 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -21,6 +21,9 @@
 # include <asm/iommu.h>
 #endif
 
+#include <mach_ipi.h>
+
+
 /*
  * Power off function, if any
  */
@@ -514,3 +517,83 @@ void machine_crash_shutdown(struct pt_regs *regs)
 	machine_ops.crash_shutdown(regs);
 }
 #endif
+
+
+#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+
+/* This keeps a track of which one is crashing cpu. */
+static int crashing_cpu;
+static nmi_shootdown_cb shootdown_callback;
+
+static atomic_t waiting_for_crash_ipi;
+
+static int crash_nmi_callback(struct notifier_block *self,
+			unsigned long val, void *data)
+{
+	int cpu;
+
+	if (val != DIE_NMI_IPI)
+		return NOTIFY_OK;
+
+	cpu = raw_smp_processor_id();
+
+	/* Don't do anything if this handler is invoked on crashing cpu.
+	 * Otherwise, system will completely hang. Crashing cpu can get
+	 * an NMI if system was initially booted with nmi_watchdog parameter.
+	 */
+	if (cpu == crashing_cpu)
+		return NOTIFY_STOP;
+	local_irq_disable();
+
+	shootdown_callback(cpu, (struct die_args *)data);
+
+	atomic_dec(&waiting_for_crash_ipi);
+	/* Assume hlt works */
+	halt();
+	for (;;)
+		cpu_relax();
+
+	return 1;
+}
+
+static void smp_send_nmi_allbutself(void)
+{
+	cpumask_t mask = cpu_online_map;
+	cpu_clear(safe_smp_processor_id(), mask);
+	if (!cpus_empty(mask))
+		send_IPI_mask(mask, NMI_VECTOR);
+}
+
+static struct notifier_block crash_nmi_nb = {
+	.notifier_call = crash_nmi_callback,
+};
+
+void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+{
+	unsigned long msecs;
+
+	/* Make a note of crashing cpu. Will be used in NMI callback.*/
+	crashing_cpu = safe_smp_processor_id();
+
+	shootdown_callback = callback;
+
+	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+	/* Would it be better to replace the trap vector here? */
+	if (register_die_notifier(&crash_nmi_nb))
+		return;		/* return what? */
+	/* Ensure the new callback function is set before sending
+	 * out the NMI
+	 */
+	wmb();
+
+	smp_send_nmi_allbutself();
+
+	msecs = 1000; /* Wait at most a second for the other cpus to stop */
+	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+		mdelay(1);
+		msecs--;
+	}
+
+	/* Leave the nmi callback set */
+}
+#endif
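Note: nmi_shootdown_cpus() stays non-static in reboot.c, so other x86 code can
call it only through a declaration along these lines in a shared header. The
header is not touched by this diff, so its exact location and contents here are
an assumption inferred from the callback usage above:

    typedef void (*nmi_shootdown_cb)(int, struct die_args*);
    void nmi_shootdown_cpus(nmi_shootdown_cb callback);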